Ollama Local Client

Source: examples/clients/ollama_local_client.py

Introduction

Ollama provides local model serving, the OpenAI Responses API supplies a common contract surface, and HELM underlines why comparable execution conditions matter in benchmarking. This example verifies the Ollama client integration path under the project's tracing and runtime conventions.

Technical Implementation

  1. Configure Tracer with JSONL + console output so each run emits machine-readable traces and lifecycle logs (a sketch for reading these traces back follows the listing below).

  2. Build the runtime surface (public APIs only) and run _build_payload under Tracer.run_callable(...) with a fixed request_id.

  3. Construct LLMRequest inputs and call OllamaLLMClient.generate(...) through the managed client context.

  4. Print a compact JSON payload including trace_info for deterministic tests and docs examples.

    flowchart LR
        A["Input prompt or scenario"] --> B["main(): runtime wiring"]
        B --> C["OllamaLLMClient.generate(...)"]
        C --> D["LLMRequest/LLMResponse contracts wrap provider behavior"]
        C --> E["Tracer JSONL + console events"]
        D --> F["ExecutionResult/payload"]
        E --> F
        F --> G["Printed JSON output"]

from __future__ import annotations

import json
from pathlib import Path

from design_research_agents import OllamaLLMClient, Tracer
from design_research_agents.llm import LLMMessage, LLMRequest


def _build_payload() -> dict[str, object]:
    # Run the managed Ollama client using public runtime APIs. The with statement
    # shuts down the managed local server automatically when the example finishes.
    with OllamaLLMClient(
        name="ollama-local-dev",
        default_model="qwen2.5:1.5b-instruct",
        host="127.0.0.1",
        port=11434,
        manage_server=True,
        ollama_executable="ollama",
        auto_pull_model=False,
        startup_timeout_seconds=60.0,
        poll_interval_seconds=0.25,
        request_timeout_seconds=60.0,
        max_retries=2,
        model_patterns=("qwen2.5:*", "llama3:*"),
    ) as client:
        description = client.describe()
        prompt = "Give one sentence on when to use local model pull automation."
        response = client.generate(
            LLMRequest(
                messages=(
                    LLMMessage(role="system", content="You are a concise engineering design assistant."),
                    LLMMessage(role="user", content=prompt),
                ),
                model=client.default_model(),
                temperature=0.0,
                max_tokens=120,
            )
        )
        llm_call = {
            "prompt": prompt,
            "response_text": response.text,
            "response_model": response.model,
            "response_provider": response.provider,
            "response_has_text": bool(response.text.strip()),
        }
        return {
            "client_class": description["client_class"],
            "default_model": description["default_model"],
            "llm_call": llm_call,
            "backend": description["backend"],
            "capabilities": description["capabilities"],
            "server": description["server"],
        }


def main() -> None:
    """Run the traced Ollama client call and print the resulting payload."""
    # A fixed request id keeps traces and docs output deterministic across runs.
    request_id = "example-clients-ollama-local-call-001"
    tracer = Tracer(
        enabled=True,
        trace_dir=Path("artifacts/examples/traces"),
        enable_jsonl=True,
        enable_console=True,
    )
    payload = tracer.run_callable(
        agent_name="ExamplesOllamaClientCall",
        request_id=request_id,
        input_payload={"scenario": "ollama-local-client-call"},
        function=_build_payload,
    )
    assert isinstance(payload, dict)
    payload["example"] = "clients/ollama_local_client.py"
    payload["trace"] = tracer.trace_info(request_id)
    # Print the combined payload as stable, sorted JSON for docs and tests.
    print(json.dumps(payload, ensure_ascii=True, indent=2, sort_keys=True))


if __name__ == "__main__":
    main()
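
Each traced run appends events to a JSONL file under the trace directory. The sketch below (referenced in step 1) shows one way to read those events back; the filename pattern mirrors the trace_path shown in the expected output, and the one-JSON-object-per-line layout is an assumption about the Tracer rather than a documented contract.

import json
from pathlib import Path

# Assumption: one JSON object per line; the exact event fields depend on the Tracer implementation.
trace_dir = Path("artifacts/examples/traces")
for trace_path in sorted(trace_dir.glob("run_*_example-clients-ollama-local-call-001.jsonl")):
    with trace_path.open(encoding="utf-8") as handle:
        events = [json.loads(line) for line in handle if line.strip()]
    print(trace_path.name, "->", len(events), "events")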

Expected Results

Run Command

PYTHONPATH=src python3 examples/clients/ollama_local_client.py
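
To reproduce the deterministic capture shown below, and assuming the DRA_EXAMPLE_LLM_MODE variable is honored the same way in your checkout, the mode can be set inline:

DRA_EXAMPLE_LLM_MODE=deterministic PYTHONPATH=src python3 examples/clients/ollama_local_client.py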

Example output captured with DRA_EXAMPLE_LLM_MODE=deterministic (timestamps, durations, and trace filenames vary by run):

{
  "backend": {
    "base_url": "http://127.0.0.1:11434",
    "default_model": "qwen2.5:1.5b-instruct",
    "host": "127.0.0.1",
    "kind": "ollama",
    "max_retries": 2,
    "model_patterns": [
      "qwen2.5:*",
      "llama3:*"
    ],
    "name": "ollama-local-dev",
    "port": 11434
  },
  "capabilities": {
    "json_mode": "prompt+validate",
    "max_context_tokens": null,
    "streaming": false,
    "tool_calling": "best_effort",
    "vision": false
  },
  "client_class": "OllamaLLMClient",
  "default_model": "qwen2.5:1.5b-instruct",
  "example": "clients/ollama_local_client.py",
  "llm_call": {
    "prompt": "Give one sentence on when to use local model pull automation.",
    "response_has_text": true,
    "response_model": "qwen2.5:1.5b-instruct",
    "response_provider": "example-test-monkeypatch",
    "response_text": "Use automated local pulls when startup reliability matters more than cold-start time."
  },
  "server": {
    "host": "127.0.0.1",
    "kind": "ollama",
    "managed": true,
    "port": 11434
  },
  "trace": {
    "request_id": "example-clients-ollama-local-call-001",
    "trace_dir": "artifacts/examples/traces",
    "trace_path": "artifacts/examples/traces/run_20260222T162206Z_example-clients-ollama-local-call-001.jsonl"
  }
}
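
For regression-style checks, the printed payload can be captured and asserted against its stable fields. A minimal sketch, assuming the example is run from the repository root and that the tracer's console events do not interleave with the JSON on stdout:

import json
import os
import subprocess

# Run the example as a subprocess and parse its stdout as the JSON payload printed by main().
env = {**os.environ, "PYTHONPATH": "src", "DRA_EXAMPLE_LLM_MODE": "deterministic"}
completed = subprocess.run(
    ["python3", "examples/clients/ollama_local_client.py"],
    capture_output=True,
    text=True,
    check=True,
    env=env,
)
payload = json.loads(completed.stdout)
# These fields are set by the example itself, so they are stable across runs.
assert payload["client_class"] == "OllamaLLMClient"
assert payload["example"] == "clients/ollama_local_client.py"
assert payload["trace"]["request_id"] == "example-clients-ollama-local-call-001"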

References