Anthropic Service Client#
Source: examples/clients/anthropic_service_client.py
Introduction#
Anthropic hosted inference is useful when teams want strong instruction-following and tool-use support from one managed API while keeping application code on provider-neutral LLM contracts. This example exercises the Anthropic service client path with trace capture and deterministic output support for CI.
Technical Implementation#
- Configure `Tracer` with JSONL + console sinks so each run emits machine-readable traces.
- Build runtime inputs through public package APIs and invoke `AnthropicServiceLLMClient.generate(...)`.
- Construct `LLMRequest` payload fields and execute one representative remote-style call.
- Print a compact JSON payload that includes trace metadata for docs and deterministic tests.
flowchart LR
A["Prompt input"] --> B["main(): tracing setup"]
B --> C["AnthropicServiceLLMClient.generate(...)"]
C --> D["LLMRequest and LLMResponse contracts"]
C --> E["Tracer JSONL + console events"]
D --> F["Output payload"]
E --> F
F --> G["Printed JSON result"]
1from __future__ import annotations
2
3import json
4from pathlib import Path
5
6import design_research_agents as drag
7
8
def _build_payload() -> dict[str, object]:
    """Build the hosted Anthropic client and execute one representative request.

    Returns:
        A JSON-serializable dict combining the client's self-description
        (backend, capabilities, server) with the prompt/response details of a
        single ``generate()`` call.
    """
    # Construct the hosted Anthropic client purely through public runtime APIs.
    client = drag.AnthropicServiceLLMClient(
        name="anthropic-prod",
        default_model="claude-3-5-haiku-latest",
        api_key_env="ANTHROPIC_API_KEY",
        # Inline key is demo-only config; real deployments rely on the env var above.
        api_key="example-key-for-config-demo",
        base_url="https://api.anthropic.com",
        max_retries=3,
        model_patterns=("claude-3-5-haiku-latest", "claude-3-5-*"),
    )
    description = client.describe()
    prompt = "In one sentence, when should teams run architecture red-team reviews?"
    response = client.generate(
        drag.LLMRequest(
            messages=(
                drag.LLMMessage(role="system", content="You are a concise engineering design assistant."),
                drag.LLMMessage(role="user", content=prompt),
            ),
            model=client.default_model(),
            # temperature=0.0 keeps output as deterministic as the backend allows.
            temperature=0.0,
            max_tokens=120,
        )
    )
    llm_call = {
        "prompt": prompt,
        "response_text": response.text,
        "response_model": response.model,
        "response_provider": response.provider,
        "response_has_text": bool(response.text.strip()),
    }
    return {
        "client_class": description["client_class"],
        "default_model": description["default_model"],
        "llm_call": llm_call,
        "backend": description["backend"],
        "capabilities": description["capabilities"],
        "server": description["server"],
    }
48
49
def main() -> None:
    """Run the traced Anthropic service client call and print its payload."""
    # Fixed request id keeps traces and docs output deterministic across runs.
    request_id = "example-clients-anthropic-service-call-001"
    tracer = drag.Tracer(
        enabled=True,
        trace_dir=Path("artifacts/examples/traces"),
        enable_jsonl=True,
        enable_console=True,
    )
    payload = tracer.run_callable(
        agent_name="ExamplesAnthropicServiceClientCall",
        request_id=request_id,
        input_payload={"scenario": "anthropic-service-client-call"},
        function=_build_payload,
    )
    # Validate with an explicit raise: `assert` is stripped when run with -O.
    if not isinstance(payload, dict):
        raise TypeError(f"expected dict payload, got {type(payload).__name__}")
    payload["example"] = "clients/anthropic_service_client.py"
    payload["trace"] = tracer.trace_info(request_id)
    # ensure_ascii + sort_keys keep the printed JSON byte-stable for docs diffs.
    print(json.dumps(payload, ensure_ascii=True, indent=2, sort_keys=True))


if __name__ == "__main__":
    main()
Expected Results#
Run Command
PYTHONPATH=src python3 examples/clients/anthropic_service_client.py
Example output captured with DRA_EXAMPLE_LLM_MODE=deterministic (timestamps, durations, and trace filenames vary by run):
{
"backend": {
"api_key_env": "ANTHROPIC_API_KEY",
"base_url": "https://api.anthropic.com",
"default_model": "claude-3-5-haiku-latest",
"kind": "anthropic_service",
"max_retries": 3,
"model_patterns": [
"claude-3-5-haiku-latest",
"claude-3-5-*"
],
"name": "anthropic-prod"
},
"capabilities": {
"json_mode": "native",
"max_context_tokens": null,
"streaming": true,
"tool_calling": "native",
"vision": false
},
"client_class": "AnthropicServiceLLMClient",
"default_model": "claude-3-5-haiku-latest",
"example": "clients/anthropic_service_client.py",
"llm_call": {
"prompt": "In one sentence, when should teams run architecture red-team reviews?",
"response_has_text": true,
"response_model": "claude-3-5-haiku-latest",
"response_provider": "example-test-monkeypatch",
"response_text": "Run architecture red-team reviews before committing high-impact changes with uncertain failure modes."
},
"server": null,
"trace": {
"request_id": "example-clients-anthropic-service-call-001",
"trace_dir": "artifacts/examples/traces",
"trace_path": "artifacts/examples/traces/run_20260222T162206Z_example-clients-anthropic-service-call-001.jsonl"
}
}