Multi Source Tool Usage
Source: examples/tools/multi_source_tool_usage.py
Introduction
MCP standardizes tool connectivity, data-fusion concepts motivate combining heterogeneous signals, and RAG provides a grounding mechanism for synthesis over retrieved evidence. This example fuses MCP tools and script tools into one workflow that emits a traceable narrative artifact.
Technical Implementation
Configure a Tracer with JSONL + console output so each run emits machine-readable traces and lifecycle logs.
Build the runtime surface (public APIs only) and execute Toolbox.invoke_dict(...) with a fixed request_id.
Configure and invoke Toolbox integrations (core/script/MCP/callable) before assembling the final payload.
Print a compact JSON payload including trace_info for deterministic tests and docs examples.
flowchart LR
A["Input prompt or scenario"] --> B["main(): runtime wiring"]
B --> C["Toolbox.invoke_dict(...)"]
C --> D["core, script, and MCP tools execute in one composed runtime"]
C --> E["Tracer JSONL + console events"]
D --> F["ExecutionResult/payload"]
E --> F
F --> G["Printed JSON output"]
1from __future__ import annotations
2
3import json
4import sys
5from pathlib import Path
6
7from design_research_agents import MCPServerConfig, ScriptToolConfig, Toolbox, Tracer
8
9
10def _source_tool_counts(runtime: Toolbox) -> dict[str, int]:
11 counts = {"core": 0, "script": 0, "mcp": 0}
12 for spec in runtime.list_tools():
13 if spec.name.startswith("script::"):
14 counts["script"] += 1
15 elif spec.name.startswith("local_core::"):
16 counts["mcp"] += 1
17 else:
18 counts["core"] += 1
19 return counts
20
21
def _run_report() -> dict[str, object]:
    """Compose core, script, and MCP tools in one Toolbox run and return a report.

    Writes the source text and the final JSON report under ``artifacts/examples/``,
    scores the text with the script-backed rubric tool, and cross-checks the word
    count between the in-process core tool and the MCP-proxied variant.

    Returns:
        A JSON-serializable dict with paths, counts, scores, and match flags.
    """
    # One request id shared by every invocation in this run so all calls
    # correlate in traces; defined once instead of repeating the literal.
    request_id = "example-multi-source-tool-usage"
    source_text = (
        "Design review checklist: verify latch durability, reduce assembly time, "
        "and keep maintenance steps field-serviceable."
    )

    with Toolbox(
        workspace_root=".",
        enable_core_tools=True,
        script_tools=(
            ScriptToolConfig(
                name="rubric_score",
                path="examples/tools/script_tools/rubric_score.py",
                description="Score text against a simple rubric.",
                input_schema={
                    "type": "object",
                    "properties": {
                        "text": {"type": "string"},
                        "max_score": {"type": "integer"},
                    },
                    "required": ["text"],
                    "additionalProperties": False,
                },
                output_schema={"type": "object"},
                filesystem_write=True,
            ),
        ),
        mcp_servers=(
            MCPServerConfig(
                id="local_core",
                command=(sys.executable, "-m", "design_research_agents._mcp_server"),
                env={"PYTHONPATH": "src"},
                timeout_s=20,
            ),
        ),
    ) as runtime:

        def invoke(tool: str, arguments: dict[str, object]) -> dict[str, object]:
            # Shared request id, but a fresh dependencies mapping per call so no
            # mutable dict is accidentally shared across invocations.
            return runtime.invoke_dict(
                tool, arguments, request_id=request_id, dependencies={}
            )

        source_tool_counts = _source_tool_counts(runtime)
        write_result = invoke(
            "fs.write_text",
            {
                "path": "artifacts/examples/multi_source_tool_usage_input.txt",
                "content": source_text,
                "overwrite": True,
            },
        )
        script_score = invoke(
            "script::rubric_score", {"text": source_text, "max_score": 20}
        )
        core_stats = invoke("text.word_count", {"text": source_text})
        # Same tool, proxied via the local MCP server — used for the parity check.
        mcp_stats = invoke("local_core::text.word_count", {"text": source_text})

        # Guard against a zero max_score reported by the script tool rather
        # than letting the example die on ZeroDivisionError.
        max_score = float(script_score["max_score"])
        score_percent = (
            (float(script_score["score"]) / max_score) * 100.0 if max_score else 0.0
        )

        report: dict[str, object] = {
            "input_path": write_result["path"],
            "source_tool_counts": source_tool_counts,
            "script_score": script_score["score"],
            "script_max_score": script_score["max_score"],
            "core_word_count": core_stats["word_count"],
            "mcp_word_count": mcp_stats["word_count"],
            "word_count_match": core_stats["word_count"] == mcp_stats["word_count"],
            "score_percent": score_percent,
            "script_trace_path": script_score.get("trace_path"),
        }
        report_write = invoke(
            "fs.write_text",
            {
                "path": "artifacts/examples/multi_source_tool_usage_report.json",
                "content": json.dumps(report, ensure_ascii=True, indent=2, sort_keys=True) + "\n",
                "overwrite": True,
            },
        )
        report["report_path"] = report_write["path"]

    return report
113
114
def main() -> None:
    """Run traced multi-source report generation and print the JSON result."""
    # Fixed request id keeps traces and docs output deterministic across runs.
    request_id = "example-tools-multi-source-tool-usage-001"
    tracer = Tracer(
        enabled=True,
        trace_dir=Path("artifacts/examples/traces"),
        enable_jsonl=True,
        enable_console=True,
    )
    report = tracer.run_callable(
        agent_name="ExamplesMultiSourceToolUsage",
        request_id=request_id,
        input_payload={"scenario": "multi-source-tool-usage"},
        function=_run_report,
    )
    # `assert` is stripped under `python -O`; validate explicitly so the
    # example fails loudly even in optimized mode.
    if not isinstance(report, dict):
        raise TypeError(
            f"_run_report must return a dict, got {type(report).__name__}"
        )
    report["example"] = "tools/multi_source_tool_usage.py"
    report["trace"] = tracer.trace_info(request_id)
    print(json.dumps(report, ensure_ascii=True, indent=2, sort_keys=True))
135
136
# Script entry point: run the example only when executed directly,
# not when this module is imported (e.g. by docs tooling or tests).
if __name__ == "__main__":
    main()
Expected Results
Run Command
PYTHONPATH=src python3 examples/tools/multi_source_tool_usage.py
Example output captured with DRA_EXAMPLE_LLM_MODE=deterministic
(timestamps, durations, and trace filenames vary by run):
{
"core_word_count": 14,
"example": "tools/multi_source_tool_usage.py",
"input_path": "artifacts/examples/<truncated-input-path>",
"mcp_word_count": 14,
"report_path": "artifacts/examples/<truncated-report-path>",
"score_percent": 10.0,
"script_max_score": 20,
"script_score": 2,
"script_trace_path": "artifacts/examples/traces/run_20260222T162210Z_example-script-rubric-score-001.jsonl",
"source_tool_counts": {
"core": 23,
"mcp": 23,
"script": 1
},
"trace": {
"request_id": "example-tools-multi-source-tool-usage-001",
"trace_dir": "artifacts/examples/traces",
"trace_path": "artifacts/examples/traces/run_20260222T162209Z_example-tools-multi-source-tool-usage-001.jsonl"
},
"word_count_match": true
}