Workflow Prompt Mode
Source: examples/workflow/workflow_prompt_mode.py
Introduction
ReAct and Plan-and-Solve motivate explicit control over reasoning phases, and JSON Schema formalizes structured inputs/outputs when prompt-mode steps need predictable contracts. This example shows prompt-mode workflow composition with agent, logic, and tool steps under one runtime.
Technical Implementation
Configure Tracer with JSONL + console output so each run emits machine-readable traces and lifecycle logs.
Build the runtime surface (public APIs only) and execute Workflow.run(...) with a fixed request_id.
Configure and invoke Toolbox integrations (core/script/MCP/callable) before assembling the final payload.
Print a compact JSON payload including trace_info for deterministic tests and docs examples.
flowchart LR
A["Input prompt or scenario"] --> B["main(): runtime wiring"]
B --> C["Workflow.run(...)"]
C --> D["WorkflowRuntime schedules step graph (DelegateStep, LogicStep, ToolStep)"]
C --> E["Tracer JSONL + console events"]
D --> F["ExecutionResult/payload"]
E --> F
F --> G["Printed JSON output"]
1from __future__ import annotations
2
3import json
4from pathlib import Path
5
6from design_research_agents import (
7 DelegateStep,
8 DirectLLMCall,
9 ExecutionResult,
10 LlamaCppServerLLMClient,
11 LogicStep,
12 Toolbox,
13 ToolStep,
14 Tracer,
15 Workflow,
16)
17
18
19def _summarize_run(result: ExecutionResult) -> dict[str, object]:
20 return result.summary()
21
22
def main() -> None:
    """Run a reusable prompt-mode workflow for two routed design requests.

    Builds a six-step workflow — a router LogicStep that selects either an
    LLM-agent branch (draft -> JSON extraction -> finalize) or a deterministic
    template branch — executes it once per branch with a stable request_id,
    and prints a compact JSON summary of both runs.
    """
    tracer = Tracer(
        enabled=True,
        trace_dir=Path("artifacts/examples/traces"),
        enable_jsonl=True,
        enable_console=True,
    )
    # Run the prompt-mode workflow using public runtime surfaces. The with
    # statement shuts down the managed client and tool runtime automatically
    # when the example is done.
    with Toolbox() as tool_runtime, LlamaCppServerLLMClient() as llm_client:
        writer_agent = DirectLLMCall(llm_client=llm_client, tracer=tracer)

        workflow_steps = [
            # Router: prompts prefixed with "template:" take the deterministic
            # template branch; everything else is delegated to the LLM agent.
            LogicStep(
                step_id="router",
                handler=lambda context: {
                    "route": (
                        "template_path" if str(context["prompt"]).lower().startswith("template:") else "agent_path"
                    )
                },
                route_map={
                    "agent_path": ("draft_agent",),
                    "template_path": ("draft_template",),
                },
            ),
            # Agent branch: ask the LLM for a single JSON object.
            DelegateStep(
                step_id="draft_agent",
                delegate=writer_agent,
                dependencies=("router",),
                prompt_builder=lambda context: (
                    f"Write one JSON object with keys title and summary for this design request: {context['prompt']}"
                ),
            ),
            # Extract the JSON object embedded in the raw model text.
            ToolStep(
                step_id="parse_agent_json",
                tool_name="text.extract_json",
                dependencies=("draft_agent",),
                input_builder=lambda context: {
                    "text": context["dependency_results"]["draft_agent"]["output"]["output"]["model_text"]
                },
            ),
            # Normalize the agent-branch payload; .get(...) tolerates a model
            # response that omits either key.
            LogicStep(
                step_id="finalize_agent",
                dependencies=("parse_agent_json",),
                handler=lambda context: {
                    "branch": "agent",
                    "title": context["dependency_results"]["parse_agent_json"]["output"]["result"]["json"].get(
                        "title", ""
                    ),
                    "summary": context["dependency_results"]["parse_agent_json"]["output"]["result"]["json"].get(
                        "summary", ""
                    ),
                },
            ),
            # Template branch: deterministic output, no LLM call.
            LogicStep(
                step_id="draft_template",
                dependencies=("router",),
                handler=lambda context: {
                    "title": "Template fallback design brief",
                    "summary": f"Template mode output for: {context['prompt']}",
                },
            ),
            LogicStep(
                step_id="finalize_template",
                dependencies=("draft_template",),
                handler=lambda context: {
                    "branch": "template",
                    "title": context["dependency_results"]["draft_template"]["output"]["title"],
                    "summary": context["dependency_results"]["draft_template"]["output"]["summary"],
                },
            ),
        ]

        workflow = Workflow(
            tool_runtime=tool_runtime,
            steps=workflow_steps,
            tracer=tracer,
        )

        # Keep per-branch request ids stable so prompt-mode variants are easy
        # to compare across runs.
        agent_request_id = "example-workflow-prompt-design-agent-001"
        template_request_id = "example-workflow-prompt-design-template-001"
        agent_result = workflow.run(
            "Draft a design brief for reducing onboarding friction in a medical-device setup flow.",
            request_id=agent_request_id,
        )
        template_result = workflow.run(
            "template: Produce a deterministic fallback brief for manufacturability review findings.",
            request_id=template_request_id,
        )

        # Emit both run summaries as one deterministic JSON document
        # (sorted keys, ASCII-only) so docs/tests can diff the output.
        print(
            json.dumps(
                {
                    "agent_branch_run": _summarize_run(agent_result),
                    "template_branch_run": _summarize_run(template_result),
                },
                ensure_ascii=True,
                indent=2,
                sort_keys=True,
            )
        )
128
129
# Script entry point: run the example only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
Expected Results
Run Command
PYTHONPATH=src python3 examples/workflow/workflow_prompt_mode.py
Example output shape (values vary by run):
{
"agent_branch_run": {
"success": true,
"final_output": "<example-specific payload>",
"terminated_reason": "<string-or-null>",
"error": null,
"trace": {
"request_id": "<request-id>",
"trace_dir": "artifacts/examples/traces",
"trace_path": "artifacts/examples/traces/run_<timestamp>_<request_id>.jsonl"
}
},
"template_branch_run": {
"success": true,
"final_output": "<example-specific payload>",
"terminated_reason": "<string-or-null>",
"error": null,
"trace": {
"request_id": "<request-id>",
"trace_dir": "artifacts/examples/traces",
"trace_path": "artifacts/examples/traces/run_<timestamp>_<request_id>.jsonl"
}
}
}