Workflow Prompt Mode#
Source: examples/workflow/workflow_prompt_mode.py
Introduction#
ReAct and Plan-and-Solve motivate explicit control over reasoning phases, and JSON Schema formalizes
structured inputs/outputs when prompt-mode steps need predictable contracts. This example shows prompt-mode
workflow composition with agent, logic, and tool steps under one runtime, including one packaged-problem-like
object passed directly to Workflow.run(...).
Technical Implementation#
Configure Tracer with JSONL + console output so each run emits machine-readable traces and lifecycle logs. Build the runtime surface (public APIs only) and execute Workflow.run(...) with a fixed request_id, once from a packaged-problem-like object and once from a plain fallback string prompt. Configure and invoke Toolbox integrations (core/script/MCP/callable) before assembling the final payload. Print a compact JSON payload including trace_info for deterministic tests and docs examples.
The diagram below is generated from the example’s configured Workflow.
flowchart LR
workflow_entry["Workflow Entrypoint"]
step_1["router<br/>LogicStep"]
step_2["draft_agent<br/>DelegateStep<br/>delegate=_DocDelegate"]
step_3["parse_agent_json<br/>ToolStep<br/>tool=text.extract_json"]
step_4["finalize_agent<br/>LogicStep"]
step_5["draft_template<br/>LogicStep"]
step_6["finalize_template<br/>LogicStep"]
workflow_entry --> step_1
step_1 -. "route=agent_path" .-> step_2
step_1 -. "route=template_path" .-> step_5
step_2 --> step_3
step_3 --> step_4
step_5 --> step_6
1from __future__ import annotations
2
3import json
4from collections.abc import Mapping
5from pathlib import Path
6
7import design_research_agents as drag
8
9WORKFLOW_DIAGRAM_DIRECTION = "LR"
10
11
12class _DocDelegate:
13 """Minimal delegate stub used only for docs-diagram workflow construction."""
14
15 def run(self, prompt: str, *, request_id: str | None = None, dependencies: object | None = None) -> object:
16 del prompt, request_id, dependencies
17 raise RuntimeError("Docs-only delegate stub should not be executed.")
18
19
20class _ExampleProblemMetadata:
21 def __init__(self, *, problem_id: str, title: str, kind: str) -> None:
22 self.problem_id = problem_id
23 self.title = title
24 self.kind = kind
25
26
27class _ExamplePackagedProblem:
28 """Tiny packaged-problem stand-in used to document prompt-like workflow inputs."""
29
30 def __init__(self) -> None:
31 self.metadata = _ExampleProblemMetadata(
32 problem_id="workflow-prompt-problem-001",
33 title="Reduce onboarding friction",
34 kind="design-brief",
35 )
36 self.candidate_kind = "json-brief"
37 self.family = "workflow-example"
38
39 def render_brief(self) -> str:
40 return "Draft a design brief for reducing onboarding friction in a medical-device setup flow."
41
42
43def _summarize_run(result: drag.ExecutionResult) -> dict[str, object]:
44 return result.summary()
45
46
47def _problem_metadata_from_context(context: Mapping[str, object]) -> dict[str, object]:
48 metadata = context.get("problem_metadata", {})
49 return dict(metadata) if isinstance(metadata, dict) else {}
50
51
def build_example_workflow(
    *,
    tracer: drag.Tracer | None = None,
    tool_runtime: object | None = None,
    writer_agent: object | None = None,
) -> drag.Workflow:
    """Build the routed prompt-mode workflow used for docs diagrams and runtime execution.

    Args:
        tracer: Optional tracer attached to the workflow; None leaves the run untraced.
        tool_runtime: Optional tool runtime backing the ToolStep.
        writer_agent: Optional drafting delegate. Defaults to a docs-only stub
            that raises if actually executed, so diagrams can be built without an LLM.

    Returns:
        A Workflow whose router LogicStep dispatches to either the agent branch
        (draft_agent -> parse_agent_json -> finalize_agent) or the template
        branch (draft_template -> finalize_template).
    """
    # Fall back to the inert docs stub when no real delegate is supplied.
    resolved_writer_agent = writer_agent or _DocDelegate()
    return drag.Workflow(
        tool_runtime=tool_runtime,
        tracer=tracer,
        steps=[
            drag.LogicStep(
                step_id="router",
                # Prompts starting with "template:" (case-insensitive) take the
                # deterministic template branch; all other prompts use the agent.
                handler=lambda context: {
                    "route": (
                        "template_path" if str(context["prompt"]).lower().startswith("template:") else "agent_path"
                    )
                },
                # Maps the emitted route value to the step ids activated for that branch.
                route_map={
                    "agent_path": ("draft_agent",),
                    "template_path": ("draft_template",),
                },
            ),
            drag.DelegateStep(
                step_id="draft_agent",
                delegate=resolved_writer_agent,
                dependencies=("router",),
                # Ask the delegate for a single JSON object so the next step can parse it.
                prompt_builder=lambda context: (
                    f"Write one JSON object with keys title and summary for this design request: {context['prompt']}"
                ),
            ),
            drag.ToolStep(
                step_id="parse_agent_json",
                tool_name="text.extract_json",
                dependencies=("draft_agent",),
                # NOTE(review): assumes the delegate result is nested as
                # output.output.model_text — confirm against DelegateStep's result contract.
                input_builder=lambda context: {
                    "text": context["dependency_results"]["draft_agent"]["output"]["output"]["model_text"]
                },
            ),
            drag.LogicStep(
                step_id="finalize_agent",
                dependencies=("parse_agent_json",),
                # Merge problem metadata with the parsed JSON fields; missing keys
                # default to empty strings so the payload shape stays stable.
                handler=lambda context: {
                    "branch": "agent",
                    "problem_id": _problem_metadata_from_context(context).get("problem_id", ""),
                    "candidate_kind": _problem_metadata_from_context(context).get("candidate_kind", ""),
                    "title": context["dependency_results"]["parse_agent_json"]["output"]["result"]["json"].get(
                        "title", ""
                    ),
                    "summary": context["dependency_results"]["parse_agent_json"]["output"]["result"]["json"].get(
                        "summary", ""
                    ),
                },
            ),
            drag.LogicStep(
                step_id="draft_template",
                dependencies=("router",),
                # Deterministic fallback draft used when no agent call is wanted.
                handler=lambda context: {
                    "title": "Template fallback design brief",
                    "summary": f"Template mode output for: {context['prompt']}",
                },
            ),
            drag.LogicStep(
                step_id="finalize_template",
                dependencies=("draft_template",),
                # Repackage the template draft under the stable final payload keys.
                handler=lambda context: {
                    "branch": "template",
                    "title": context["dependency_results"]["draft_template"]["output"]["title"],
                    "summary": context["dependency_results"]["draft_template"]["output"]["summary"],
                },
            ),
        ],
    )
126
127
def main() -> None:
    """Run reusable prompt-mode workflow for one packaged problem and one fallback prompt."""
    # Emit both JSONL trace files and console lifecycle logs for each run.
    tracer = drag.Tracer(
        enabled=True,
        trace_dir=Path("artifacts/examples/traces"),
        enable_jsonl=True,
        enable_console=True,
    )
    # Run the prompt-mode workflow using public runtime surfaces. Using this with statement will automatically
    # shut down the managed client and tool runtime when the example is done.
    with drag.Toolbox() as tool_runtime, drag.LlamaCppServerLLMClient() as llm_client:
        writer_agent = drag.DirectLLMCall(llm_client=llm_client, tracer=tracer)
        workflow = build_example_workflow(
            tracer=tracer,
            tool_runtime=tool_runtime,
            writer_agent=writer_agent,
        )

        # Keep per-branch request ids stable so prompt-mode variants are easy to compare.
        agent_request_id = "example-workflow-prompt-design-agent-001"
        template_request_id = "example-workflow-prompt-design-template-001"
        packaged_problem = _ExamplePackagedProblem()
        # Agent branch: a packaged-problem-like object is passed directly to run().
        agent_result = workflow.run(
            packaged_problem,
            request_id=agent_request_id,
        )
        # Template branch: a plain "template:"-prefixed string triggers the fallback route.
        template_result = workflow.run(
            ("template: Produce a deterministic fallback brief for manufacturability review findings."),
            request_id=template_request_id,
        )

        # Print a compact, sorted JSON payload so tests and docs output stay deterministic.
        print(
            json.dumps(
                {
                    "agent_branch_run": _summarize_run(agent_result),
                    "template_branch_run": _summarize_run(template_result),
                },
                ensure_ascii=True,
                indent=2,
                sort_keys=True,
            )
        )
172
173
# Script entry point: run the example only when executed directly.
if __name__ == "__main__":
    main()
Expected Results#
Run Command
PYTHONPATH=src python3 examples/workflow/workflow_prompt_mode.py
Example output shape (values vary by run):
{
"agent_branch_run": {
"success": true,
"final_output": "<evaluation-ready final_output payload>",
"terminated_reason": "<string-or-null>",
"error": null,
"trace": {
"request_id": "<request-id>",
"trace_dir": "artifacts/examples/traces",
"trace_path": "artifacts/examples/traces/run_<timestamp>_<request_id>.jsonl"
}
},
"template_branch_run": {
"success": true,
"final_output": "<example-specific payload>",
"terminated_reason": "<string-or-null>",
"error": null,
"trace": {
"request_id": "<request-id>",
"trace_dir": "artifacts/examples/traces",
"trace_path": "artifacts/examples/traces/run_<timestamp>_<request_id>.jsonl"
}
}
}