Coordination Patterns

Source: examples/patterns/coordination_patterns.py

Introduction

Blackboard-system architecture motivates shared-state collaboration among specialized problem solvers, AutoGen informs practical multi-agent implementation choices, and Human-AI collaboration by design clarifies governance value in shared workspace reasoning. This example compares round-based coordination and blackboard-specialized runs with explicit execution records.

Technical Implementation

  1. Configure Tracer with JSONL + console output so each run emits machine-readable traces and lifecycle logs.

  2. Build the runtime surface (public APIs only) and execute RoundBasedCoordinationPattern.run(...) with a fixed request_id.

  3. Capture structured outputs from runtime execution and preserve termination metadata for analysis.

  4. Print a compact JSON payload including trace_info for deterministic tests and docs examples.

flowchart LR
    A["Input prompt or scenario"] --> B["main(): runtime wiring"]
    B --> C["RoundBasedCoordinationPattern.run(...)"]
    C --> D["blackboard workers contribute and aggregate shared state"]
    C --> E["Tracer JSONL + console events"]
    D --> F["ExecutionResult/payload"]
    E --> F
    F --> G["Printed JSON output"]
 1from __future__ import annotations
 2
 3import json
 4from pathlib import Path
 5
 6from design_research_agents import (
 7    DirectLLMCall,
 8    ExecutionResult,
 9    LlamaCppServerLLMClient,
10    Tracer,
11)
12from design_research_agents.patterns import BlackboardPattern, RoundBasedCoordinationPattern
13
14
15def _summarize(result: ExecutionResult) -> dict[str, object]:
16    return result.summary()
17
18
def main() -> None:
    """Run one round-based coordination and one blackboard pass."""
    # JSONL traces land under artifacts/examples/traces; console mirroring
    # gives a live view of lifecycle events during the run.
    tracer = Tracer(
        enabled=True,
        trace_dir=Path("artifacts/examples/traces"),
        enable_jsonl=True,
        enable_console=True,
    )

    with LlamaCppServerLLMClient(context_window=16384) as llm_client:
        peer_a = DirectLLMCall(llm_client=llm_client, tracer=tracer)
        peer_b = DirectLLMCall(llm_client=llm_client, tracer=tracer)

        # Each pattern variant gets its own request id so the networked and
        # blackboard traces stay distinct on disk.
        coordination = RoundBasedCoordinationPattern(
            peers={
                "peer_b": peer_b,
                "peer_a": peer_a,
            },
            max_rounds=1,
            tracer=tracer,
        )
        coordination_result = coordination.run(
            "Exchange one concise proposal for a field-serviceable sensor enclosure.",
            request_id="example-workflow-round-based-coordination-design-001",
        )

        blackboard = BlackboardPattern(
            peers={
                "peer_b": peer_b,
                "peer_a": peer_a,
            },
            max_rounds=1,
            stability_rounds=1,
            tracer=tracer,
        )
        blackboard_result = blackboard.run(
            "Compare two concept options and make one concise serviceability recommendation.",
            request_id="example-workflow-blackboard-design-001",
        )

    # Emit one deterministic JSON document (sorted keys, ASCII-only) after the
    # client context has closed, so tests and docs can diff the output.
    combined = {
        "blackboard": _summarize(blackboard_result),
        "round_based_coordination": _summarize(coordination_result),
    }
    print(json.dumps(combined, ensure_ascii=True, indent=2, sort_keys=True))
76
77
# Script entry point: run both pattern examples when executed directly.
if __name__ == "__main__":
    main()

Expected Results

Run Command

PYTHONPATH=src python3 examples/patterns/coordination_patterns.py

Example output shape (values vary by run):

{
  "blackboard": {
    "success": true,
    "final_output": "<example-specific payload>",
    "terminated_reason": "<string-or-null>",
    "error": null,
    "trace": {
      "request_id": "<request-id>",
      "trace_dir": "artifacts/examples/traces",
      "trace_path": "artifacts/examples/traces/run_<timestamp>_<request_id>.jsonl"
    }
  },
  "round_based_coordination": {
    "success": true,
    "final_output": "<example-specific payload>",
    "terminated_reason": "<string-or-null>",
    "error": null,
    "trace": {
      "request_id": "<request-id>",
      "trace_dir": "artifacts/examples/traces",
      "trace_path": "artifacts/examples/traces/run_<timestamp>_<request_id>.jsonl"
    }
  }
}

Note: the script prints with `sort_keys=True`, so "blackboard" appears before
"round_based_coordination" in the actual output.

References