Ralph Loop#

Source: examples/patterns/ralph_loop.py

Introduction#

Ralph loops are role-programmed, not fixed two-role propose/critic cycles: each round executes an ordered role lineup, then a dedicated evaluator decides whether consensus quality is high enough. This example demonstrates a four-role configuration with synthesis selection and threshold stopping.

Technical Implementation#

  1. Configure Tracer with JSONL + console output so each run emits machine-readable traces and lifecycle logs.

  2. Build role-specific delegates with DirectLLMCall over one managed LlamaCppServerLLMClient.

  3. Execute RalphLoopPattern.run(...) with dynamic roles, evaluator role id, and typed LoopConfig.

  4. Print a compact JSON payload including trace_info for deterministic tests and docs examples.

```mermaid
flowchart LR
    A["Input prompt or scenario"] --> B["main(): runtime wiring"]
    B --> C["RalphLoopPattern.run(...)"]
    C --> D["role batch executes proposer/critic/synthesizer/evaluator each round"]
    C --> E["evaluator score compared to consensus threshold"]
    D --> F["ExecutionResult/payload"]
    E --> F
    F --> G["Printed JSON output"]
```
  1from __future__ import annotations
  2
  3import json
  4from pathlib import Path
  5
  6from design_research_agents import DirectLLMCall, LlamaCppServerLLMClient, Tracer
  7from design_research_agents.patterns import RalphLoopPattern
  8
# Keyword arguments for the managed llama.cpp server client used by this example.
# Unpacked directly into LlamaCppServerLLMClient(**...) below.
_EXAMPLE_LLAMA_CLIENT_KWARGS: dict[str, object] = {
    # Local GGUF filename of the quantized model.
    "model": "Qwen_Qwen3-4B-Instruct-2507-Q4_K_M.gguf",
    # Hugging Face repo the GGUF is fetched from if not present locally.
    "hf_model_repo_id": "bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF",
    # Model identifier advertised over the server's OpenAI-compatible API.
    "api_model": "qwen3-4b-instruct-2507-q4km",
    # Prompt + completion token budget for the server.
    "context_window": 8192,
    # Generous timeouts: first run may download the model and warm the server.
    "startup_timeout_seconds": 240.0,
    "request_timeout_seconds": 240.0,
}
 17
 18
 19def main() -> None:
 20    """Run one Ralph loop workflow and print JSON summary."""
 21    request_id = "example-pattern-ralph-loop-design-001"
 22    tracer = Tracer(
 23        enabled=True,
 24        trace_dir=Path("artifacts/examples/traces"),
 25        enable_jsonl=True,
 26        enable_console=True,
 27    )
 28    with LlamaCppServerLLMClient(**_EXAMPLE_LLAMA_CLIENT_KWARGS) as llm_client:
 29        proposer = DirectLLMCall(
 30            llm_client=llm_client,
 31            system_prompt=("You are a design proposer. Return concise JSON with proposal options and intended change."),
 32            tracer=tracer,
 33        )
 34        critic = DirectLLMCall(
 35            llm_client=llm_client,
 36            system_prompt="You are a design critic. Return concise JSON with risks and revision advice.",
 37            tracer=tracer,
 38        )
 39        synthesizer = DirectLLMCall(
 40            llm_client=llm_client,
 41            system_prompt=(
 42                "You are a synthesis role. Merge proposal + critique into one implementation-ready JSON summary."
 43            ),
 44            tracer=tracer,
 45        )
 46        evaluator = DirectLLMCall(
 47            llm_client=llm_client,
 48            system_prompt=("You are the evaluator. Return JSON with numeric score in [0,1] and brief rationale."),
 49            tracer=tracer,
 50        )
 51
 52        pattern = RalphLoopPattern(
 53            roles=(
 54                RalphLoopPattern.RoleSpec(
 55                    role_id="proposer",
 56                    delegate=proposer,
 57                    prompt_template=(
 58                        "Task: {task}\nIteration: {iteration}\nCurrent selected output:"
 59                        " {selected_output_json}\nReturn JSON for the next proposal."
 60                    ),
 61                ),
 62                RalphLoopPattern.RoleSpec(
 63                    role_id="critic",
 64                    delegate=critic,
 65                    prompt_template=(
 66                        "Task: {task}\nIteration: {iteration}\nPrior role outputs:"
 67                        " {prior_role_outputs_json}\nReturn JSON critique for the proposer."
 68                    ),
 69                ),
 70                RalphLoopPattern.RoleSpec(
 71                    role_id="synthesizer",
 72                    delegate=synthesizer,
 73                    prompt_template=(
 74                        "Task: {task}\nIteration: {iteration}\nPrior role outputs:"
 75                        " {prior_role_outputs_json}\nReturn JSON synthesis ready for evaluation."
 76                    ),
 77                ),
 78                RalphLoopPattern.RoleSpec(
 79                    role_id="evaluator",
 80                    delegate=evaluator,
 81                    prompt_template=(
 82                        "Task: {task}\nIteration: {iteration}\nCandidate synthesis:"
 83                        " {selected_output_json}\nRole outputs: {prior_role_outputs_json}\n"
 84                        "Return JSON with score in [0,1]."
 85                    ),
 86                ),
 87            ),
 88            evaluator_role_id="evaluator",
 89            loop_config=RalphLoopPattern.LoopConfig(
 90                max_iterations=3,
 91                consensus_threshold=0.8,
 92                selection_strategy="best_score",
 93            ),
 94            tracer=tracer,
 95        )
 96
 97        result = pattern.run(
 98            "Refine a field-serviceable edge-device enclosure concept.",
 99            request_id=request_id,
100        )
101    print(json.dumps(result.summary(), ensure_ascii=True, indent=2, sort_keys=True))
102
103
# Script entry point: run the example only when executed directly, not on import.
if __name__ == "__main__":
    main()

Expected Results#

Run Command

PYTHONPATH=src python3 examples/patterns/ralph_loop.py

Example output shape (values vary by run):

{
  "success": true,
  "final_output": "<example-specific payload>",
  "terminated_reason": "<string-or-null>",
  "error": null,
  "trace": {
    "request_id": "<request-id>",
    "trace_dir": "artifacts/examples/traces",
    "trace_path": "artifacts/examples/traces/run_<timestamp>_<request_id>.jsonl"
  }
}

References#