Seeded Random Baseline Agent#
Source: examples/agents/seeded_random_baseline_agent.py
Introduction#
This example treats a seeded random baseline as a first-class study participant for packaged-problem-style
experiments. It keeps the setup dependency-light by using a local decision-problem stub that mirrors the
public candidate-iteration contract instead of importing sibling repositories, while still using the same
run(prompt, dependencies=...) contract as the other public agents.
Technical Implementation#
Define a small packaged-problem-style decision stub with
iter_candidates() and evaluate(). Run
SeededRandomBaselineAgent with a fixed seed and pass the packaged problem through dependencies so the sampled control candidate is reproducible. Compare the random control condition against a deterministic greedy baseline that always picks the highest-scoring candidate.
Print JSON that could be dropped into lightweight experiment wiring or docs examples.
flowchart LR
A["Local decision problem stub"] --> B["SeededRandomBaselineAgent(seed=7)"]
A --> C["Greedy comparator"]
B --> D["Random control candidate"]
C --> E["Deterministic candidate"]
D --> F["JSON comparison output"]
E --> F
1from __future__ import annotations
2
3import json
4from dataclasses import dataclass
5
6import design_research_agents as drag
7
8
9@dataclass(frozen=True)
10class _ProblemMetadata:
11 """Minimal metadata stub for the local example problem."""
12
13 problem_id: str
14
15
16class _HeatSinkDecisionProblem:
17 """Tiny local decision problem exposing the public candidate iterator."""
18
19 def __init__(self) -> None:
20 """Initialize the local benchmark stub."""
21 self.metadata = _ProblemMetadata(problem_id="local_heat_sink_layout")
22 self._candidates = (
23 {"fin_count": 4.0, "gap_mm": 2.5, "wall_mm": 1.0},
24 {"fin_count": 6.0, "gap_mm": 2.0, "wall_mm": 1.0},
25 {"fin_count": 8.0, "gap_mm": 2.0, "wall_mm": 1.5},
26 )
27
28 def iter_candidates(self) -> tuple[dict[str, float], ...]:
29 """Return admissible candidates in deterministic order."""
30 return tuple(dict(candidate) for candidate in self._candidates)
31
32 def evaluate(self, candidate: dict[str, float]) -> dict[str, float]:
33 """Return a simple scalar score for one local candidate."""
34 score = 0.05 * candidate["fin_count"] + 0.22 * candidate["gap_mm"] + 0.15 * candidate["wall_mm"]
35 return {"objective_value": round(score, 4)}
36
37
38def _greedy_candidate(problem: _HeatSinkDecisionProblem) -> dict[str, float]:
39 """Return the highest-scoring deterministic candidate."""
40 candidates = problem.iter_candidates()
41 return max(candidates, key=lambda candidate: problem.evaluate(candidate)["objective_value"])
42
43
def main() -> None:
    """Run the seeded random baseline next to a deterministic comparator."""
    problem = _HeatSinkDecisionProblem()
    baseline = drag.SeededRandomBaselineAgent(seed=7)

    # Seeded run: the agent samples one control candidate from the
    # packaged problem supplied via the dependencies mapping.
    run_result = baseline.run(
        "Sample one seeded random control candidate for the local heat sink study.",
        dependencies={"problem": problem},
    )
    control_candidate = run_result.output_dict("final_output")
    comparator_candidate = _greedy_candidate(problem)

    # Score both conditions with the same evaluator so they are comparable.
    report = {
        "problem_id": problem.metadata.problem_id,
        "random_condition": {
            "candidate": control_candidate,
            "score": problem.evaluate(control_candidate)["objective_value"],
            "metadata": run_result.metadata,
        },
        "greedy_condition": {
            "candidate": comparator_candidate,
            "score": problem.evaluate(comparator_candidate)["objective_value"],
        },
    }
    print(json.dumps(report, ensure_ascii=True, indent=2, sort_keys=True))
68
69
if __name__ == "__main__":
    # Entry point when executed as a script rather than imported.
    main()
Expected Results#
Run Command
PYTHONPATH=src python3 examples/agents/seeded_random_baseline_agent.py
Example output shape:
{
"problem_id": "local_heat_sink_layout",
"random_condition": {
"candidate": {"fin_count": 6.0, "gap_mm": 2.0, "wall_mm": 1.0},
"metadata": {"...": "..."},
"score": 0.89
},
"greedy_condition": {
"candidate": {"fin_count": 8.0, "gap_mm": 2.0, "wall_mm": 1.5},
"score": 1.065
}
}