Recipe Prompt Framing Run
Source: examples/recipe_prompt_framing_run.py
Introduction
Execute a non-default prompt-framing recipe with deterministic mock components.
Technical Implementation
Build `PromptFramingConfig` overrides for factors, design, budget, and IDs. Create deterministic in-memory problem and agent adapters.
Run the study and write a markdown summary artifact.
1from __future__ import annotations
2
3from pathlib import Path
4
5import design_research_experiments as drex
6
7
def _build_problem_registry(problem_ids: tuple[str, ...]) -> dict[str, drex.ProblemPacket]:
    """Assemble an in-memory registry of deterministic ideation problems.

    Every problem shares one evaluator; only the id and brief text differ.
    """

    def evaluator(output: dict[str, object]) -> list[dict[str, object]]:
        """Score output-text length as a single deterministic novelty row."""
        text = str(output.get("text", ""))
        return [{"metric_name": "novelty", "metric_value": len(text) / 100.0}]

    # One packet per requested id, all in the "ideation" family.
    return {
        problem_id: drex.ProblemPacket(
            problem_id=problem_id,
            family="ideation",
            brief=f"Ideation brief for {problem_id}",
            evaluator=evaluator,
        )
        for problem_id in problem_ids
    }
25
26
27def _agent_factory(agent_name: str):
28 """Create a deterministic agent callable for one recipe arm."""
29
30 def _agent(
31 *,
32 problem_packet: drex.ProblemPacket,
33 run_spec: drex.RunSpec,
34 condition: drex.Condition,
35 ) -> dict[str, object]:
36 """Generate one deterministic mock run result for prompt-framing conditions."""
37 run_seed = run_spec.seed
38 factor_assignments = condition.factor_assignments
39 prompt_frame = str(factor_assignments.get("prompt_frame", "neutral"))
40 prompt_difficulty = str(factor_assignments.get("prompt_difficulty", "low"))
41
42 frame_bonus = (
43 0.08 if prompt_frame == "analogy" else 0.04 if prompt_frame == "challenge" else 0.0
44 )
45 difficulty_bonus = 0.03 if prompt_difficulty == "high" else 0.0
46 agent_bonus = 0.04 if agent_name == "creative-agent" else 0.0
47 primary_outcome = round(0.50 + frame_bonus + difficulty_bonus + agent_bonus, 4)
48
49 text = (
50 f"{agent_name} solved {problem_packet.problem_id} "
51 f"with frame={prompt_frame} difficulty={prompt_difficulty} seed={run_seed}"
52 )
53
54 return {
55 "output": {"text": text},
56 "metrics": {
57 "primary_outcome": primary_outcome,
58 "input_tokens": 120,
59 "output_tokens": 220,
60 "cost_usd": 0.015,
61 },
62 "events": [
63 {
64 "event_type": "assistant_output",
65 "text": text,
66 "actor_id": agent_name,
67 }
68 ],
69 "metadata": {"model_name": "example-model"},
70 }
71
72 return _agent
73
74
def main() -> None:
    """Run a prompt-framing study with non-default typed recipe overrides."""
    # Non-default manipulated factors replacing the recipe's stock sections.
    frame_factor = drex.Factor(
        name="prompt_frame",
        description="Prompt framing style.",
        kind=drex.FactorKind.MANIPULATED,
        levels=tuple(
            drex.Level(name=level, value=level)
            for level in ("neutral", "challenge", "analogy", "counterfactual")
        ),
    )
    difficulty_factor = drex.Factor(
        name="prompt_difficulty",
        description="Prompt difficulty.",
        kind=drex.FactorKind.MANIPULATED,
        levels=tuple(
            drex.Level(name=level, value=level) for level in ("low", "high")
        ),
    )

    study_config = drex.PromptFramingConfig(
        study_id="prompt-framing-custom",
        bundle=drex.ideation_bundle(),
        factors=(frame_factor, difficulty_factor),
        design_spec={"kind": "constrained_factorial", "randomize": True},
        run_budget=drex.RunBudget(replicates=1, parallelism=1, max_runs=8),
        output_dir=Path("artifacts") / "example-prompt-framing",
        # Explicit ids take precedence over bundle-provided ones.
        problem_ids=("ideation-brief-a", "ideation-brief-b"),
        agent_specs=("baseline-agent", "creative-agent"),
    )
    study = drex.build_prompt_framing_study(study_config)

    registry = _build_problem_registry(study.problem_ids)
    # Each spec maps to a factory that ignores the condition and returns the
    # matching deterministic agent; `_spec=spec` pins the loop variable so the
    # lambdas do not share the last iteration's value.
    factories = {
        spec: (lambda _condition, _spec=spec: _agent_factory(_spec))
        for spec in ("baseline-agent", "creative-agent")
    }

    results = drex.run_study(
        study,
        agent_factories=factories,
        problem_registry=registry,
        include_sqlite=True,
    )

    report = drex.render_markdown_summary(study, results)
    report_path = drex.write_markdown_report(
        study.output_dir,
        "prompt_framing_summary.md",
        report,
    )

    print(f"Completed {len(results)} runs")
    print(f"Summary written to {report_path}")
134
135
# Entry-point guard: run the example only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()
Expected Results
Run Command
PYTHONPATH=src python examples/recipe_prompt_framing_run.py
The script prints the completed run count and writes
artifacts/example-prompt-framing/artifacts/prompt_framing_summary.md.