Public API Tour#
Source: examples/catalog/public_api_tour.py
Introduction#
Tour the curated public API through concrete packaged objects.
Technical Implementation#
This page is generated from the top-of-file module docstring and the example source code. The full script is included below for direct inspection.
1from __future__ import annotations
2
3import numpy
4
5import design_research_problems as derp
6
7
def main() -> None:
    """Load one example from each problem family and print typed public-API touchpoints.

    Walks the curated public API end to end: registry listing, typed problem
    lookup, metadata access, one evaluation call per problem family,
    ideation-catalog queries, and the package's public error types.
    """
    problem_registry = derp.ProblemRegistry()
    catalog: derp.IdeationCatalog = derp.get_ideation_catalog()

    # Load one representative problem per family; get_problem_as() enforces the type.
    text_problem = derp.get_problem("ideation_peanut_shelling_fu_cagan_kotovsky_2010")
    typed_text_problem = derp.get_problem_as(
        "ideation_peanut_shelling_fu_cagan_kotovsky_2010",
        derp.TextProblem,
    )
    decision_problem = derp.get_problem_as(
        "decision_laptop_design_profit_maximization",
        derp.DecisionProblem,
    )
    optimization_problem = derp.get_problem_as("gmpb_default_dynamic_min", derp.OptimizationProblem)
    grammar_problem = derp.get_problem_as("iot_home_cooling_system_design", derp.GrammarProblem)
    mcp_problem = derp.get_problem_as("mcp_build123d_parametric_mounting_bracket", derp.MCPProblem)

    examples: tuple[derp.Problem, ...] = (
        text_problem,
        typed_text_problem,
        decision_problem,
        optimization_problem,
        grammar_problem,
        mcp_problem,
    )
    computable_count = sum(isinstance(example, derp.ComputableProblem) for example in examples)

    # Metadata objects are typed records hanging off each loaded problem.
    metadata: derp.ProblemMetadata = typed_text_problem.metadata
    taxonomy: derp.ProblemTaxonomy = metadata.taxonomy
    citations: tuple[derp.Citation, ...] = metadata.citations
    assets: tuple[derp.ProblemAsset, ...] = metadata.assets

    # Every problem family should be represented in the registry listing.
    kinds = {entry.kind for entry in problem_registry.list()}
    for expected_kind in (
        derp.ProblemKind.TEXT,
        derp.ProblemKind.DECISION,
        derp.ProblemKind.OPTIMIZATION,
        derp.ProblemKind.GRAMMAR,
        derp.ProblemKind.MCP,
    ):
        assert expected_kind in kinds

    best_decision: derp.DecisionEvaluation = decision_problem.best_evaluation()

    # Evaluate the all-zeros candidate; its shape comes from the problem bounds.
    zero_candidate = numpy.zeros(optimization_problem.bounds.lb.shape, dtype=float)
    optimization_evaluation: derp.OptimizationEvaluation = optimization_problem.evaluate(zero_candidate)

    initial_state = grammar_problem.initial_state()
    transition: derp.GrammarTransition = grammar_problem.enumerate_transitions(initial_state)[0]

    # Ideation catalog: prompt record plus its variant/family/study cross-references.
    prompt: derp.IdeationPromptRecord = catalog.list_prompts()[0]
    variant: derp.IdeationPromptVariant = catalog.get_variant(prompt.variant_ids[0])
    family: derp.IdeationPromptFamily = catalog.get_family(prompt.family_id)
    study: derp.IdeationStudy = catalog.list_studies()[0]
    evidence_tier: derp.EvidenceTier = prompt.evidence_tier

    # Requesting a problem under the wrong type raises a well-defined error.
    try:
        derp.get_problem_as(
            "ideation_peanut_shelling_fu_cagan_kotovsky_2010",
            derp.OptimizationProblem,
        )
    except (TypeError, derp.ProblemEvaluationError) as exc:
        mismatch_error = type(exc).__name__
    else:
        mismatch_error = "no-error"

    handled_optional_error = derp.MissingOptionalDependencyError.__name__

    print("problem-count", len(derp.list_problems()))
    print("kind-count", len(kinds), sorted(kind.value for kind in kinds))
    print("loaded-types", [type(example).__name__ for example in examples])
    print("computable-count", computable_count)
    print("text-kind", metadata.problem_id, metadata.kind.value)
    print("taxonomy-tags", len(taxonomy.tags))
    print("citation-year", citations[0].year)
    print("asset-count", len(assets))
    print("decision-best", round(best_decision.objective_value, 6), best_decision.candidate_label)
    print(
        "optimization-eval",
        optimization_evaluation.is_feasible,
        round(optimization_evaluation.objective_value, 6),
    )
    print("grammar-rule", transition.rule_name)
    print(
        "evaluation-types",
        type(best_decision).__name__,
        type(optimization_evaluation).__name__,
        type(transition).__name__,
    )
    print("mcp-problem", mcp_problem.metadata.problem_id)
    print(
        "ideation-types",
        type(prompt).__name__,
        type(variant).__name__,
        type(family).__name__,
        type(study).__name__,
        evidence_tier.value,
    )
    print(
        "primary-verbatim-prompts",
        len(
            catalog.search_prompts(
                evidence_tiers=(derp.EvidenceTier.PRIMARY_VERBATIM,),
                status="complete",
            )
        ),
    )
    print("handled-errors", handled_optional_error, mismatch_error)
114
# Script entry point: run the tour only when executed directly, not on import.
if __name__ == "__main__":
    main()
Expected Results#
Run Command
PYTHONPATH=src python3 examples/catalog/public_api_tour.py
Run the command shown above from the repository root. Output should summarize the problem setup, a baseline solution, or diagnostic values relevant to this example.