Skip to content

BayesianOptimizer

mdo_framework.optimization.optimizer.BayesianOptimizer

Bayesian Optimizer using Ax Platform.

Parameters:

Name Type Description Default
evaluator Evaluator

Local or Remote implementation of Evaluator protocol.

required
parameters list[dict[str, Any]]

Dict defining the variables bounds, choices, and types.

required
objectives list[dict[str, Any]]

Dict defining the targeted metrics and their directions.

required
constraints list[dict[str, Any]] | None

List of dicts defining outcome-constraint boundaries (name, operator, bound) on metrics produced by the runs.

None
fidelity_parameter str | None

Name of variable designating multi-fidelity.

None
use_bonsai bool

Toggle for the experimental BONSAI generation strategy.

False
Source code in src/mdo_framework/optimization/optimizer.py
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
class BayesianOptimizer:
    """Bayesian Optimizer using Ax Platform.

    Args:
        evaluator: Local or Remote implementation of Evaluator protocol.
        parameters: Dict defining the variables bounds, choices, and types.
        objectives: Dict defining the targeted metrics and their directions.
        constraints: Dict defining boundaries mapped out of OpenMDAO runs.
        fidelity_parameter: Name of variable designating multi-fidelity.
        use_bonsai: Toggle for experimental algorithmic execution.

    """

    def __init__(
        self,
        evaluator: Evaluator,
        parameters: list[dict[str, Any]],
        objectives: list[dict[str, Any]],
        constraints: list[dict[str, Any]] | None = None,
        fidelity_parameter: str | None = None,
        use_bonsai: bool = False,
    ) -> None:
        self.evaluator = evaluator
        self.parameters = parameters
        self.objectives = objectives
        self.constraints = constraints or []
        self.fidelity_parameter = fidelity_parameter
        self.use_bonsai = use_bonsai

    def optimize(self, n_steps: int = 5, n_init: int = 5) -> dict[str, Any]:
        """Runs the optimization loop using AxClient.

        Iteratively generates candidate parameters, evaluates them using the
        provided evaluator, and updates the underlying Gaussian Process model.

        Args:
            n_steps: Number of Bayesian optimization steps to perform. Default is 5.
            n_init: Number of initial Sobol (quasi-random) exploration steps. Default is 5.

        Returns:
            A dictionary containing:
            - 'best_parameters': The optimal parameters found (or None if unresolved).
            - 'best_objectives': The metrics associated with the optimal parameters.
            - 'history': List of dicts representing all evaluated trials.
            - 'serialized_client': JSON string representation of the Ax client state.

        Example:
            ```python
            optimizer = BayesianOptimizer(evaluator, parameters, objectives)
            result = optimizer.optimize(n_steps=10, n_init=10)
            print(result["best_parameters"])
            ```

        """
        if self.fidelity_parameter is not None:
            warnings.warn(
                f"The `fidelity_parameter` argument ('{self.fidelity_parameter}') is "
                "currently not supported by the modern Ax `Client` API. "
                "`RangeParameterConfig` does not yet expose `is_fidelity` or "
                "`target_value`. The parameter will be treated as a regular range "
                "parameter until this is addressed upstream in Ax. "
                "Track: https://github.com/facebook/ax",
                UserWarning,
                stacklevel=2,
            )

        # Determine client setup
        if self.use_bonsai:
            logger.warning("Experimental feature BONSAI algorithm is activated.")
            gs = GenerationStrategy(
                name="bonsai",
                nodes=[
                    GenerationStep(
                        generator=Models.SOBOL,
                        num_trials=n_init,
                        min_trials_observed=n_init,
                    ),
                    GenerationStep(
                        generator=Models.BOTORCH_MODULAR,
                        num_trials=-1,
                    ),
                ],
            )

        else:
            gs = GenerationStrategy(
                name="botorch_modular",
                nodes=[
                    GenerationStep(
                        generator=Models.SOBOL,
                        num_trials=n_init,
                        min_trials_observed=n_init,
                    ),
                    GenerationStep(
                        generator=Models.BOTORCH_MODULAR,
                        num_trials=-1,
                        generator_kwargs={
                            "botorch_acqf_class": qLogNoisyExpectedImprovement,
                        },
                    ),
                ],
            )

        # Configure parameter space
        client = Client()

        ax_params = []
        for p in self.parameters:
            if p["type"] == "range":
                ax_params.append(
                    RangeParameterConfig(
                        name=p["name"],
                        parameter_type=p.get("value_type", "float"),
                        bounds=p["bounds"],
                    ),
                )
            elif p["type"] == "choice":
                ax_params.append(
                    ChoiceParameterConfig(
                        name=p["name"],
                        parameter_type=p.get("value_type", "float"),
                        values=p["values"],
                    ),
                )

        client.configure_experiment(
            name="mdo_optimization",
            parameters=ax_params,
        )

        objective_str = ", ".join(
            [
                f"{'-' if obj.get('minimize', True) else ''}{obj['name']}"
                for obj in self.objectives
            ],
        )

        client.configure_optimization(
            objective=objective_str,
            outcome_constraints=[
                f"{c['name']} {c['op']} {c['bound']}" for c in self.constraints
            ],
        )

        client.set_generation_strategy(gs)

        history = []

        total_trials = n_init + n_steps
        objective_names = [o["name"] for o in self.objectives] + [
            c["name"] for c in self.constraints
        ]

        for _ in range(total_trials):
            trials = client.get_next_trials(max_trials=1)
            for trial_index, parameters in trials.items():
                # Evaluate using the evaluator
                results = self.evaluator.evaluate(parameters, objective_names)

                # Record history
                history.append({"parameters": parameters, "objectives": results})

                # Complete the trial
                client.complete_trial(trial_index=trial_index, raw_data=results)

        try:
            # Handle possible pareto frontier for multi-objective
            if len(self.objectives) > 1:
                frontier = client.get_pareto_frontier()
                best_params = []
                best_objs = []
                if frontier:
                    for params, metrics, trial_idx, arm_name in frontier:
                        best_params.append(params)
                        best_objs.append(
                            {
                                k: v[0] if isinstance(v, tuple) else v
                                for k, v in metrics.items()
                            },
                        )
                else:
                    best_params = None
                    best_objs = None

                return {
                    "best_parameters": best_params,
                    "best_objectives": best_objs,
                    "history": history,
                    "serialized_client": json.dumps(client._to_json_snapshot()),
                }
            best_parameters, best_obj, trial_idx, arm_name = (
                client.get_best_parameterization()
            )
            return {
                "best_parameters": best_parameters,
                "best_objectives": best_obj,
                "history": history,
                "serialized_client": json.dumps(client._to_json_snapshot()),
            }
        except Exception as e:
            logger.warning(f"Could not retrieve best parameters: {e}")
            return {
                "best_parameters": None,
                "best_objectives": None,
                "history": history,
                "serialized_client": client.to_json_snapshot(),
            }

optimize(n_steps=5, n_init=5)

Runs the optimization loop using AxClient.

Iteratively generates candidate parameters, evaluates them using the provided evaluator, and updates the underlying Gaussian Process model.

Parameters:

Name Type Description Default
n_steps int

Number of Bayesian optimization steps to perform. Default is 5.

5
n_init int

Number of initial Sobol (quasi-random) exploration steps. Default is 5.

5

Returns:

Type Description
dict[str, Any]

A dictionary containing:

dict[str, Any]
  • 'best_parameters': The optimal parameters found (or None if unresolved).
dict[str, Any]
  • 'best_objectives': The metrics associated with the optimal parameters.
dict[str, Any]
  • 'history': List of dicts representing all evaluated trials.
dict[str, Any]
  • 'serialized_client': JSON string representation of the Ax client state.
Example
optimizer = BayesianOptimizer(evaluator, parameters, objectives)
result = optimizer.optimize(n_steps=10, n_init=10)
print(result["best_parameters"])
Source code in src/mdo_framework/optimization/optimizer.py
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
def optimize(self, n_steps: int = 5, n_init: int = 5) -> dict[str, Any]:
    """Runs the optimization loop using AxClient.

    Iteratively generates candidate parameters, evaluates them using the
    provided evaluator, and updates the underlying Gaussian Process model.

    Args:
        n_steps: Number of Bayesian optimization steps to perform. Default is 5.
        n_init: Number of initial Sobol (quasi-random) exploration steps. Default is 5.

    Returns:
        A dictionary containing:
        - 'best_parameters': The optimal parameters found (or None if unresolved).
        - 'best_objectives': The metrics associated with the optimal parameters.
        - 'history': List of dicts representing all evaluated trials.
        - 'serialized_client': JSON string representation of the Ax client state.

    Example:
        ```python
        optimizer = BayesianOptimizer(evaluator, parameters, objectives)
        result = optimizer.optimize(n_steps=10, n_init=10)
        print(result["best_parameters"])
        ```

    """
    if self.fidelity_parameter is not None:
        warnings.warn(
            f"The `fidelity_parameter` argument ('{self.fidelity_parameter}') is "
            "currently not supported by the modern Ax `Client` API. "
            "`RangeParameterConfig` does not yet expose `is_fidelity` or "
            "`target_value`. The parameter will be treated as a regular range "
            "parameter until this is addressed upstream in Ax. "
            "Track: https://github.com/facebook/ax",
            UserWarning,
            stacklevel=2,
        )

    # Determine client setup: Sobol exploration followed by BoTorch.
    if self.use_bonsai:
        logger.warning("Experimental feature BONSAI algorithm is activated.")
        gs = GenerationStrategy(
            name="bonsai",
            nodes=[
                GenerationStep(
                    generator=Models.SOBOL,
                    num_trials=n_init,
                    min_trials_observed=n_init,
                ),
                GenerationStep(
                    generator=Models.BOTORCH_MODULAR,
                    num_trials=-1,
                ),
            ],
        )

    else:
        gs = GenerationStrategy(
            name="botorch_modular",
            nodes=[
                GenerationStep(
                    generator=Models.SOBOL,
                    num_trials=n_init,
                    min_trials_observed=n_init,
                ),
                GenerationStep(
                    generator=Models.BOTORCH_MODULAR,
                    num_trials=-1,
                    generator_kwargs={
                        "botorch_acqf_class": qLogNoisyExpectedImprovement,
                    },
                ),
            ],
        )

    # Configure parameter space.
    client = Client()

    ax_params = []
    for p in self.parameters:
        if p["type"] == "range":
            ax_params.append(
                RangeParameterConfig(
                    name=p["name"],
                    parameter_type=p.get("value_type", "float"),
                    bounds=p["bounds"],
                ),
            )
        elif p["type"] == "choice":
            ax_params.append(
                ChoiceParameterConfig(
                    name=p["name"],
                    parameter_type=p.get("value_type", "float"),
                    values=p["values"],
                ),
            )

    client.configure_experiment(
        name="mdo_optimization",
        parameters=ax_params,
    )

    # Ax's objective string uses a leading "-" to mark minimized metrics.
    objective_str = ", ".join(
        [
            f"{'-' if obj.get('minimize', True) else ''}{obj['name']}"
            for obj in self.objectives
        ],
    )

    client.configure_optimization(
        objective=objective_str,
        outcome_constraints=[
            f"{c['name']} {c['op']} {c['bound']}" for c in self.constraints
        ],
    )

    client.set_generation_strategy(gs)

    history = []

    total_trials = n_init + n_steps
    # Constraint metrics are evaluated alongside objectives so Ax can observe
    # them when completing each trial.
    objective_names = [o["name"] for o in self.objectives] + [
        c["name"] for c in self.constraints
    ]

    for _ in range(total_trials):
        trials = client.get_next_trials(max_trials=1)
        for trial_index, parameters in trials.items():
            # Evaluate using the evaluator
            results = self.evaluator.evaluate(parameters, objective_names)

            # Record history
            history.append({"parameters": parameters, "objectives": results})

            # Complete the trial
            client.complete_trial(trial_index=trial_index, raw_data=results)

    try:
        # Handle possible pareto frontier for multi-objective
        if len(self.objectives) > 1:
            frontier = client.get_pareto_frontier()
            best_params = []
            best_objs = []
            if frontier:
                for params, metrics, trial_idx, arm_name in frontier:
                    best_params.append(params)
                    best_objs.append(
                        {
                            # Metric values may come back as (mean, sem) tuples.
                            k: v[0] if isinstance(v, tuple) else v
                            for k, v in metrics.items()
                        },
                    )
            else:
                best_params = None
                best_objs = None

            return {
                "best_parameters": best_params,
                "best_objectives": best_objs,
                "history": history,
                "serialized_client": json.dumps(client._to_json_snapshot()),
            }
        best_parameters, best_obj, trial_idx, arm_name = (
            client.get_best_parameterization()
        )
        return {
            "best_parameters": best_parameters,
            "best_objectives": best_obj,
            "history": history,
            "serialized_client": json.dumps(client._to_json_snapshot()),
        }
    except Exception as e:
        logger.warning(f"Could not retrieve best parameters: {e}")
        # Bug fix: this path previously returned the raw snapshot object (via
        # the wrong method name `to_json_snapshot`), while the success paths
        # returned a JSON string. Keep the value type consistent, guarding
        # against the snapshot itself failing.
        try:
            serialized = json.dumps(client._to_json_snapshot())
        except Exception:
            serialized = None
        return {
            "best_parameters": None,
            "best_objectives": None,
            "history": history,
            "serialized_client": serialized,
        }

Evaluator

mdo_framework.optimization.optimizer.Evaluator

Bases: Protocol

Source code in src/mdo_framework/optimization/optimizer.py
25
26
27
28
29
30
31
32
class Evaluator(Protocol):
    """Structural interface for objective evaluators.

    Any object exposing a matching ``evaluate`` method satisfies this
    protocol (duck typing via ``typing.Protocol``); no inheritance required.
    """

    def evaluate(
        self,
        parameters: dict[str, Any],
        objectives: list[str],
    ) -> dict[str, float]:
        """Evaluates the requested objectives given the design parameters.

        Args:
            parameters: Mapping of design-variable names to their values.
            objectives: Names of the metrics to compute.

        Returns:
            Mapping of each requested metric name to its computed float value.
        """
        ...

evaluate(parameters, objectives)

Evaluates the requested objectives given the design parameters.

Source code in src/mdo_framework/optimization/optimizer.py
26
27
28
29
30
31
32
def evaluate(
    self,
    parameters: dict[str, Any],
    objectives: list[str],
) -> dict[str, float]:
    """Evaluates the requested objectives given the design parameters.

    Args:
        parameters: Mapping of design-variable names to their values.
        objectives: Names of the metrics to compute.

    Returns:
        Mapping of each requested metric name to its computed float value.
    """
    ...

LocalEvaluator

mdo_framework.core.evaluators.LocalEvaluator

Evaluates the design parameters locally using a GEMSEO MDA instance.

Parameters:

Name Type Description Default
problem Discipline

An instantiated GEMSEO MDA (or Discipline) object.

required
Source code in src/mdo_framework/core/evaluators.py
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
class LocalEvaluator:
    """Evaluates the design parameters locally using a GEMSEO MDA instance.

    Args:
        problem: An instantiated GEMSEO MDA (or Discipline) object.
    """

    def __init__(self, problem: "Discipline") -> None:
        self.problem = problem

    def evaluate(
        self,
        parameters: dict[str, Any],
        objectives: list[str],
    ) -> dict[str, float]:
        """Executes the MDA with the given parameters and extracts metric values.

        Args:
            parameters: Mapping of design-variable names to scalar values.
            objectives: Names of the outputs to extract from the MDA results.

        Returns:
            Mapping of each requested output name to a float. Outputs missing
            from the MDA results default to 0.0.
        """
        # GEMSEO uses a dictionary with string keys and numpy array values
        # for local_data, so lift scalars to 1-D arrays.
        input_data = {name: np.atleast_1d(val) for name, val in parameters.items()}

        # We need to provide all required inputs for the MDA, not just
        # parameters. This will be passed and merged internally by execute.
        output_data = self.problem.execute(input_data)

        results: dict[str, float] = {}
        for obj in objectives:
            val = output_data.get(obj)
            if val is None:
                # Best-effort default for missing outputs; kept from the
                # original code (raising may be preferable in some contexts).
                results[obj] = 0.0
            elif isinstance(val, np.ndarray):
                # Bug fix: the original called float(val) on empty arrays,
                # which raises. Use the first element when available,
                # falling back to the same 0.0 default otherwise.
                results[obj] = float(val.flat[0]) if val.size > 0 else 0.0
            else:
                results[obj] = float(val)
        return results

RemoteEvaluator

mdo_framework.optimization.optimizer.RemoteEvaluator

Evaluates the design parameters remotely by communicating with the Execution microservice.

Parameters:

Name Type Description Default
service_url str

The URL of the execution service.

required
Source code in src/mdo_framework/optimization/optimizer.py
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
class RemoteEvaluator:
    """Evaluates the design parameters remotely by communicating with the Execution microservice.

    Args:
        service_url: The URL of the execution service.
        timeout: Request timeout in seconds for each evaluation call.
            Defaults to 30.0; httpx's built-in 5 s default is too short for
            long-running MDA evaluations.

    """

    def __init__(self, service_url: str, timeout: float = 30.0) -> None:
        self.service_url = service_url
        self.timeout = timeout

    def evaluate(
        self,
        parameters: dict[str, Any],
        objectives: list[str],
    ) -> dict[str, float]:
        """Posts the parameters to the service and returns the requested metrics.

        Args:
            parameters: Mapping of design-variable names to values.
            objectives: Names of the metrics to compute remotely.

        Returns:
            The ``results`` mapping from the service's JSON response.

        Raises:
            httpx.HTTPStatusError: If the service responds with an error status.
        """
        payload = {
            "inputs": parameters,
            "objectives": objectives,
        }
        # Explicit timeout so long evaluations are not cut off by the
        # httpx default.
        response = httpx.post(
            f"{self.service_url}/evaluate",
            json=payload,
            timeout=self.timeout,
        )
        response.raise_for_status()
        data = response.json()
        return data["results"]