Calling Scripts
Below are example calling scripts used to populate the specifications for each user
function and for libEnsemble itself before initiating libEnsemble via the primary libE()
call. The primary libEnsemble-relevant portions are highlighted in each
example; non-highlighted portions may include setup routines, compilation steps
for user applications, or output processing. The first two scripts perform
random sampling, while the third runs an optimization routine.
Many other examples of calling scripts can be found in libEnsemble's regression tests.
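Most of the scripts below follow the same basic shape: build the specification dictionaries (or objects), then call libE() (or Ensemble.run()). A minimal schematic sketch using the dictionary interface is shown below; the my_sim_f and my_gen_f user functions and the field names are hypothetical placeholders, not part of any example on this page.

import numpy as np

from libensemble.libE import libE
from libensemble.tools import add_unique_random_streams, parse_args

from my_sim import my_sim_f  # hypothetical user simulator function
from my_gen import my_gen_f  # hypothetical user generator function

# Read the number of workers and comms type from the command line (e.g. --comms local --nworkers 4)
nworkers, is_manager, libE_specs, _ = parse_args()

# Specifications for the user functions and the stopping condition
sim_specs = {"sim_f": my_sim_f, "in": ["x"], "out": [("f", float)]}
gen_specs = {
    "gen_f": my_gen_f,
    "out": [("x", float, (1,))],
    "user": {"lb": np.array([-3.0]), "ub": np.array([3.0]), "gen_batch_size": 100},
}
exit_criteria = {"sim_max": 100}

# One random stream per worker (plus the manager)
persis_info = add_unique_random_streams({}, nworkers + 1)

# The primary libE() call; returns the history array and a completion flag
H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, libE_specs=libE_specs)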
Local Sine Tutorial
This example is from the Local Sine Tutorial, and is meant to run with Python's multiprocessing as the primary comms method.
import numpy as np
from sine_gen import gen_random_sample
from sine_sim import sim_find_sine

from libensemble import Ensemble
from libensemble.specs import ExitCriteria, GenSpecs, LibeSpecs, SimSpecs

if __name__ == "__main__":  # Python-quirk required on macOS and Windows
    libE_specs = LibeSpecs(nworkers=4, comms="local")

    gen_specs = GenSpecs(
        gen_f=gen_random_sample,  # Our generator function
        out=[("x", float, (1,))],  # gen_f output (name, type, size)
        user={
            "lower": np.array([-3]),  # lower boundary for random sampling
            "upper": np.array([3]),  # upper boundary for random sampling
            "gen_batch_size": 5,  # number of x's gen_f generates per call
        },
    )

    sim_specs = SimSpecs(
        sim_f=sim_find_sine,  # Our simulator function
        inputs=["x"],  # Input field names. "x" from gen_f output
        out=[("y", float)],  # sim_f output. "y" = sine("x")
    )

    exit_criteria = ExitCriteria(sim_max=80)  # Stop libEnsemble after 80 simulations

    ensemble = Ensemble(sim_specs, gen_specs, exit_criteria, libE_specs)
    ensemble.add_random_streams()  # set up the random streams unique to each worker
    ensemble.run()  # start the ensemble. Blocks until completion.

    history = ensemble.H  # start visualizing our results

    print([i for i in history.dtype.fields])  # (optional) print the history array's field names
    print(history)

    import matplotlib.pyplot as plt

    colors = ["b", "g", "r", "y", "m", "c", "k", "w"]

    for i in range(1, libE_specs.nworkers + 1):
        worker_xy = np.extract(history["sim_worker"] == i, history)
        x = [entry.tolist()[0] for entry in worker_xy["x"]]
        y = [entry for entry in worker_xy["y"]]
        plt.scatter(x, y, label="Worker {}".format(i), c=colors[i - 1])

    plt.title("Sine calculations for a uniformly sampled random distribution")
    plt.xlabel("x")
    plt.ylabel("sine(x)")
    plt.legend(loc="lower right")
    plt.savefig("tutorial_sines.png")
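Because the number of workers and the local comms method are fixed in LibeSpecs, this script can be run directly with Python (no mpiexec needed), and the resulting plot is written to tutorial_sines.png in the working directory.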
Electrostatic Forces with Executor
These examples are from a test for evaluating the scaling capabilities of libEnsemble
by calculating particle electrostatic forces through a user application. This
application is registered with either the MPI or Balsam Executor, then submitted
for execution in the sim_f. Note the use of the parse_args() and save_libE_output()
convenience functions from the tools module in the first calling script.
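The sim_f itself (run_forces, imported in the calling scripts below) is not shown here, but the typical pattern is for the worker to retrieve the Executor created in the calling script and submit the registered "forces" app. The following is only a minimal sketch of that pattern, not the actual run_forces implementation; the application arguments, output file names, and the placeholder energy value are illustrative.

import time

import numpy as np

from libensemble.executors.executor import Executor
from libensemble.message_numbers import TASK_FAILED, WORKER_DONE


def run_forces_sketch(H, persis_info, sim_specs, libE_info):
    particles = str(int(H["x"][0][0]))  # use the generated value as an app argument (illustrative)

    exctr = Executor.executor  # the Executor instantiated and registered in the calling script
    task = exctr.submit(
        app_name="forces",  # name given to register_app() in the calling script
        app_args=particles,
        num_procs=sim_specs["user"]["cores"],
        stdout="out.txt",
        stderr="err.txt",
    )

    # Poll until the launched application finishes
    while not task.finished:
        time.sleep(1)
        task.poll()

    calc_status = WORKER_DONE if task.state == "FINISHED" else TASK_FAILED

    H_o = np.zeros(1, dtype=sim_specs["out"])
    H_o["energy"] = 0.0  # placeholder; a real sim_f would parse the application's output here
    return H_o, persis_info, calc_status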
Traditional Version
#!/usr/bin/env python
import os
import sys

import numpy as np
from forces_simf import run_forces  # Sim func from current dir
from forces_support import check_log_exception, test_ensemble_dir, test_libe_stats

from libensemble import logger
from libensemble.executors.mpi_executor import MPIExecutor

# Import libEnsemble modules
from libensemble.libE import libE
from libensemble.manager import ManagerException
from libensemble.tools import add_unique_random_streams, parse_args, save_libE_output

PERSIS_GEN = False

if PERSIS_GEN:
    from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f
    from libensemble.gen_funcs.persistent_sampling import persistent_uniform as gen_f
else:
    from libensemble.alloc_funcs.give_sim_work_first import give_sim_work_first as alloc_f
    from libensemble.gen_funcs.sampling import uniform_random_sample as gen_f


logger.set_level("INFO")  # INFO is now default

nworkers, is_manager, libE_specs, _ = parse_args()

sim_app = os.path.join(os.getcwd(), "../forces_app/forces.x")

if not os.path.isfile(sim_app):
    sys.exit("forces.x not found - please build first in ../forces_app dir")

if is_manager:
    print(f"\nRunning with {nworkers} workers\n")

exctr = MPIExecutor()
exctr.register_app(full_path=sim_app, app_name="forces")

# Note: Attributes such as kill_rate are used to control the forces tests; these would not be typical parameters.

# State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {
    "sim_f": run_forces,  # Function whose output is being minimized
    "in": ["x"],  # Name of input for sim_f
    "out": [("energy", float)],  # Name, type of output from sim_f
    "user": {
        "keys": ["seed"],
        "cores": 2,
        "sim_particles": 1e3,
        "sim_timesteps": 5,
        "sim_kill_minutes": 10.0,
        "particle_variance": 0.2,
        "kill_rate": 0.5,
        "fail_on_sim": False,
        "fail_on_submit": False,  # Won't occur if 'fail_on_sim' is True
    },
}

# State the generating function, its arguments, output, and necessary parameters.
gen_specs = {
    "gen_f": gen_f,  # Generator function
    "in": [],  # Generator input
    "out": [("x", float, (1,))],  # Name, type and size of data produced (must match sim_specs 'in')
    "user": {
        "lb": np.array([0]),  # Lower bound for random sample array (1D)
        "ub": np.array([32767]),  # Upper bound for random sample array (1D)
        "gen_batch_size": 1000,  # How many random samples to generate in one call
    },
}

if PERSIS_GEN:
    alloc_specs = {"alloc_f": alloc_f}
else:
    alloc_specs = {
        "alloc_f": alloc_f,
        "user": {
            "batch_mode": True,  # If True, wait for all sims to be processed before generating more
            "num_active_gens": 1,  # Only one active generator at a time
        },
    }

libE_specs["save_every_k_gens"] = 1000  # Save history every K generated points
libE_specs["sim_dirs_make"] = True  # Run each sim in a separate directory
libE_specs["profile"] = False  # Whether to have libE profile on (default False)

# Maximum number of simulations
sim_max = 8
exit_criteria = {"sim_max": sim_max}

# Create a different random number stream for each worker and the manager
persis_info = {}
persis_info = add_unique_random_streams(persis_info, nworkers + 1)

try:
    H, persis_info, flag = libE(
        sim_specs,
        gen_specs,
        exit_criteria,
        persis_info=persis_info,
        alloc_specs=alloc_specs,
        libE_specs=libE_specs,
    )

except ManagerException:
    if is_manager and sim_specs["user"]["fail_on_sim"]:
        check_log_exception()
        test_libe_stats("Exception occurred\n")
else:
    if is_manager:
        save_libE_output(H, persis_info, __file__, nworkers)
        if sim_specs["user"]["fail_on_submit"]:
            test_libe_stats("Task Failed\n")
        test_ensemble_dir(libE_specs, "./ensemble", nworkers, sim_max)
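Because this script calls parse_args(), the number of workers and comms type are supplied on the command line, for example python <calling script>.py --comms local --nworkers 4, or mpiexec -np 5 python <calling script>.py when using MPI comms (one manager process plus four workers).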
Object + yaml Version
#!/usr/bin/env python
import os
import sys

import numpy as np

from libensemble.ensemble import Ensemble
from libensemble.executors.mpi_executor import MPIExecutor
from libensemble.tools import add_unique_random_streams

####################

sim_app = os.path.join(os.getcwd(), "../forces_app/forces.x")

if not os.path.isfile(sim_app):
    sys.exit("forces.x not found - please build first in ../forces_app dir")


####################

forces = Ensemble(parse_args=True)
forces.from_yaml("forces.yaml")

forces.logger.set_level("INFO")

if forces.is_manager:
    print(f"\nRunning with {forces.nworkers} workers\n")

exctr = MPIExecutor()
exctr.register_app(full_path=sim_app, app_name="forces")

forces.libE_specs["ensemble_dir_path"] = "./ensemble"
forces.gen_specs.user.update(
    {
        "lb": np.array([0]),
        "ub": np.array([32767]),
    }
)

forces.persis_info = add_unique_random_streams({}, forces.nworkers + 1)

forces.run()
forces.save_output(__file__)
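The forces.yaml file loaded by from_yaml() above supplies the remaining specifications: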
libE_specs:
    save_every_k_gens: 1000
    sim_dirs_make: True
    profile: False

exit_criteria:
    sim_max: 8

sim_specs:
    sim_f: forces_simf.run_forces
    inputs:
        - x
    outputs:
        energy:
            type: float

    user:
        keys:
            - seed
        cores: 1
        sim_particles: 1.e+3
        sim_timesteps: 5
        sim_kill_minutes: 10.0
        particle_variance: 0.2
        kill_rate: 0.5
        fail_on_sim: False
        fail_on_submit: False

gen_specs:
    gen_f: libensemble.gen_funcs.sampling.uniform_random_sample
    outputs:
        x:
            type: float
            size: 1
    user:
        gen_batch_size: 1000

alloc_specs:
    alloc_f: libensemble.alloc_funcs.give_sim_work_first.give_sim_work_first
    outputs:
        allocated:
            type: bool
    user:
        batch_mode: True
        num_active_gens: 1
Persistent APOSMM with Gradients
This example is also from the regression tests and demonstrates configuring a run with a persistent generator via the persistent_aposmm_alloc allocation function.
1"""
2Runs libEnsemble with APOSMM with an NLopt local optimizer that uses gradient
3information from the sim_f
4
5Execute via one of the following commands (e.g. 3 workers):
6 mpiexec -np 4 python test_persistent_aposmm_with_grad.py
7 python test_persistent_aposmm_with_grad.py --nworkers 3 --comms local
8 python test_persistent_aposmm_with_grad.py --nworkers 3 --comms tcp
9
10When running with the above commands, the number of concurrent evaluations of
11the objective function will be 2, as one of the three workers will be the
12persistent generator.
13"""
14
15# Do not change these lines - they are parsed by run-tests.sh
16# TESTSUITE_COMMS: local mpi tcp
17# TESTSUITE_NPROCS: 4
18# TESTSUITE_EXTRA: true
19
20import multiprocessing
21import sys
22from math import gamma, pi, sqrt
23
24import numpy as np
25
26import libensemble.gen_funcs
27
28# Import libEnsemble items for this test
29from libensemble.libE import libE
30from libensemble.sim_funcs.six_hump_camel import six_hump_camel as sim_f
31from libensemble.sim_funcs.six_hump_camel import six_hump_camel_func, six_hump_camel_grad
32
33libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt"
34from time import time
35
36from libensemble.alloc_funcs.persistent_aposmm_alloc import persistent_aposmm_alloc as alloc_f
37from libensemble.gen_funcs.persistent_aposmm import aposmm as gen_f
38from libensemble.tests.regression_tests.support import six_hump_camel_minima as minima
39from libensemble.tools import add_unique_random_streams, parse_args, save_libE_output
40
41# Main block is necessary only when using local comms with spawn start method (default on macOS and Windows).
42if __name__ == "__main__":
43 multiprocessing.set_start_method("fork", force=True)
44
45 nworkers, is_manager, libE_specs, _ = parse_args()
46
47 if is_manager:
48 start_time = time()
49
50 if nworkers < 2:
51 sys.exit("Cannot run with a persistent worker if only one worker -- aborting...")
52
53 n = 2
54 sim_specs = {
55 "sim_f": sim_f,
56 "in": ["x"],
57 "out": [("f", float), ("grad", float, n)],
58 }
59
60 gen_out = [
61 ("x", float, n),
62 ("x_on_cube", float, n),
63 ("sim_id", int),
64 ("local_min", bool),
65 ("local_pt", bool),
66 ]
67
68 gen_in = ["x", "f", "grad", "local_pt", "sim_id", "sim_ended", "x_on_cube", "local_min"]
69
70 gen_specs = {
71 "gen_f": gen_f,
72 "in": gen_in,
73 "persis_in": gen_in,
74 "out": gen_out,
75 "user": {
76 "initial_sample_size": 0, # Don't need to do evaluations because the sampling already done below
77 "localopt_method": "LD_MMA",
78 "rk_const": 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi),
79 "stop_after_k_minima": 25,
80 "xtol_rel": 1e-6,
81 "ftol_rel": 1e-6,
82 "max_active_runs": 6,
83 "lb": np.array([-3, -2]),
84 "ub": np.array([3, 2]),
85 },
86 }
87
88 alloc_specs = {"alloc_f": alloc_f}
89
90 persis_info = add_unique_random_streams({}, nworkers + 1)
91
92 exit_criteria = {"sim_max": 1000}
93
94 # Load in "already completed" set of 'x','f','grad' values to give to libE/persistent_aposmm
95 sample_size = len(minima)
96
97 H0_dtype = [
98 ("x", float, n),
99 ("grad", float, n),
100 ("sim_id", int),
101 ("x_on_cube", float, n),
102 ("sim_ended", bool),
103 ("f", float),
104 ("gen_informed", bool),
105 ("sim_started", bool),
106 ]
107 H0 = np.zeros(sample_size, dtype=H0_dtype)
108
109 # Two points in the following sample have the same best function value, which
110 # tests the corner case for some APOSMM logic
111 H0["x"] = np.round(minima, 1)
112 H0["x_on_cube"] = (H0["x"] - gen_specs["user"]["lb"]) / (gen_specs["user"]["ub"] - gen_specs["user"]["lb"])
113 H0["sim_id"] = range(sample_size)
114 H0[["sim_started", "gen_informed", "sim_ended"]] = True
115
116 for i in range(sample_size):
117 H0["f"][i] = six_hump_camel_func(H0["x"][i])
118 H0["grad"][i] = six_hump_camel_grad(H0["x"][i])
119
120 # Perform the run
121 H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, alloc_specs, libE_specs, H0=H0)
122
123 if is_manager:
124 assert persis_info[1].get("run_order"), "Run_order should have been given back"
125 assert (
126 len(persis_info[1]["run_order"]) >= gen_specs["user"]["stop_after_k_minima"]
127 ), "This test should have many runs started."
128 assert len(H) < exit_criteria["sim_max"], "Test should have stopped early due to 'stop_after_k_minima'"
129
130 print("[Manager]:", H[np.where(H["local_min"])]["x"])
131 print("[Manager]: Time taken =", time() - start_time, flush=True)
132
133 tol = 1e-5
134 for m in minima:
135 # The minima are known on this test problem.
136 # We use their values to test APOSMM has identified all minima
137 print(np.min(np.sum((H[H["local_min"]]["x"] - m) ** 2, 1)), flush=True)
138 assert np.min(np.sum((H[H["local_min"]]["x"] - m) ** 2, 1)) < tol
139
140 save_libE_output(H, persis_info, __file__, nworkers)