Calling Scripts

Below are example calling scripts that populate the specification dictionaries for each user function and for libEnsemble itself before starting a run via the primary libE() call. The libEnsemble-relevant portions are the specification dictionaries and the libE() call; the remaining portions include setup routines, compilation steps for user applications, and output processing. The first two examples perform random sampling calculations, while the third performs an optimization.

Many other examples of calling scripts can be found in libEnsemble’s regression tests.
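
As a quick orientation before the full examples, the minimal sketch below (not one of the scripts on this page) shows the basic shape of a calling script: build the sim_specs, gen_specs, and exit_criteria dictionaries, then pass them to libE(). It pairs two functions that ship with libEnsemble, the uniform_random_sample generator and the six_hump_camel simulator; the bounds, batch size, and stopping point are arbitrary choices for illustration.

import numpy as np

from libensemble.libE import libE
from libensemble.gen_funcs.sampling import uniform_random_sample
from libensemble.sim_funcs.six_hump_camel import six_hump_camel
from libensemble.tools import add_unique_random_streams

nworkers = 4
libE_specs = {"nworkers": nworkers, "comms": "local"}  # multiprocessing-based workers

gen_specs = {
    "gen_f": uniform_random_sample,  # Generator: random points between lb and ub
    "out": [("x", float, (2,))],
    "user": {
        "lb": np.array([-3, -2]),
        "ub": np.array([3, 2]),
        "gen_batch_size": 10,
    },
}

sim_specs = {
    "sim_f": six_hump_camel,  # Simulator: evaluate the six-hump camel function at "x"
    "in": ["x"],
    "out": [("f", float)],
}

persis_info = add_unique_random_streams({}, nworkers + 1)
exit_criteria = {"sim_max": 40}

if __name__ == "__main__":  # Guard needed when multiprocessing uses the "spawn" start method
    H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, libE_specs=libE_specs)
    print(H[["x", "f"]][:5])  # Inspect the first few evaluated points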

Local Sine Tutorial

This example is from the Local Sine Tutorial and is intended to run with Python's multiprocessing ("local" comms) as the communication method.

examples/tutorials/simple_sine/tutorial_calling.py
import numpy as np
import matplotlib.pyplot as plt
from libensemble.libE import libE
from libensemble.tools import add_unique_random_streams
from tutorial_gen import gen_random_sample
from tutorial_sim import sim_find_sine

nworkers = 4
libE_specs = {"nworkers": nworkers, "comms": "local"}

gen_specs = {
    "gen_f": gen_random_sample,  # Our generator function
    "out": [("x", float, (1,))],  # gen_f output (name, type, size).
    "user": {
        "lower": np.array([-3]),  # random sampling lower bound
        "upper": np.array([3]),  # random sampling upper bound
        "gen_batch_size": 5,  # number of values gen_f will generate per call
    },
}

sim_specs = {
    "sim_f": sim_find_sine,  # Our simulator function
    "in": ["x"],  # Input field names. 'x' from gen_f output
    "out": [("y", float)],  # sim_f output. 'y' = sine('x')
}

persis_info = add_unique_random_streams({}, nworkers + 1)  # Initialize manager/workers random streams

exit_criteria = {"sim_max": 80}  # Stop libEnsemble after 80 simulations

H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, libE_specs=libE_specs)

# Some (optional) statements to visualize our History array
print([i for i in H.dtype.fields])
print(H)

colors = ["b", "g", "r", "y", "m", "c", "k", "w"]

for i in range(1, nworkers + 1):
    worker_xy = np.extract(H["sim_worker"] == i, H)
    x = [entry.tolist()[0] for entry in worker_xy["x"]]
    y = [entry for entry in worker_xy["y"]]
    plt.scatter(x, y, label="Worker {}".format(i), c=colors[i - 1])

plt.title("Sine calculations for a uniformly sampled random distribution")
plt.xlabel("x")
plt.ylabel("sine(x)")
plt.legend(loc="lower right")
plt.savefig("tutorial_sines.png")

Electrostatic Forces with Executor

These examples are from a test used to evaluate the scaling capabilities of libEnsemble by calculating particle electrostatic forces with a compiled user application. The application is registered with either the MPI or the (legacy) Balsam Executor and is then submitted for execution from within the sim_f. Note the use of the parse_args() and save_libE_output() convenience functions from the tools module in the first calling script.
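
The sim_f that consumes the registered application (run_forces in forces_simf.py) is not reproduced on this page. As a rough, hedged sketch of the pattern such a function can follow, it may retrieve the Executor created in the calling script and submit the registered "forces" app as below; the function name, argument construction, and energy parsing are placeholders, and the exact retrieval and status-checking calls may differ between libEnsemble versions.

import time

import numpy as np
from libensemble.executors.executor import Executor
from libensemble.message_numbers import TASK_FAILED, WORKER_DONE


def run_forces_sketch(H, persis_info, sim_specs, libE_info):
    """Hypothetical sim_f: submit the registered "forces" app and return an energy value."""
    exctr = Executor.executor  # Executor instance created and registered in the calling script

    # Build command-line arguments from the input point (purely illustrative)
    seed = int(H["x"][0][0])
    args = "{} {} {}".format(int(sim_specs["user"]["sim_particles"]), sim_specs["user"]["sim_timesteps"], seed)

    task = exctr.submit(
        app_name="forces",
        app_args=args,
        num_procs=sim_specs["user"]["cores"],
        stdout="out.txt",
        stderr="err.txt",
    )

    # Block until the launched application finishes
    while not task.finished:
        time.sleep(1)
        task.poll()

    calc_status = WORKER_DONE if task.state == "FINISHED" else TASK_FAILED

    output = np.zeros(1, dtype=sim_specs["out"])
    output["energy"] = 0.0  # A real sim_f would parse the application's output file here
    return output, persis_info, calc_status

As with the other scripts on this page that use parse_args(), the calling script below can be run with local comms via, for example, python run_libe_forces.py --comms local --nworkers 4.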

Traditional Version

tests/scaling_tests/forces/forces_adv/run_libe_forces.py
#!/usr/bin/env python
import os
import sys
import numpy as np
from forces_simf import run_forces  # Sim func from current dir

# Import libEnsemble modules
from libensemble.libE import libE
from libensemble.manager import ManagerException
from libensemble.tools import parse_args, save_libE_output, add_unique_random_streams
from libensemble import logger
from forces_support import test_libe_stats, test_ensemble_dir, check_log_exception

# Note the Balsam option here is now LegacyBalsam - see balsam_forces for latest.
USE_BALSAM = False

PERSIS_GEN = False

if PERSIS_GEN:
    from libensemble.gen_funcs.persistent_sampling import persistent_uniform as gen_f
    from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f
else:
    from libensemble.gen_funcs.sampling import uniform_random_sample as gen_f
    from libensemble.alloc_funcs.give_sim_work_first import give_sim_work_first as alloc_f


logger.set_level("INFO")  # INFO is now default

nworkers, is_manager, libE_specs, _ = parse_args()

sim_app = os.path.join(os.getcwd(), "../forces_app/forces.x")

if not os.path.isfile(sim_app):
    sys.exit("forces.x not found - please build first in ../forces_app dir")

if is_manager:
    print("\nRunning with {} workers\n".format(nworkers))


# Create executor and register sim to it.
if USE_BALSAM:
    from libensemble.executors.legacy_balsam_executor import LegacyBalsamMPIExecutor

    exctr = LegacyBalsamMPIExecutor()
else:
    from libensemble.executors.mpi_executor import MPIExecutor

    exctr = MPIExecutor()
exctr.register_app(full_path=sim_app, app_name="forces")

# Note: Attributes such as kill_rate are to control forces tests, this would not be a typical parameter.

# State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {
    "sim_f": run_forces,  # Function whose output is being minimized
    "in": ["x"],  # Name of input for sim_f
    "out": [("energy", float)],  # Name, type of output from sim_f
    "user": {
        "keys": ["seed"],
        "cores": 2,
        "sim_particles": 1e3,
        "sim_timesteps": 5,
        "sim_kill_minutes": 10.0,
        "particle_variance": 0.2,
        "kill_rate": 0.5,
        "fail_on_sim": False,
        "fail_on_submit": False,  # Won't occur if 'fail_on_sim' True
    },
}
# end_sim_specs_rst_tag

# State the generating function, its arguments, output, and necessary parameters.
gen_specs = {
    "gen_f": gen_f,  # Generator function
    "in": [],  # Generator input
    "out": [("x", float, (1,))],  # Name, type and size of data produced (must match sim_specs 'in')
    "user": {
        "lb": np.array([0]),  # Lower bound for random sample array (1D)
        "ub": np.array([32767]),  # Upper bound for random sample array (1D)
        "gen_batch_size": 1000,  # How many random samples to generate in one call
    },
}

if PERSIS_GEN:
    alloc_specs = {"alloc_f": alloc_f}
else:
    alloc_specs = {
        "alloc_f": alloc_f,
        "user": {
            "batch_mode": True,  # If true wait for all sims to process before generate more
            "num_active_gens": 1,  # Only one active generator at a time
        },
    }

libE_specs["save_every_k_gens"] = 1000  # Save every K steps
libE_specs["sim_dirs_make"] = True  # Separate each sim into a separate directory
libE_specs["profile"] = False  # Whether to have libE profile on (default False)

# Maximum number of simulations
sim_max = 8
exit_criteria = {"sim_max": sim_max}

# Create a different random number stream for each worker and the manager
persis_info = {}
persis_info = add_unique_random_streams(persis_info, nworkers + 1)

try:
    H, persis_info, flag = libE(
        sim_specs,
        gen_specs,
        exit_criteria,
        persis_info=persis_info,
        alloc_specs=alloc_specs,
        libE_specs=libE_specs,
    )

except ManagerException:
    if is_manager and sim_specs["user"]["fail_on_sim"]:
        check_log_exception()
        test_libe_stats("Exception occurred\n")
else:
    if is_manager:
        save_libE_output(H, persis_info, __file__, nworkers)
        if sim_specs["user"]["fail_on_submit"]:
            test_libe_stats("Task Failed\n")
        test_ensemble_dir(libE_specs, "./ensemble", nworkers, sim_max)

Object + yaml Version

tests/scaling_tests/forces/forces_adv/run_libe_forces_from_yaml.py
#!/usr/bin/env python
import os
import sys
import numpy as np

from libensemble import Ensemble
from libensemble.executors.mpi_executor import MPIExecutor

####################

sim_app = os.path.join(os.getcwd(), "../forces_app/forces.x")

if not os.path.isfile(sim_app):
    sys.exit("forces.x not found - please build first in ../forces_app dir")


####################

forces = Ensemble()
forces.from_yaml("forces.yaml")

forces.logger.set_level("INFO")

if forces.is_manager:
    print("\nRunning with {} workers\n".format(forces.nworkers))

exctr = MPIExecutor()
exctr.register_app(full_path=sim_app, app_name="forces")

forces.libE_specs["ensemble_dir_path"] = "./ensemble"
forces.gen_specs["user"].update(
    {
        "lb": np.array([0]),
        "ub": np.array([32767]),
    }
)

forces.persis_info.add_random_streams()

forces.run()

if forces.is_manager:
    forces.save_output(__file__)

tests/scaling_tests/forces/forces_adv/forces.yaml
libE_specs:
    save_every_k_gens: 1000
    sim_dirs_make: True
    profile: False
    exit_criteria:
        sim_max: 8

sim_specs:
    function: forces_simf.run_forces
    inputs:
        - x
    outputs:
        energy:
            type: float

    user:
        keys:
            - seed
        cores: 1
        sim_particles: 1.e+3
        sim_timesteps: 5
        sim_kill_minutes: 10.0
        particle_variance: 0.2
        kill_rate: 0.5
        fail_on_sim: False
        fail_on_submit: False

gen_specs:
    function: libensemble.gen_funcs.sampling.uniform_random_sample
    outputs:
        x:
            type: float
            size: 1
    user:
        gen_batch_size: 1000

alloc_specs:
    function: libensemble.alloc_funcs.give_sim_work_first.give_sim_work_first
    outputs:
        allocated:
            type: bool
    user:
        batch_mode: True
        num_active_gens: 1
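
Note how the YAML sections mirror the dictionaries in the traditional script: each function entry gives the import path of the user function (sim_f, gen_f, or alloc_f), inputs corresponds to the "in" list, outputs corresponds to the "out" field definitions (name, type, and optional size), and the user blocks carry the same parameters. The exit criteria sit under libE_specs here, while run-time choices such as the sampling bounds are supplied from the calling script via forces.gen_specs["user"].update.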

Persistent APOSMM with Gradients

This example is also from the regression tests and demonstrates configuring a run with a persistent generator (APOSMM) via the persistent_aposmm_alloc allocation function, seeded with a prior history array (H0).

tests/regression_tests/test_persistent_aposmm_with_grad.py
  1"""
  2Runs libEnsemble with APOSMM with an NLopt local optimizer that uses gradient
  3information from the sim_f
  4
  5Execute via one of the following commands (e.g. 3 workers):
  6   mpiexec -np 4 python test_persistent_aposmm_with_grad.py
  7   python test_persistent_aposmm_with_grad.py --nworkers 3 --comms local
  8   python test_persistent_aposmm_with_grad.py --nworkers 3 --comms tcp
  9
 10When running with the above commands, the number of concurrent evaluations of
 11the objective function will be 2, as one of the three workers will be the
 12persistent generator.
 13"""
 14
 15# Do not change these lines - they are parsed by run-tests.sh
 16# TESTSUITE_COMMS: local mpi tcp
 17# TESTSUITE_NPROCS: 4
 18# TESTSUITE_EXTRA: true
 19
 20import sys
 21import multiprocessing
 22import numpy as np
 23
 24# Import libEnsemble items for this test
 25from libensemble.libE import libE
 26from math import gamma, pi, sqrt
 27from libensemble.sim_funcs.six_hump_camel import six_hump_camel as sim_f, six_hump_camel_func, six_hump_camel_grad
 28
 29import libensemble.gen_funcs
 30
 31libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt"
 32from libensemble.gen_funcs.persistent_aposmm import aposmm as gen_f
 33
 34from libensemble.alloc_funcs.persistent_aposmm_alloc import persistent_aposmm_alloc as alloc_f
 35from libensemble.tools import parse_args, save_libE_output, add_unique_random_streams
 36from libensemble.tests.regression_tests.support import six_hump_camel_minima as minima
 37from time import time
 38
 39# Main block is necessary only when using local comms with spawn start method (default on macOS and Windows).
 40if __name__ == "__main__":
 41
 42    # Temporary solution while we investigate/resolve slowdowns with "spawn" start method.
 43    multiprocessing.set_start_method("fork", force=True)
 44
 45    nworkers, is_manager, libE_specs, _ = parse_args()
 46
 47    if is_manager:
 48        start_time = time()
 49
 50    if nworkers < 2:
 51        sys.exit("Cannot run with a persistent worker if only one worker -- aborting...")
 52
 53    n = 2
 54    sim_specs = {
 55        "sim_f": sim_f,
 56        "in": ["x"],
 57        "out": [("f", float), ("grad", float, n)],
 58    }
 59
 60    gen_out = [
 61        ("x", float, n),
 62        ("x_on_cube", float, n),
 63        ("sim_id", int),
 64        ("local_min", bool),
 65        ("local_pt", bool),
 66    ]
 67
 68    gen_in = ["x", "f", "grad", "local_pt", "sim_id", "sim_ended", "x_on_cube", "local_min"]
 69
 70    gen_specs = {
 71        "gen_f": gen_f,
 72        "in": gen_in,
 73        "persis_in": gen_in,
 74        "out": gen_out,
 75        "user": {
 76            "initial_sample_size": 0,  # Don't need to do evaluations because the sampling already done below
 77            "localopt_method": "LD_MMA",
 78            "rk_const": 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi),
 79            "stop_after_this_many_minima": 25,
 80            "xtol_rel": 1e-6,
 81            "ftol_rel": 1e-6,
 82            "max_active_runs": 6,
 83            "lb": np.array([-3, -2]),
 84            "ub": np.array([3, 2]),
 85        },
 86    }
 87
 88    alloc_specs = {"alloc_f": alloc_f}
 89
 90    persis_info = add_unique_random_streams({}, nworkers + 1)
 91
 92    exit_criteria = {"sim_max": 1000}
 93
 94    # Load in "already completed" set of 'x','f','grad' values to give to libE/persistent_aposmm
 95    sample_size = len(minima)
 96
 97    H0_dtype = [
 98        ("x", float, n),
 99        ("grad", float, n),
100        ("sim_id", int),
101        ("x_on_cube", float, n),
102        ("sim_ended", bool),
103        ("f", float),
104        ("gen_informed", bool),
105        ("sim_started", bool),
106    ]
107    H0 = np.zeros(sample_size, dtype=H0_dtype)
108
109    # Two points in the following sample have the same best function value, which
110    # tests the corner case for some APOSMM logic
111    H0["x"] = np.round(minima, 1)
112    H0["x_on_cube"] = (H0["x"] - gen_specs["user"]["lb"]) / (gen_specs["user"]["ub"] - gen_specs["user"]["lb"])
113    H0["sim_id"] = range(sample_size)
114    H0[["sim_started", "gen_informed", "sim_ended"]] = True
115
116    for i in range(sample_size):
117        H0["f"][i] = six_hump_camel_func(H0["x"][i])
118        H0["grad"][i] = six_hump_camel_grad(H0["x"][i])
119
120    # Perform the run
121    H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, alloc_specs, libE_specs, H0=H0)
122
123    if is_manager:
124        print("[Manager]:", H[np.where(H["local_min"])]["x"])
125        print("[Manager]: Time taken =", time() - start_time, flush=True)
126
127        tol = 1e-5
128        for m in minima:
129            # The minima are known on this test problem.
130            # We use their values to test APOSMM has identified all minima
131            print(np.min(np.sum((H[H["local_min"]]["x"] - m) ** 2, 1)), flush=True)
132            assert np.min(np.sum((H[H["local_min"]]["x"] - m) ** 2, 1)) < tol
133
134        assert len(H) < exit_criteria["sim_max"], "Test should have stopped early"
135
136        save_libE_output(H, persis_info, __file__, nworkers)
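
After a successful run, save_libE_output writes the final History array to a .npy file in the working directory, so the results can be inspected in a separate session. A minimal sketch, assuming the file of interest is the most recently modified .npy in the directory:

import glob
import os

import numpy as np

# Pick the most recently modified .npy file written by save_libE_output
hist_file = max(glob.glob("*.npy"), key=os.path.getmtime)
H = np.load(hist_file)

# Points that APOSMM flagged as local minima
print(H[H["local_min"]]["x"])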