Calling Scripts

Below are example calling scripts used to populate specifications for each user function and libEnsemble before initiating libEnsemble via the primary libE() call. The primary libEnsemble-relevant portions have been highlighted in each example. Non-highlighted portions may include setup routines, compilation steps for user applications, or output processing. The first two scripts correspond to random sampling calculations, while the third corresponds to an optimization routine.

Many other examples of calling scripts can be found in libEnsemble’s regression tests.

Local Sine Tutorial

This example is from the Local Sine Tutorial, meant to run with Python’s multiprocessing as the primary comms method.

examples/tutorials/simple_sine/tutorial_calling.py
 1import numpy as np
 2import matplotlib.pyplot as plt
 3from libensemble.libE import libE
 4from libensemble.tools import add_unique_random_streams
 5from tutorial_gen import gen_random_sample
 6from tutorial_sim import sim_find_sine
 7
 8nworkers = 4
 9libE_specs = {"nworkers": nworkers, "comms": "local"}
10
11gen_specs = {
12    "gen_f": gen_random_sample,  # Our generator function
13    "out": [("x", float, (1,))],  # gen_f output (name, type, size).
14    "user": {
15        "lower": np.array([-3]),  # random sampling lower bound
16        "upper": np.array([3]),  # random sampling upper bound
17        "gen_batch_size": 5,  # number of values gen_f will generate per call
18    },
19}
20
21sim_specs = {
22    "sim_f": sim_find_sine,  # Our simulator function
23    "in": ["x"],  # Input field names. 'x' from gen_f output
24    "out": [("y", float)],  # sim_f output. 'y' = sine('x')
25}
26
27persis_info = add_unique_random_streams({}, nworkers + 1)  # Initialize manager/workers random streams
28
29exit_criteria = {"sim_max": 80}  # Stop libEnsemble after 80 simulations
30
31H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, libE_specs=libE_specs)
32
33# Some (optional) statements to visualize our History array
34print([i for i in H.dtype.fields])
35print(H)
36
37colors = ["b", "g", "r", "y", "m", "c", "k", "w"]
38
39for i in range(1, nworkers + 1):
40    worker_xy = np.extract(H["sim_worker"] == i, H)
41    x = [entry.tolist()[0] for entry in worker_xy["x"]]
42    y = [entry for entry in worker_xy["y"]]
43    plt.scatter(x, y, label="Worker {}".format(i), c=colors[i - 1])
44
45plt.title("Sine calculations for a uniformly sampled random distribution")
46plt.xlabel("x")
47plt.ylabel("sine(x)")
48plt.legend(loc="lower right")
49plt.savefig("tutorial_sines.png")

Electrostatic Forces with Executor

These examples are from a test for evaluating the scaling capabilities of libEnsemble by calculating particle electrostatic forces through a user application. This application is registered with either the MPI or Balsam Executor, then submitted for execution in the sim_f. Note the use of the parse_args() and save_libE_output() convenience functions from the tools module in the first calling script.

Traditional Version

tests/scaling_tests/forces/forces_adv/run_libe_forces.py
  1#!/usr/bin/env python
  2import os
  3import sys
  4import numpy as np
  5from forces_simf import run_forces  # Sim func from current dir
  6
  7# Import libEnsemble modules
  8from libensemble.libE import libE
  9from libensemble.manager import ManagerException
 10from libensemble.tools import parse_args, save_libE_output, add_unique_random_streams
 11from libensemble import logger
 12from forces_support import test_libe_stats, test_ensemble_dir, check_log_exception
 13
 14# Note the Balsam option here is now LegacyBalsam - see balsam_forces for latest.
 15USE_BALSAM = False
 16
 17PERSIS_GEN = False
 18
 19if PERSIS_GEN:
 20    from libensemble.gen_funcs.persistent_sampling import persistent_uniform as gen_f
 21    from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f
 22else:
 23    from libensemble.gen_funcs.sampling import uniform_random_sample as gen_f
 24    from libensemble.alloc_funcs.give_sim_work_first import give_sim_work_first as alloc_f
 25
 26
 27logger.set_level("INFO")  # INFO is now default
 28
 29nworkers, is_manager, libE_specs, _ = parse_args()
 30
 31sim_app = os.path.join(os.getcwd(), "../forces_app/forces.x")
 32
 33if not os.path.isfile(sim_app):
 34    sys.exit("forces.x not found - please build first in ../forces_app dir")
 35
 36if is_manager:
 37    print("\nRunning with {} workers\n".format(nworkers))
 38
 39
 40# Create executor and register sim to it.
 41if USE_BALSAM:
 42    from libensemble.executors.legacy_balsam_executor import LegacyBalsamMPIExecutor
 43
 44    exctr = LegacyBalsamMPIExecutor()
 45else:
 46    from libensemble.executors.mpi_executor import MPIExecutor
 47
 48    exctr = MPIExecutor()
 49exctr.register_app(full_path=sim_app, app_name="forces")
 50
 52# Note: Attributes such as kill_rate are used to control forces tests; this would not be a typical parameter.
 52
 53# State the objective function, its arguments, output, and necessary parameters (and their sizes)
 54sim_specs = {
 55    "sim_f": run_forces,  # Function whose output is being minimized
 56    "in": ["x"],  # Name of input for sim_f
 57    "out": [("energy", float)],  # Name, type of output from sim_f
 58    "user": {
 59        "keys": ["seed"],
 60        "cores": 2,
 61        "sim_particles": 1e3,
 62        "sim_timesteps": 5,
 63        "sim_kill_minutes": 10.0,
 64        "particle_variance": 0.2,
 65        "kill_rate": 0.5,
 66        "fail_on_sim": False,
 67        "fail_on_submit": False,  # Won't occur if 'fail_on_sim' True
 68    },
 69}
 70# end_sim_specs_rst_tag
 71
 72# State the generating function, its arguments, output, and necessary parameters.
 73gen_specs = {
 74    "gen_f": gen_f,  # Generator function
 75    "in": [],  # Generator input
 76    "out": [("x", float, (1,))],  # Name, type and size of data produced (must match sim_specs 'in')
 77    "user": {
 78        "lb": np.array([0]),  # Lower bound for random sample array (1D)
 79        "ub": np.array([32767]),  # Upper bound for random sample array (1D)
 80        "gen_batch_size": 1000,  # How many random samples to generate in one call
 81    },
 82}
 83
 84if PERSIS_GEN:
 85    alloc_specs = {"alloc_f": alloc_f}
 86else:
 87    alloc_specs = {
 88        "alloc_f": alloc_f,
 89        "user": {
 90            "batch_mode": True,  # If True, wait for all sims to finish before generating more
 91            "num_active_gens": 1,  # Only one active generator at a time
 92        },
 93    }
 94
 95libE_specs["save_every_k_gens"] = 1000  # Save every K steps
 96libE_specs["sim_dirs_make"] = True  # Separate each sim into a separate directory
 97libE_specs["profile"] = False  # Whether to have libE profile on (default False)
 98
 99# Maximum number of simulations
100sim_max = 8
101exit_criteria = {"sim_max": sim_max}
102
103# Create a different random number stream for each worker and the manager
104persis_info = {}
105persis_info = add_unique_random_streams(persis_info, nworkers + 1)
106
107try:
108    H, persis_info, flag = libE(
109        sim_specs,
110        gen_specs,
111        exit_criteria,
112        persis_info=persis_info,
113        alloc_specs=alloc_specs,
114        libE_specs=libE_specs,
115    )
116
117except ManagerException:
118    if is_manager and sim_specs["user"]["fail_on_sim"]:
119        check_log_exception()
120        test_libe_stats("Exception occurred\n")
121else:
122    if is_manager:
123        save_libE_output(H, persis_info, __file__, nworkers)
124        if sim_specs["user"]["fail_on_submit"]:
125            test_libe_stats("Task Failed\n")
126        test_ensemble_dir(libE_specs, "./ensemble", nworkers, sim_max)

Object + yaml Version

tests/scaling_tests/forces/forces_adv/run_libe_forces_from_yaml.py
 1#!/usr/bin/env python
 2import os
 3import sys
 4import numpy as np
 5
 6from libensemble import Ensemble
 7from libensemble.executors.mpi_executor import MPIExecutor
 8
 9####################
10
11sim_app = os.path.join(os.getcwd(), "../forces_app/forces.x")
12
13if not os.path.isfile(sim_app):
14    sys.exit("forces.x not found - please build first in ../forces_app dir")
15
16
17####################
18
19forces = Ensemble()
20forces.from_yaml("forces.yaml")
21
22forces.logger.set_level("INFO")
23
24if forces.is_manager:
25    print("\nRunning with {} workers\n".format(forces.nworkers))
26
27exctr = MPIExecutor()
28exctr.register_app(full_path=sim_app, app_name="forces")
29
30forces.libE_specs["ensemble_dir_path"] = "./ensemble"
31forces.gen_specs["user"].update(
32    {
33        "lb": np.array([0]),
34        "ub": np.array([32767]),
35    }
36)
37
38forces.persis_info.add_random_streams()
39
40forces.run()
41
42if forces.is_manager:
43    forces.save_output(__file__)
tests/scaling_tests/forces/forces_adv/forces.yaml
 1libE_specs:
 2    save_every_k_gens: 1000
 3    sim_dirs_make: True
 4    profile: False
 5    exit_criteria:
 6        sim_max: 8
 7
 8sim_specs:
 9    function: forces_simf.run_forces
10    inputs:
11        - x
12    outputs:
13        energy:
14            type: float
15
16    user:
17        keys:
18            - seed
19        cores: 1
20        sim_particles: 1.e+3
21        sim_timesteps: 5
22        sim_kill_minutes: 10.0
23        particle_variance: 0.2
24        kill_rate: 0.5
25        fail_on_sim: False
26        fail_on_submit: False
27
28gen_specs:
29    function: libensemble.gen_funcs.sampling.uniform_random_sample
30    outputs:
31        x:
32            type: float
33            size: 1
34    user:
35        gen_batch_size: 1000
36
37alloc_specs:
38    function: libensemble.alloc_funcs.give_sim_work_first.give_sim_work_first
39    outputs:
40        allocated:
41            type: bool
42    user:
43        batch_mode: True
44        num_active_gens: 1

Persistent APOSMM with Gradients

This example is also from the regression tests and demonstrates configuring a persistent run via a custom allocation function.

tests/regression_tests/test_persistent_aposmm_with_grad.py
  1"""
  2Runs libEnsemble with APOSMM with an NLopt local optimizer that uses gradient
  3information from the sim_f
  4
  5Execute via one of the following commands (e.g. 3 workers):
  6   mpiexec -np 4 python test_persistent_aposmm_with_grad.py
  7   python test_persistent_aposmm_with_grad.py --nworkers 3 --comms local
  8   python test_persistent_aposmm_with_grad.py --nworkers 3 --comms tcp
  9
 10When running with the above commands, the number of concurrent evaluations of
 11the objective function will be 2, as one of the three workers will be the
 12persistent generator.
 13"""
 14
 15# Do not change these lines - they are parsed by run-tests.sh
 16# TESTSUITE_COMMS: local mpi tcp
 17# TESTSUITE_NPROCS: 4
 18# TESTSUITE_EXTRA: true
 19
 20import sys
 21import numpy as np
 22
 23# Import libEnsemble items for this test
 24from libensemble.libE import libE
 25from math import gamma, pi, sqrt
 26from libensemble.sim_funcs.six_hump_camel import six_hump_camel as sim_f, six_hump_camel_func, six_hump_camel_grad
 27
 28import libensemble.gen_funcs
 29
 30libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt"
 31from libensemble.gen_funcs.persistent_aposmm import aposmm as gen_f
 32
 33from libensemble.alloc_funcs.persistent_aposmm_alloc import persistent_aposmm_alloc as alloc_f
 34from libensemble.tools import parse_args, save_libE_output, add_unique_random_streams
 35from libensemble.tests.regression_tests.support import six_hump_camel_minima as minima
 36from time import time
 37
 38nworkers, is_manager, libE_specs, _ = parse_args()
 39
 40if is_manager:
 41    start_time = time()
 42
 43if nworkers < 2:
 44    sys.exit("Cannot run with a persistent worker if only one worker -- aborting...")
 45
 46n = 2
 47sim_specs = {
 48    "sim_f": sim_f,
 49    "in": ["x"],
 50    "out": [("f", float), ("grad", float, n)],
 51}
 52
 53gen_out = [
 54    ("x", float, n),
 55    ("x_on_cube", float, n),
 56    ("sim_id", int),
 57    ("local_min", bool),
 58    ("local_pt", bool),
 59]
 60
 61gen_in = ["x", "f", "grad", "local_pt", "sim_id", "sim_ended", "x_on_cube", "local_min"]
 62
 63gen_specs = {
 64    "gen_f": gen_f,
 65    "in": gen_in,
 66    "persis_in": gen_in,
 67    "out": gen_out,
 68    "user": {
 69        "initial_sample_size": 0,  # No evaluations needed because the sampling is already done below
 70        "localopt_method": "LD_MMA",
 71        "rk_const": 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi),
 72        "stop_after_this_many_minima": 25,
 73        "xtol_rel": 1e-6,
 74        "ftol_rel": 1e-6,
 75        "max_active_runs": 6,
 76        "lb": np.array([-3, -2]),
 77        "ub": np.array([3, 2]),
 78    },
 79}
 80
 81alloc_specs = {"alloc_f": alloc_f}
 82
 83persis_info = add_unique_random_streams({}, nworkers + 1)
 84
 85exit_criteria = {"sim_max": 1000}
 86
 87# Load in "already completed" set of 'x','f','grad' values to give to libE/persistent_aposmm
 88sample_size = len(minima)
 89
 90H0_dtype = [
 91    ("x", float, n),
 92    ("grad", float, n),
 93    ("sim_id", int),
 94    ("x_on_cube", float, n),
 95    ("sim_ended", bool),
 96    ("f", float),
 97    ("gen_informed", bool),
 98    ("sim_started", bool),
 99]
100H0 = np.zeros(sample_size, dtype=H0_dtype)
101
102# Two points in the following sample have the same best function value, which
103# tests the corner case for some APOSMM logic
104H0["x"] = np.round(minima, 1)
105H0["x_on_cube"] = (H0["x"] - gen_specs["user"]["lb"]) / (gen_specs["user"]["ub"] - gen_specs["user"]["lb"])
106H0["sim_id"] = range(sample_size)
107H0[["sim_started", "gen_informed", "sim_ended"]] = True
108
109for i in range(sample_size):
110    H0["f"][i] = six_hump_camel_func(H0["x"][i])
111    H0["grad"][i] = six_hump_camel_grad(H0["x"][i])
112
113# Perform the run
114H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, alloc_specs, libE_specs, H0=H0)
115
116if is_manager:
117    print("[Manager]:", H[np.where(H["local_min"])]["x"])
118    print("[Manager]: Time taken =", time() - start_time, flush=True)
119
120    tol = 1e-5
121    for m in minima:
122        # The minima are known on this test problem.
123        # We use their values to test APOSMM has identified all minima
124        print(np.min(np.sum((H[H["local_min"]]["x"] - m) ** 2, 1)), flush=True)
125        assert np.min(np.sum((H[H["local_min"]]["x"] - m) ** 2, 1)) < tol
126
127    assert len(H) < exit_criteria["sim_max"], "Test should have stopped early"
128
129    save_libE_output(H, persis_info, __file__, nworkers)