Calling Scripts

Below are example calling scripts that populate the specification dictionaries for each user function and for libEnsemble itself before initiating libEnsemble via the primary libE() call. The libEnsemble-relevant portions are highlighted in each example; the remaining portions may include setup routines, compilation steps for user applications, or output processing. The first scripts perform random sampling calculations, while the last performs an optimization routine.
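
As a quick orientation, here is a minimal sketch (not one of the examples below; my_sim, my_gen, and their parameters are placeholders) of the pattern that all of the following scripts share: build the specification dictionaries, then hand them to libE().

import numpy as np
from libensemble.libE import libE
from libensemble.tools import parse_args, add_unique_random_streams

from my_funcs import my_sim, my_gen  # placeholder user functions

# Read e.g. --comms local --nworkers 4 from the command line
nworkers, is_manager, libE_specs, _ = parse_args()

sim_specs = {'sim_f': my_sim,                   # Simulator function
             'in': ['x'],                       # Input field names consumed by my_sim
             'out': [('f', float)]}             # Output fields produced by my_sim

gen_specs = {'gen_f': my_gen,                   # Generator function
             'out': [('x', float, (1,))],       # Output fields produced by my_gen
             'user': {'lb': np.array([0.0]),    # Whatever parameters the chosen
                      'ub': np.array([1.0]),    # generator expects (placeholders here)
                      'gen_batch_size': 10}}

persis_info = add_unique_random_streams({}, nworkers + 1)  # Random streams for manager/workers
exit_criteria = {'sim_max': 100}                # Stop after 100 simulations

H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info,
                            libE_specs=libE_specs)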

Many other examples of calling scripts can be found in libEnsemble’s regression tests.

Local Sine Tutorial

This example is from the Local Sine Tutorial, meant to run with Python’s multiprocessing as the primary comms method.

examples/tutorials/simple_sine/tutorial_calling.py
import numpy as np
import matplotlib.pyplot as plt
from libensemble.libE import libE
from libensemble.tools import add_unique_random_streams
from tutorial_gen import gen_random_sample
from tutorial_sim import sim_find_sine

nworkers = 4
libE_specs = {'nworkers': nworkers, 'comms': 'local'}

gen_specs = {'gen_f': gen_random_sample,        # Our generator function
             'out': [('x', float, (1,))],       # gen_f output (name, type, size).
             'user': {'lower': np.array([-3]),  # random sampling lower bound
                      'upper': np.array([3]),   # random sampling upper bound
                      'gen_batch_size': 5       # number of values gen_f will generate per call
                      }
             }

sim_specs = {'sim_f': sim_find_sine,            # Our simulator function
             'in': ['x'],                       # Input field names. 'x' from gen_f output
             'out': [('y', float)]}             # sim_f output. 'y' = sine('x')

persis_info = add_unique_random_streams({}, nworkers+1)  # Initialize manager/workers random streams

exit_criteria = {'sim_max': 80}                 # Stop libEnsemble after 80 simulations

H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info,
                            libE_specs=libE_specs)

# Some (optional) statements to visualize our History array
print([i for i in H.dtype.fields])
print(H)

colors = ['b', 'g', 'r', 'y', 'm', 'c', 'k', 'w']

for i in range(1, nworkers + 1):
    worker_xy = np.extract(H['sim_worker'] == i, H)
    x = [entry.tolist()[0] for entry in worker_xy['x']]
    y = [entry for entry in worker_xy['y']]
    plt.scatter(x, y, label='Worker {}'.format(i), c=colors[i-1])

plt.title('Sine calculations for a uniformly sampled random distribution')
plt.xlabel('x')
plt.ylabel('sine(x)')
plt.legend(loc='lower right')
plt.savefig('tutorial_sines.png')
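
To run the tutorial, place tutorial_gen.py and tutorial_sim.py alongside the script and launch it directly with python; with 'comms': 'local', libEnsemble starts the workers itself via multiprocessing. As a small variant that is not part of the tutorial file, the hard-coded worker count could instead be taken from the command line using the parse_args() convenience function that the later examples rely on:

from libensemble.tools import parse_args

# Reads options such as --comms local --nworkers 4 and returns a
# correspondingly populated libE_specs dictionary.
nworkers, is_manager, libE_specs, _ = parse_args()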

Electrostatic Forces with Executor

These examples are from a test used to evaluate the scaling capabilities of libEnsemble by calculating particle electrostatic forces with a user application. This application is registered with either the MPI or Balsam Executor and then submitted for execution within the sim_f. Note the use of the parse_args() and save_libE_output() convenience functions from the tools module in the first calling script.
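
The sim_f itself (forces_simf.run_forces) is not shown here. Below is only a minimal sketch, under assumed arguments and without the input staging, polling timeout, and kill logic of the real forces_simf.py, of how a sim_f typically retrieves the registered application from the Executor and submits it:

import time
import numpy as np
from libensemble.executors.executor import Executor
from libensemble.message_numbers import WORKER_DONE, TASK_FAILED


def sim_sketch(H, persis_info, sim_specs, libE_info):
    """Illustrative sim_f only -- not the actual run_forces."""
    exctr = Executor.executor                      # Executor created in the calling script

    particles = str(int(sim_specs['user']['sim_particles']))

    # Submit the application registered under app_name='forces'
    task = exctr.submit(app_name='forces', app_args=particles,
                        num_procs=sim_specs['user']['cores'],
                        stdout='out.txt', stderr='err.txt')

    while not task.finished:                       # The real sim_f also enforces a timeout
        time.sleep(1)                              # and may kill misbehaving tasks
        task.poll()

    calc_status = WORKER_DONE if task.state == 'FINISHED' else TASK_FAILED

    output = np.zeros(1, dtype=sim_specs['out'])
    output['energy'] = 0.0                         # The real sim_f reads this from the app output

    return output, persis_info, calc_status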

Traditional Version

tests/scaling_tests/forces/run_libe_forces.py
#!/usr/bin/env python
import os
import numpy as np
from forces_simf import run_forces  # Sim func from current dir

# Import libEnsemble modules
from libensemble.libE import libE
from libensemble.manager import ManagerException
from libensemble.tools import parse_args, save_libE_output, add_unique_random_streams
from libensemble import logger
from forces_support import test_libe_stats, test_ensemble_dir, check_log_exception

USE_BALSAM = False
PERSIS_GEN = False

if PERSIS_GEN:
    from libensemble.gen_funcs.persistent_uniform_sampling import persistent_uniform as gen_f
    from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f
else:
    from libensemble.gen_funcs.sampling import uniform_random_sample as gen_f
    from libensemble.alloc_funcs.give_sim_work_first import give_sim_work_first as alloc_f


logger.set_level('INFO')  # INFO is now default

nworkers, is_manager, libE_specs, _ = parse_args()

if is_manager:
    print('\nRunning with {} workers\n'.format(nworkers))

sim_app = os.path.join(os.getcwd(), 'forces.x')

# Normally would be pre-compiled
if not os.path.isfile('forces.x'):
    if os.path.isfile('build_forces.sh'):
        import subprocess
        subprocess.check_call(['./build_forces.sh'])

# Create executor and register sim to it.
if USE_BALSAM:
    from libensemble.executors.balsam_executor import BalsamMPIExecutor
    exctr = BalsamMPIExecutor()
else:
    from libensemble.executors.mpi_executor import MPIExecutor
    exctr = MPIExecutor()
exctr.register_app(full_path=sim_app, app_name='forces')

# Note: Attributes such as kill_rate are used to control the forces tests; these would not be typical parameters.

# State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {'sim_f': run_forces,         # Function whose output is being minimized
             'in': ['x'],                 # Name of input for sim_f
             'out': [('energy', float)],  # Name, type of output from sim_f
             'user': {'keys': ['seed'],
                      'cores': 2,
                      'sim_particles': 1e3,
                      'sim_timesteps': 5,
                      'sim_kill_minutes': 10.0,
                      'particle_variance': 0.2,
                      'kill_rate': 0.5,
                      'fail_on_sim': False,
                      'fail_on_submit': False}  # Won't occur if 'fail_on_sim' True
             }
# end_sim_specs_rst_tag

# State the generating function, its arguments, output, and necessary parameters.
gen_specs = {'gen_f': gen_f,                  # Generator function
             'in': [],                        # Generator input
             'out': [('x', float, (1,))],     # Name, type and size of data produced (must match sim_specs 'in')
             'user': {'lb': np.array([0]),             # Lower bound for random sample array (1D)
                      'ub': np.array([32767]),         # Upper bound for random sample array (1D)
                      'gen_batch_size': 1000,          # How many random samples to generate in one call
                      }
             }

if PERSIS_GEN:
    alloc_specs = {'alloc_f': alloc_f}
else:
    alloc_specs = {'alloc_f': alloc_f,
                   'user': {'batch_mode': True,    # If true, wait for all sims to be processed before generating more
                            'num_active_gens': 1}  # Only one active generator at a time
                   }

libE_specs['save_every_k_gens'] = 1000  # Save H to file every K generated points
libE_specs['sim_dirs_make'] = True      # Run each sim in a separate directory
libE_specs['profile'] = False    # Whether to have libE profile on (default False)

# Maximum number of simulations
sim_max = 8
exit_criteria = {'sim_max': sim_max}

# Create a different random number stream for each worker and the manager
persis_info = {}
persis_info = add_unique_random_streams(persis_info, nworkers + 1)

try:
    H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria,
                                persis_info=persis_info,
                                alloc_specs=alloc_specs,
                                libE_specs=libE_specs)

except ManagerException:
    if is_manager and sim_specs['user']['fail_on_sim']:
        check_log_exception()
        test_libe_stats('Exception occurred\n')
else:
    if is_manager:
        save_libE_output(H, persis_info, __file__, nworkers)
        if sim_specs['user']['fail_on_submit']:
            test_libe_stats('Task Failed\n')
        test_ensemble_dir(libE_specs, './ensemble', nworkers, sim_max)

Object + yaml Version

tests/scaling_tests/forces/run_libe_forces_from_yaml.py
#!/usr/bin/env python
import os
import numpy as np

from libensemble import Ensemble
from libensemble.executors.mpi_executor import MPIExecutor

####################

sim_app = os.path.join(os.getcwd(), 'forces.x')

if not os.path.isfile('forces.x'):
    if os.path.isfile('build_forces.sh'):
        import subprocess
        subprocess.check_call(['./build_forces.sh'])

####################

forces = Ensemble()
forces.from_yaml('forces.yaml')

forces.logger.set_level('INFO')

if forces.is_manager:
    print('\nRunning with {} workers\n'.format(forces.nworkers))

exctr = MPIExecutor()
exctr.register_app(full_path=sim_app, app_name='forces')

forces.libE_specs['ensemble_dir_path'] = './ensemble'
forces.gen_specs['user'].update({
    'lb': np.array([0]),
    'ub': np.array([32767])
})

forces.persis_info.add_random_streams()

forces.run()

if forces.is_manager:
    forces.save_output(__file__)

tests/scaling_tests/forces/forces.yaml
libE_specs:
    save_every_k_gens: 1000
    sim_dirs_make: True
    profile: False
    exit_criteria:
        sim_max: 8

sim_specs:
    function: forces_simf.run_forces
    inputs:
        - x
    outputs:
        energy:
            type: float

    user:
        keys:
            - seed
        cores: 1
        sim_particles: 1.e+3
        sim_timesteps: 5
        sim_kill_minutes: 10.0
        particle_variance: 0.2
        kill_rate: 0.5
        fail_on_sim: False
        fail_on_submit: False

gen_specs:
    function: libensemble.gen_funcs.sampling.uniform_random_sample
    outputs:
        x:
            type: float
            size: 1
    user:
        gen_batch_size: 1000

alloc_specs:
    function: libensemble.alloc_funcs.give_sim_work_first.give_sim_work_first
    outputs:
        allocated:
            type: bool
    user:
        batch_mode: True
        num_active_gens: 1

Persistent APOSMM with Gradients

This example is also from the regression tests and demonstrates configuring a persistent run via a dedicated allocation function (persistent_aposmm_alloc), with the persistent APOSMM generator occupying one of the workers.

tests/regression_tests/test_persistent_aposmm_with_grad.py
  1"""
  2Runs libEnsemble with APOSMM with an NLopt local optimizer that uses gradient
  3information from the sim_f
  4
  5Execute via one of the following commands (e.g. 3 workers):
  6   mpiexec -np 4 python3 test_persistent_aposmm_with_grad.py
  7   python3 test_persistent_aposmm_with_grad.py --nworkers 3 --comms local
  8   python3 test_persistent_aposmm_with_grad.py --nworkers 3 --comms tcp
  9
 10When running with the above commands, the number of concurrent evaluations of
 11the objective function will be 2, as one of the three workers will be the
 12persistent generator.
 13"""
 14
 15# Do not change these lines - they are parsed by run-tests.sh
 16# TESTSUITE_COMMS: local mpi tcp
 17# TESTSUITE_NPROCS: 4
 18# TESTSUITE_EXTRA: true
 19
 20import sys
 21import numpy as np
 22
 23# Import libEnsemble items for this test
 24from libensemble.libE import libE
 25from math import gamma, pi, sqrt
 26from libensemble.sim_funcs.six_hump_camel import six_hump_camel as sim_f, six_hump_camel_func, six_hump_camel_grad
 27
 28import libensemble.gen_funcs
 29
 30libensemble.gen_funcs.rc.aposmm_optimizers = 'nlopt'
 31from libensemble.gen_funcs.persistent_aposmm import aposmm as gen_f
 32
 33from libensemble.alloc_funcs.persistent_aposmm_alloc import persistent_aposmm_alloc as alloc_f
 34from libensemble.tools import parse_args, save_libE_output, add_unique_random_streams
 35from libensemble.tests.regression_tests.support import six_hump_camel_minima as minima
 36from time import time
 37
 38nworkers, is_manager, libE_specs, _ = parse_args()
 39
 40if is_manager:
 41    start_time = time()
 42
 43if nworkers < 2:
 44    sys.exit("Cannot run with a persistent worker if only one worker -- aborting...")
 45
 46n = 2
 47sim_specs = {
 48    'sim_f': sim_f,
 49    'in': ['x'],
 50    'out': [('f', float), ('grad', float, n)],
 51}
 52
 53gen_out = [
 54    ('x', float, n),
 55    ('x_on_cube', float, n),
 56    ('sim_id', int),
 57    ('local_min', bool),
 58    ('local_pt', bool),
 59]
 60
 61gen_in = ['x', 'f', 'grad', 'local_pt', 'sim_id', 'returned', 'x_on_cube', 'local_min']
 62
 63gen_specs = {
 64    'gen_f': gen_f,
 65    'in': gen_in,
 66    'persis_in': gen_in,
 67    'out': gen_out,
 68    'user': {
 69        'initial_sample_size': 0,  # Don't need to do evaluations because the sampling already done below
 70        'localopt_method': 'LD_MMA',
 71        'rk_const': 0.5 * ((gamma(1 + (n / 2)) * 5) ** (1 / n)) / sqrt(pi),
 72        'stop_after_this_many_minima': 25,
 73        'xtol_rel': 1e-6,
 74        'ftol_rel': 1e-6,
 75        'max_active_runs': 6,
 76        'lb': np.array([-3, -2]),
 77        'ub': np.array([3, 2]),
 78    },
 79}
 80
 81alloc_specs = {'alloc_f': alloc_f}
 82
 83persis_info = add_unique_random_streams({}, nworkers + 1)
 84
 85exit_criteria = {'sim_max': 1000}
 86
 87# Load in "already completed" set of 'x','f','grad' values to give to libE/persistent_aposmm
 88sample_size = len(minima)
 89
 90H0_dtype = [
 91    ('x', float, n),
 92    ('grad', float, n),
 93    ('sim_id', int),
 94    ('x_on_cube', float, n),
 95    ('returned', bool),
 96    ('f', float),
 97    ('given_back', bool),
 98    ('given', bool),
 99]
100H0 = np.zeros(sample_size, dtype=H0_dtype)
101
102# Two points in the following sample have the same best function value, which
103# tests the corner case for some APOSMM logic
104H0['x'] = np.round(minima, 1)
105H0['x_on_cube'] = (H0['x'] - gen_specs['user']['lb']) / (gen_specs['user']['ub'] - gen_specs['user']['lb'])
106H0['sim_id'] = range(sample_size)
107H0[['given', 'given_back', 'returned']] = True
108
109for i in range(sample_size):
110    H0['f'][i] = six_hump_camel_func(H0['x'][i])
111    H0['grad'][i] = six_hump_camel_grad(H0['x'][i])
112
113# Perform the run
114H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info, alloc_specs, libE_specs, H0=H0)
115
116if is_manager:
117    print('[Manager]:', H[np.where(H['local_min'])]['x'])
118    print('[Manager]: Time taken =', time() - start_time, flush=True)
119
120    tol = 1e-5
121    for m in minima:
122        # The minima are known on this test problem.
123        # We use their values to test APOSMM has identified all minima
124        print(np.min(np.sum((H[H['local_min']]['x'] - m) ** 2, 1)), flush=True)
125        assert np.min(np.sum((H[H['local_min']]['x'] - m) ** 2, 1)) < tol
126
127    assert len(H) < exit_criteria['sim_max'], "Test should have stopped early"
128
129    save_libE_output(H, persis_info, __file__, nworkers)