Calling Scripts

Below are example calling scripts, which populate the specification dictionaries for each user function and for libEnsemble itself before initiating libEnsemble via the primary libE() call. The primary libEnsemble-relevant portions are highlighted in each example; non-highlighted portions may include setup routines, compilation steps for user applications, or output processing. The first two scripts perform random sampling calculations, while the third drives an optimization routine.

Many other examples of calling scripts can be found in libEnsemble’s regression tests.
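
Each script follows the same basic pattern: build the specification dictionaries and exit criteria, then pass them to libE(). The following minimal sketch distills that pattern only; my_sim, my_gen, and their modules are hypothetical placeholders for user functions, and the field names and sizes are illustrative.

from libensemble.libE import libE
from libensemble.tools import add_unique_random_streams

from my_sim_module import my_sim  # hypothetical user simulator function
from my_gen_module import my_gen  # hypothetical user generator function

nworkers = 4
libE_specs = {'nworkers': nworkers, 'comms': 'local'}

sim_specs = {'sim_f': my_sim, 'in': ['x'], 'out': [('f', float)]}
gen_specs = {'gen_f': my_gen, 'out': [('x', float, (1,))],
             'user': {'gen_batch_size': 10}}

persis_info = add_unique_random_streams({}, nworkers + 1)
exit_criteria = {'sim_max': 100}

H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info,
                            libE_specs=libE_specs)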

Local Sine Tutorial

This example is from the Local Sine Tutorial and is meant to be run with Python's multiprocessing as the comms method ('comms': 'local').

examples/tutorials/tutorial_calling.py
import numpy as np
import matplotlib.pyplot as plt
from libensemble.libE import libE
from libensemble.tools import add_unique_random_streams
from tutorial_gen import gen_random_sample
from tutorial_sim import sim_find_sine

nworkers = 4
libE_specs = {'nworkers': nworkers, 'comms': 'local'}

gen_specs = {'gen_f': gen_random_sample,        # Our generator function
             'out': [('x', float, (1,))],       # gen_f output (name, type, size).
             'user': {'lower': np.array([-3]),  # random sampling lower bound
                      'upper': np.array([3]),   # random sampling upper bound
                      'gen_batch_size': 5       # number of values gen_f will generate per call
                      }
             }

sim_specs = {'sim_f': sim_find_sine,            # Our simulator function
             'in': ['x'],                       # Input field names. 'x' from gen_f output
             'out': [('y', float)]}             # sim_f output. 'y' = sine('x')

persis_info = add_unique_random_streams({}, nworkers+1)  # Initialize manager/workers random streams

exit_criteria = {'sim_max': 80}                 # Stop libEnsemble after 80 simulations

H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info,
                            libE_specs=libE_specs)

# Some (optional) statements to visualize our History array
print([i for i in H.dtype.fields])
print(H)

colors = ['b', 'g', 'r', 'y', 'm', 'c', 'k', 'w']

for i in range(1, nworkers + 1):
    worker_xy = np.extract(H['sim_worker'] == i, H)
    x = [entry.tolist()[0] for entry in worker_xy['x']]
    y = [entry for entry in worker_xy['y']]
    plt.scatter(x, y, label='Worker {}'.format(i), c=colors[i-1])

plt.title('Sine calculations for a uniformly sampled random distribution')
plt.xlabel('x')
plt.ylabel('sine(x)')
plt.legend(loc='lower right')
plt.savefig('tutorial_sines.png')
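
Because nworkers and the 'local' (multiprocessing) comms option are set directly in libE_specs, this script needs no extra command-line arguments and can be run simply with python tutorial_calling.py, assuming tutorial_gen.py and tutorial_sim.py sit alongside it.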

Electrostatic Forces with Executor

This example is from a test used to evaluate the scaling capabilities of libEnsemble by calculating particle electrostatic forces through a user application. The application is registered with either the MPI or Balsam Executor and then submitted for execution within the sim_f (a simplified sketch of that sim_f pattern follows the listing below). Note the use of the parse_args() and save_libE_output() convenience functions from the tools module.

tests/scaling_tests/forces/run_libe_forces.py
#!/usr/bin/env python
import os
import numpy as np
from forces_simf import run_forces  # Sim func from current dir

# Import libEnsemble modules
from libensemble.libE import libE
from libensemble.manager import ManagerException
from libensemble.tools import parse_args, save_libE_output, add_unique_random_streams
from libensemble import logger
from forces_support import test_libe_stats, test_ensemble_dir, check_log_exception

USE_BALSAM = False
PERSIS_GEN = False

if PERSIS_GEN:
    from libensemble.gen_funcs.persistent_uniform_sampling import persistent_uniform as gen_f
    from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f
else:
    from libensemble.gen_funcs.sampling import uniform_random_sample as gen_f
    from libensemble.alloc_funcs.give_sim_work_first import give_sim_work_first as alloc_f


logger.set_level('INFO')  # INFO is now default

nworkers, is_manager, libE_specs, _ = parse_args()

if is_manager:
    print('\nRunning with {} workers\n'.format(nworkers))

sim_app = os.path.join(os.getcwd(), 'forces.x')

# Normally would be pre-compiled
if not os.path.isfile('forces.x'):
    if os.path.isfile('build_forces.sh'):
        import subprocess
        subprocess.check_call(['./build_forces.sh'])

# Create executor and register sim to it.
if USE_BALSAM:
    from libensemble.executors.balsam_executor import BalsamMPIExecutor
    exctr = BalsamMPIExecutor()  # Use allow_oversubscribe=False to prevent oversubscription
else:
    from libensemble.executors.mpi_executor import MPIExecutor
    exctr = MPIExecutor()  # Use allow_oversubscribe=False to prevent oversubscription
exctr.register_calc(full_path=sim_app, calc_type='sim')

# Note: Attributes such as kill_rate are used to control the forces tests; they would not be typical parameters.

# State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {'sim_f': run_forces,         # Function whose output is being minimized
             'in': ['x'],                 # Name of input for sim_f
             'out': [('energy', float)],  # Name, type of output from sim_f
             'user': {'keys': ['seed'],
                      'cores': 2,
                      'sim_particles': 1e3,
                      'sim_timesteps': 5,
                      'sim_kill_minutes': 10.0,
                      'particle_variance': 0.2,
                      'kill_rate': 0.5,
                      'fail_on_sim': False,
                      'fail_on_submit': False}  # Won't occur if 'fail_on_sim' True
             }
# end_sim_specs_rst_tag

# State the generating function, its arguments, output, and necessary parameters.
gen_specs = {'gen_f': gen_f,                  # Generator function
             'in': [],                        # Generator input
             'out': [('x', float, (1,))],     # Name, type and size of data produced (must match sim_specs 'in')
             'user': {'lb': np.array([0]),             # Lower bound for random sample array (1D)
                      'ub': np.array([32767]),         # Upper bound for random sample array (1D)
                      'gen_batch_size': 1000,          # How many random samples to generate in one call
                      }
             }

if PERSIS_GEN:
    alloc_specs = {'alloc_f': alloc_f, 'out': [('given_back', bool)]}
else:
    alloc_specs = {'alloc_f': alloc_f,
                   'out': [('allocated', bool)],
                   'user': {'batch_mode': True,    # If True, wait for all sims to return before generating more
                            'num_active_gens': 1}  # Only one active generator at a time
                   }

libE_specs['save_every_k_gens'] = 1000  # Save History array every 1000 generated points
libE_specs['sim_dirs_make'] = True      # Run each sim in its own directory
libE_specs['profile'] = False    # Whether to have libE profile on (default False)

# Maximum number of simulations
sim_max = 8
exit_criteria = {'sim_max': sim_max}

# Create a different random number stream for each worker and the manager
persis_info = {}
persis_info = add_unique_random_streams(persis_info, nworkers + 1)

try:
    H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria,
                                persis_info=persis_info,
                                alloc_specs=alloc_specs,
                                libE_specs=libE_specs)

except ManagerException:
    if is_manager and sim_specs['user']['fail_on_sim']:
        check_log_exception()
        test_libe_stats('Exception occurred\n')
else:
    if is_manager:
        save_libE_output(H, persis_info, __file__, nworkers)
        if sim_specs['user']['fail_on_submit']:
            test_libe_stats('Task Failed\n')
        test_ensemble_dir(libE_specs, './ensemble', nworkers, sim_max)
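
The run_forces sim_f registered above lives in forces_simf.py and is where the registered application is actually launched. The snippet below is only a simplified sketch of that submission pattern, not the real forces_simf.py: the argument handling, polling loop, and output handling are illustrative placeholders.

import time
import numpy as np

from libensemble.executors.executor import Executor
from libensemble.message_numbers import WORKER_DONE, TASK_FAILED


def run_forces(H, persis_info, sim_specs, libE_info):
    """Sketch of a sim_f that submits the registered 'sim' application."""
    exctr = Executor.executor                        # Executor created in the calling script

    # Build application arguments from sim_specs['user'] and the input point (illustrative)
    particles = str(int(sim_specs['user']['sim_particles']))
    timesteps = str(sim_specs['user']['sim_timesteps'])
    seed = str(int(np.rint(H['x'][0][0])))
    args = particles + ' ' + seed + ' ' + timesteps

    task = exctr.submit(calc_type='sim', num_procs=sim_specs['user']['cores'],
                        app_args=args, stdout='out.txt', stderr='err.txt')

    while not task.finished:                         # Minimal polling loop (no timeout or kill handling here)
        time.sleep(1)
        task.poll()

    calc_status = WORKER_DONE if task.state == 'FINISHED' else TASK_FAILED

    H_o = np.zeros(1, dtype=sim_specs['out'])
    H_o['energy'][0] = 0.0                           # The real sim_f reads the energy from the application output
    return H_o, persis_info, calc_status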

Persistent APOSMM with Gradients

This example is also from the regression tests and demonstrates configuring a persistent run, pairing the persistent APOSMM generator with the persistent_aposmm_alloc allocation function.

tests/regression_tests/test_persistent_aposmm_with_grad.py
# """
# Runs libEnsemble on the 6-hump camel problem. Documented here:
#    https://www.sfu.ca/~ssurjano/camel6.html
#
# Execute via one of the following commands (e.g. 3 workers):
#    mpiexec -np 4 python3 test_persistent_aposmm_with_grad.py
#    python3 test_persistent_aposmm_with_grad.py --nworkers 3 --comms local
#    python3 test_persistent_aposmm_with_grad.py --nworkers 3 --comms tcp
#
# The number of concurrent evaluations of the objective function will be 4-1=3.
# """

# Do not change these lines - they are parsed by run-tests.sh
# TESTSUITE_COMMS: local mpi tcp
# TESTSUITE_NPROCS: 4

import sys
import numpy as np

# Import libEnsemble items for this test
from libensemble.libE import libE
from math import gamma, pi, sqrt
from libensemble.sim_funcs.six_hump_camel import six_hump_camel as sim_f, six_hump_camel_func, six_hump_camel_grad

import libensemble.gen_funcs
libensemble.gen_funcs.rc.aposmm_optimizers = 'nlopt'
from libensemble.gen_funcs.persistent_aposmm import aposmm as gen_f

from libensemble.alloc_funcs.persistent_aposmm_alloc import persistent_aposmm_alloc as alloc_f
from libensemble.tools import parse_args, save_libE_output, add_unique_random_streams
from libensemble.tests.regression_tests.support import six_hump_camel_minima as minima
from time import time

nworkers, is_manager, libE_specs, _ = parse_args()

if is_manager:
    start_time = time()

if nworkers < 2:
    sys.exit("Cannot run with a persistent worker if only one worker -- aborting...")

n = 2
sim_specs = {'sim_f': sim_f,
             'in': ['x'],
             'out': [('f', float), ('grad', float, n)]}

gen_out = [('x', float, n), ('x_on_cube', float, n), ('sim_id', int),
           ('local_min', bool), ('local_pt', bool)]

gen_specs = {'gen_f': gen_f,
             'in': ['x', 'f', 'grad', 'local_pt', 'sim_id', 'returned', 'x_on_cube', 'local_min'],
             'out': gen_out,
             'user': {'initial_sample_size': 0,  # Don't need to do evaluations because the sampling is already done below
                      'localopt_method': 'LD_MMA',
                      'rk_const': 0.5*((gamma(1+(n/2))*5)**(1/n))/sqrt(pi),
                      'stop_after_this_many_minima': 25,
                      'xtol_rel': 1e-6,
                      'ftol_rel': 1e-6,
                      'max_active_runs': 6,
                      'lb': np.array([-3, -2]),
                      'ub': np.array([3, 2])}
             }

alloc_specs = {'alloc_f': alloc_f, 'out': [('given_back', bool)], 'user': {}}

persis_info = add_unique_random_streams({}, nworkers + 1)

exit_criteria = {'sim_max': 1000}

# Load in "already completed" set of 'x','f','grad' values to give to libE/persistent_aposmm
sample_size = len(minima)

H0 = np.zeros(sample_size, dtype=[('x', float, n), ('grad', float, n), ('sim_id', int),
                                  ('x_on_cube', float, n), ('returned', bool),
                                  ('f', float), ('given_back', bool), ('given', bool)])

# Two points in the following sample have the same best function value, which
# tests the corner case for some APOSMM logic
H0['x'] = np.round(minima, 1)
H0['x_on_cube'] = (H0['x']-gen_specs['user']['lb']) / (gen_specs['user']['ub']-gen_specs['user']['lb'])
H0['sim_id'] = range(sample_size)
H0[['given', 'given_back', 'returned']] = True

for i in range(sample_size):
    H0['f'][i] = six_hump_camel_func(H0['x'][i])
    H0['grad'][i] = six_hump_camel_grad(H0['x'][i])

# Perform the run
H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info,
                            alloc_specs, libE_specs, H0=H0)

if is_manager:
    print('[Manager]:', H[np.where(H['local_min'])]['x'])
    print('[Manager]: Time taken =', time() - start_time, flush=True)

    tol = 1e-5
    for m in minima:
        # The minima are known for this test problem.
        # We use their values to test that APOSMM has identified all minima
        print(np.min(np.sum((H[H['local_min']]['x'] - m)**2, 1)), flush=True)
        assert np.min(np.sum((H[H['local_min']]['x'] - m)**2, 1)) < tol

    assert len(H) < exit_criteria['sim_max'], "Test should have stopped early"

    save_libE_output(H, persis_info, __file__, nworkers)