diff --git a/.flake8 b/.flake8 index b861c7ec5..12681d3a7 100644 --- a/.flake8 +++ b/.flake8 @@ -25,29 +25,23 @@ per-file-ignores = # systems/versions of MPI: libensemble/tests/standalone_tests/mpi_launch_test/create_mpi_jobs.py:E402 - # Similar reasoning for configuring mpi4py - libensemble/tests/unit_tests/test_executor.py:E402 - # Matplotlib needs something set before import scripts/*:E402 # Ignoring linelength in APOSMM libensemble/gen_funcs/*aposmm.py:E501 examples/gen_funcs/*aposmm.py:E501 + libensemble/tests/functionality_tests/test_mpi_runners.py:E501 + libensemble/tests/functionality_tests/test_mpi_runners_zrw_supernode_uneven.py:E501 # Need to set something before the APOSMM import libensemble/tests/scaling_tests/warpx/run_libensemble_on_warpx.py:E402 examples/calling_scripts/run_libensemble_on_warpx.py:E402 - libensemble/gen_funcs/persistent_aposmm.py:E402, E501 libensemble/tests/regression_tests/test_persistent_aposmm*:E402 - libensemble/tests/regression_tests/test_with_app_persistent_aposmm_tao_nm.py:E402 libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py:E402 libensemble/tests/regression_tests/test_ytopt_heffte.py:E402 libensemble/tests/functionality_tests/test_uniform_sampling_then_persistent_localopt_runs.py:E402 libensemble/tests/functionality_tests/test_active_persistent_worker_abort.py:E402 - libensemble/tests/functionality_tests/test_mpi_runners.py:E501 - libensemble/tests/functionality_tests/test_mpi_runners_zrw_supernode_uneven.py:E501 - libensemble/tests/deprecated_tests/test_old_aposmm*:E402 libensemble/tests/unit_tests/test_persistent_aposmm.py:E402 # Allow undefined name '__version__' diff --git a/.wci.yml b/.wci.yml index 156e06954..70ea68fe5 100644 --- a/.wci.yml +++ b/.wci.yml @@ -2,8 +2,7 @@ name: libEnsemble icon: https://raw.githubusercontent.com/Libensemble/libensemble/main/docs/images/libE_logo.png headline: Tool for running dynamic ensembles. 
description: | - libEnsemble is a Python toolkit for coordinating workflows of asynchronous - and dynamic ensembles of calculations. + libEnsemble is a Python toolkit for running dynamic ensembles of calculations. Users write generator and simulator functions to express their ensembles. A library of example functions is available which can be modified as needed. @@ -17,8 +16,8 @@ description: | language: Python release: - version: 0.10.2 - date: 2023-07-24 + version: 1.0.0 + date: 2023-09-25 documentation: general: https://libensemble.readthedocs.io diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9e0a0bebc..4d98d6649 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,75 @@ GitHub issues are referenced, and can be viewed with hyperlinks on the `github r .. _`github releases page`: https://github.com/Libensemble/libensemble/releases +Release 1.0.0 +-------------- + +:Date: September 25, 2023 + +New capabilities: + +* *libE_specs* option `final_gen_send` returns last results to the generator (replaces `final_fields`). #1086 +* *libE_specs* option `reuse_output_dir` allows reuse of workflow and ensemble directories. #1028 #1041 +* *libE_specs* option `calc_dir_id_width` no. of digits for calc ID in output sim/gen directories. #1052 / #1066 +* Added `gen_num_procs` and `gen_num_gpus` *libE_specs* (and *persis_info*) options for resourcing a generator. #1068 +* Added `gpu_env_fallback` option to platform fields - specifies a GPU environment variable (for non-MPI usage). #1050 +* New MPIExecutor `submit()` argument `mpi_runner_type` specifies an MPI runner for current call only. #1054 +* Allow oversubscription when using the `num_procs` *gen_specs["out"]* option. #1058 +* sim/gen_specs can use `outputs` in place of `out` to be consistent with `inputs`. #1075 +* Executor can be obtained from `libE_info` (4th parameter) in user functions. #1078 + +Breaking changes: + +* *libE_specs* option `final_fields` is removed in favor of `final_gen_send`. 
#1086 +* *libE_specs* option `kill_canceled_sims` now defaults to **False**. #1062 +* *parse_args* is not run automatically by `Ensemble` constructor. + +Updates to **Object Oriented** Ensemble interface: + +* Added `parse_args` as option to `Ensemble` constructor. #1065 +* The *executor* can be passed as an option to the `Ensemble` constructor. #1078 +* Better handling of `Ensemble.add_random_streams` and `ensemble.persis_info`. #1074 + +Output changes: + +* The worker ID suffix is removed from sim/gen output directories. #1041 +* Separate *ensemble.log* and *libE_stats.txt* for different workflows directories. #1027 #1041 +* Defaults to four digits for sim/gen ID in output directories (adds digits on overflow). #1052 / #1066 + +Bug fixes: + +* Resolved PETSc/OpenMPI issue (when using the Executor). #1064 +* Prevent `mpi4py` validation running during local comms (when using OO interface). #1065 + +Performance changes: + +* Optimize `kill_cancelled_sims` function. #1043 / #1063 +* *safe_mode* defaults to **False** (for performance). #1053 + +Updates to example functions: + +* Multiple regression tests and examples ported to use OO ensemble interface. #1014 + +Update forces examples: + +* Make persistent generator the default for both simple and GPU examples (inc. updated tutorials). +* Update to object oriented interface. +* Added separate variable resources example for forces GPU. +* Rename `multi_task` example to `multi_app`. + +Documentation: + +* General overhaul and simplification of documentation. #992 + +:Note: + +* Tested platforms include Linux, MacOS, Windows, and major systems such as Frontier (OLCF), Polaris, and Perlmutter (NERSC). The major system tests ran heterogeneous workflows. +* Tested Python versions: (Cpython) 3.7, 3.8, 3.9, 3.10, 3.11. + +:Known Issues: + +* See known issues section in the documentation. 
+ Release 0.10.2 -------------- diff --git a/docs/FAQ.rst b/docs/FAQ.rst index b80a26927..43ec4680b 100644 --- a/docs/FAQ.rst +++ b/docs/FAQ.rst @@ -4,7 +4,16 @@ Frequently Asked Questions If you have any additional questions, feel free to contact us through Support_. -.. _Support: https://github.com/Libensemble/libensemble#resources +.. _Support: https://libensemble.readthedocs.io/en/main/introduction.html#resources + +Debugging +--------- + +We recommend using the following options to help debug workflows:: + + from libensemble import logger + logger.set_level("DEBUG") + libE_specs["safe_mode"] = True Common Errors ------------- @@ -29,7 +38,7 @@ Common Errors one worker, leaving none to run simulation functions. - An error in the allocation function. For example, perhaps the allocation - waiting for all requested evaluations to be returned (e.g, before starting a + waiting for all requested evaluations to be returned (e.g., before starting a new generator), but this condition is not returning True even though all scheduled evaluations have returned. This can be due to incorrect implementation (e.g., it has not considered points that @@ -126,13 +135,18 @@ HPC Errors and Questions This has been observed with the OFA fabric when using mpi4py and usually indicates MPI messages aren't being received correctly. The solution - is to either switch fabric or turn off matching probes. See the answer for "Why + is to either switch fabric or turn off matching probes. See the answer to "Why does libEnsemble hang on certain systems when running with MPI?" For more information see https://bitbucket.org/mpi4py/mpi4py/issues/102/unpicklingerror-on-commrecv-after-iprobe. .. dropdown:: **srun: Job \*\*\*\*\*\* step creation temporarily disabled, retrying (Requested nodes are busy)** + Note that this message has been observed on Perlmutter when none of the problems + below are present, and is likely caused by interference with system processes + that run between tasks. 
In this case, it may cause overhead but does not prevent + correct functioning. + When running on a SLURM system, this implies that you are trying to run on a resource that is already dedicated to another task. The reason can vary, some reasons are: @@ -251,7 +265,7 @@ macOS and Windows Errors **"RuntimeError: An attempt has been made to start a new process... this probably means that you are not using fork... " if __name__ == "__main__": freeze_support() ...** - You need to place your main entrypoint code underneath an ``if __name__ == "__main__":`` block. + You need to place your main entry point code underneath an ``if __name__ == "__main__":`` block. Explanation: Python chooses one of three methods to start new processes when using multiprocessing (``--comms local`` with libEnsemble). These are ``"fork"``, ``"spawn"``, and ``"forkserver"``. ``"fork"`` diff --git a/docs/_static/libE_logo.png b/docs/_static/libE_logo.png deleted file mode 120000 index 94a5e7772..000000000 --- a/docs/_static/libE_logo.png +++ /dev/null @@ -1 +0,0 @@ -../images/libE_logo.png \ No newline at end of file diff --git a/docs/_static/libE_logo_white.png b/docs/_static/libE_logo_white.png deleted file mode 120000 index 382961b5e..000000000 --- a/docs/_static/libE_logo_white.png +++ /dev/null @@ -1 +0,0 @@ -../images/libE_logo_white.png \ No newline at end of file diff --git a/docs/advanced_installation.rst b/docs/advanced_installation.rst index 4837c1897..eaf7f443d 100644 --- a/docs/advanced_installation.rst +++ b/docs/advanced_installation.rst @@ -13,7 +13,7 @@ automatically installed alongside libEnsemble: * pyyaml_ ``>= v6.0`` * tomli_ ``>= 1.2.1`` -In view of libEnsemble's compiled dependencies, the following installation +Given libEnsemble's compiled dependencies, the following installation methods each offer a trade-off between convenience and the ability to customize builds, including platform-specific optimizations. 
@@ -103,8 +103,8 @@ Further recommendations for selected HPC systems are given in the spack install py-libensemble The above command will install the latest release of libEnsemble with - the required dependencies only. There are other optional - dependencies that can be specified through variants. The following + the required dependencies only. Other optional + dependencies can be specified through variants. The following line installs libEnsemble version 0.7.2 with some common variants (e.g., using :doc:`APOSMM<../examples/aposmm>`): @@ -122,12 +122,12 @@ Further recommendations for selected HPC systems are given in the spack install py-libensemble +scipy +mpmath +petsc4py ^py-petsc4py~mpi ^petsc~mpi~hdf5~hypre~superlu-dist - The install will create modules for libEnsemble and the dependent + The installation will create modules for libEnsemble and the dependent packages. These can be loaded by running:: spack load -r py-libensemble - Any Python packages will be added to the PYTHONPATH, when the modules are loaded. If you do not have + Any Python packages will be added to the PYTHONPATH when the modules are loaded. If you do not have modules on your system you may need to install ``lmod`` (also available in Spack):: spack install lmod @@ -181,7 +181,7 @@ The following packages may be installed separately to enable additional features .. _Balsam: https://balsam.readthedocs.io/en/latest/ .. _conda-forge: https://conda-forge.org/ .. _Conda: https://docs.conda.io/en/latest/ -.. _Globus Compute: https://funcx.readthedocs.io/en/latest/ +.. _Globus Compute: https://www.globus.org/compute .. _GitHub: https://github.com/Libensemble/libensemble .. _MPICH: https://www.mpich.org/ .. 
_NumPy: http://www.numpy.org diff --git a/docs/data_structures/libE_specs.rst b/docs/data_structures/libE_specs.rst index 89567bc7a..9bb355569 100644 --- a/docs/data_structures/libE_specs.rst +++ b/docs/data_structures/libE_specs.rst @@ -25,13 +25,13 @@ libEnsemble is primarily customized by setting options within a ``LibeSpecs`` cl .. tab-item:: General "comms" [str] = ``"mpi"``: - Manager/Worker communications mode. ``'mpi'``, ``'local'``, or ``'tcp'`` + Manager/Worker communications mode: ``'mpi'``, ``'local'``, or ``'tcp'``. "nworkers" [int]: Number of worker processes in ``"local"`` or ``"tcp"``. "mpi_comm" [MPI communicator] = ``MPI.COMM_WORLD``: libEnsemble MPI communicator. "dry_run" [bool] = ``False``: - Whether libEnsemble should immediately exit after validating all inputs + Whether libEnsemble should immediately exit after validating all inputs. "abort_on_exception" [bool] = ``True``: In MPI mode, whether to call ``MPI_ABORT`` on an exception. If ``False``, an exception will be raised by the manager. @@ -45,7 +45,7 @@ libEnsemble is primarily customized by setting options within a ``LibeSpecs`` cl On libEnsemble shutdown, number of seconds after which workers considered timed out, then terminated. "kill_canceled_sims" [bool] = ``False``: - Try to kill sims with ``"cancel_requested"`` set ``True``. + Try to kill sims with ``"cancel_requested"`` set to ``True``. If ``False``, the manager avoids this moderate overhead. "disable_log_files" [bool] = ``False``: Disable ``ensemble.log`` and ``libE_stats.txt`` log files. @@ -74,7 +74,7 @@ libEnsemble is primarily customized by setting options within a ``LibeSpecs`` cl "ensemble_copy_back" [bool] = ``False``: Whether to copy back contents of ``ensemble_dir_path`` to launch - location. Useful if ``ensemble_dir_path`` located on node-local storage. + location. Useful if ``ensemble_dir_path`` is located on node-local storage. 
"reuse_output_dir" [bool] = ``False``: Whether to allow overwrites and access to previous ensemble and workflow directories in subsequent runs. diff --git a/docs/data_structures/persis_info.rst b/docs/data_structures/persis_info.rst index ea7aa54c0..841090298 100644 --- a/docs/data_structures/persis_info.rst +++ b/docs/data_structures/persis_info.rst @@ -13,7 +13,7 @@ and from the corresponding workers. These are received in the ``persis_info`` argument of user functions, and returned as the optional second return value. A typical example is a random number generator stream to be used in consecutive -calls to a generator (see function +calls to a generator (see :meth:`add_unique_random_streams()`) All other entries persist on the manager and can be updated in the calling script @@ -55,7 +55,7 @@ Examples: .. literalinclude:: ../../libensemble/alloc_funcs/start_only_persistent.py :linenos: :start-at: if gen_count < persis_info.get("num_gens_started", 0): - :end-before: # Give evaluated results back to a running persistent gen + :end-before: # Give evaluated results back to the persistent gen :emphasize-lines: 1 :caption: libensemble/alloc_funcs/start_only_persistent.py diff --git a/docs/data_structures/platform_specs.rst b/docs/data_structures/platform_specs.rst index 329ed9127..35198535f 100644 --- a/docs/data_structures/platform_specs.rst +++ b/docs/data_structures/platform_specs.rst @@ -5,13 +5,13 @@ Platform Specs libEnsemble detects platform specifications including MPI runners and resources. Usually this will result in the correct settings. However, users can configure -platform specification via the `platform_specs`_ option or indicate a known +platform specifications via the `platform_specs`_ option or indicate a known platform via the `platform`_ option. platform_specs -------------- -A Platform object or dictionary specifying settings for a platform. +A ``Platform`` object or dictionary specifying settings for a platform. 
To define a platform (in calling script): @@ -47,7 +47,7 @@ To define a platform (in calling script): "scheduler_match_slots": False, } -The list of platform fields are given below. Any fields not given, will be +The list of platform fields is given below. Any fields not given will be auto-detected by libEnsemble. .. _platform-fields: @@ -76,15 +76,15 @@ See :ref:`known platforms`. platform -------- -A string giving the name of a known platform defined in the platforms module. +A string giving the name of a known platform defined in the platforms module. .. code-block:: python libE_specs["platform"] = "perlmutter_g" -Note: the environment variable LIBE_PLATFORM is an alternative way of setting. +Note: the environment variable ``LIBE_PLATFORM`` is an alternative way of setting. -E.g., on command line or batch submission script: +E.g., in the command line or batch submission script: .. code-block:: shell diff --git a/docs/examples/aposmm.rst b/docs/examples/aposmm.rst index 4037dfbb6..9ecdbdb57 100644 --- a/docs/examples/aposmm.rst +++ b/docs/examples/aposmm.rst @@ -14,7 +14,7 @@ Configuring APOSMM APOSMM works with a choice of optimizers, some requiring external packages. To import the optimization packages (and their dependencies) at a global level -(recommended), add the following lines in the calling script, before importing +(recommended), add the following lines in the calling script before importing APOSMM:: import libensemble.gen_funcs @@ -26,8 +26,8 @@ where ``optimizers`` is a string (or list of strings) from the available options .. dropdown:: Issues with ensemble hanging or failed simulations? - Note, that if using **mpi4py** comms, PETSc must be imported at the global - level, or the ensemble may hang. + Note that if using **mpi4py** comms, PETSc must be imported at the global + level or the ensemble may hang. 
Exception: In the case that you are using the MPIExecutor or other MPI inside a user function and you are using Open MPI, then you must: diff --git a/docs/executor/executor.rst b/docs/executor/executor.rst index f7739038d..c3b68fa92 100644 --- a/docs/executor/executor.rst +++ b/docs/executor/executor.rst @@ -43,11 +43,11 @@ See the Executor APIs for optional arguments. :task.process: (process obj) The process object used by the underlying process manager (e.g., return value of subprocess.Popen). - :task.errcode: (int) The errorcode/return code used by the underlying process manager. + :task.errcode: (int) The error code (or return code) used by the underlying process manager. :task.finished: (boolean) True means task has finished running - not whether it was successful. - :task.success: (boolean) Did task complete successfully (e.g., the returncode is zero)? + :task.success: (boolean) Did task complete successfully (e.g., the return code is zero)? :task.runtime: (int) Time in seconds that task has been running. - :task.submit_time: (int) Time since epoch that task was submitted + :task.submit_time: (int) Time since epoch that task was submitted. :task.total_time: (int) Total time from task submission to completion (only available when task is finished). Run configuration attributes - some will be autogenerated: diff --git a/docs/executor/overview.rst b/docs/executor/overview.rst index fe7f4a54c..32dbcd105 100644 --- a/docs/executor/overview.rst +++ b/docs/executor/overview.rst @@ -90,8 +90,8 @@ Example use-cases: See the :doc:`Executor` or :doc:`MPIExecutor` interface for the complete API. -See :doc:`Running on HPC Systems<../platforms/platforms_index>` to see, with -diagrams, how common options such as ``libE_specs["dedicated_mode"]`` affect the +See :doc:`Running on HPC Systems<../platforms/platforms_index>` for illustrations +of how common options such as ``libE_specs["dedicated_mode"]`` affect the run configuration on clusters and supercomputers. 
Advanced Features @@ -105,8 +105,8 @@ In simulation function (sim_f). import time - def sim_func(H, persis_info, sim_specs, libE_info): + def sim_func(H, persis_info, sim_specs, libE_info): input_param = str(int(H["x"][0][0])) exctr = libE_info["executor"] @@ -121,8 +121,7 @@ In simulation function (sim_f). timeout_sec = 600 poll_delay_sec = 1 - while(not task.finished): - + while not task.finished: # Has manager sent a finish signal if exctr.manager_kill_received(): task.kill() @@ -153,14 +152,13 @@ In simulation function (sim_f). .. ... Users who wish to poll only for manager kill signals and timeouts don't necessarily -need to construct a polling loop like above, but can instead use an the ``Executor`` +need to construct a polling loop like above, but can instead use the ``Executor`` built-in ``polling_loop()`` method. An alternative to the above simulation function may resemble: .. code-block:: python def sim_func(H, persis_info, sim_specs, libE_info): - input_param = str(int(H["x"][0][0])) exctr = libE_info["executor"] diff --git a/docs/function_guides/allocator.rst b/docs/function_guides/allocator.rst index e583a5891..a65c404ab 100644 --- a/docs/function_guides/allocator.rst +++ b/docs/function_guides/allocator.rst @@ -11,7 +11,7 @@ The ``alloc_f`` is unique since it is called by libEnsemble's manager instead of For allocation functions, as with the other user functions, the level of complexity can vary widely. We encourage experimenting with: - 1. Prioritization of simulations, + 1. Prioritization of simulations 2. Sending results immediately or in batch 3. 
Assigning varying resources to evaluations @@ -26,16 +26,16 @@ Most ``alloc_f`` function definitions written by users resemble:: where: - * :ref:`W` is an array containing worker state info, + * :ref:`W` is an array containing worker state info * :ref:`H` is the *trimmed* History array, containing rows from the generator * :ref:`libE_info` is a set of statistics to determine the progress of work or exit conditions -Most users first check that its appropriate to allocate work:: +Most users first check that it is appropriate to allocate work:: if libE_info["sim_max_given"] or not libE_info["any_idle_workers"]: return {}, persis_info -If allocation is to continue, a support class is instantiated and a +If the allocation is to continue, a support class is instantiated and a :ref:`Work dictionary` is initialized:: manage_resources = "resource_sets" in H.dtype.names or libE_info["use_resource_sets"] @@ -76,10 +76,10 @@ dictionary values to give to those workers: This Work dictionary instructs each worker to call the ``sim_f`` (``tag: 1``) with data from ``"x"`` and a given ``"H_row"`` from the -History array. A worker specific ``persis_info`` is also given. +History array. A worker-specific ``persis_info`` is also given. Constructing these arrays and determining which workers are available -for receiving data is simplified by use of the ``AllocSupport`` class +for receiving data is simplified by the ``AllocSupport`` class available within the ``libensemble.tools.alloc_support`` module: .. dropdown:: AllocSupport @@ -92,7 +92,7 @@ available within the ``libensemble.tools.alloc_support`` module: .. automethod:: __init__ The Work dictionary is returned to the manager alongside ``persis_info``. If ``1`` -is returned as third value, this instructs the ensemble to stop. +is returned as the third value, this instructs the ensemble to stop. .. 
note:: An error occurs when the ``alloc_f`` returns nothing while all workers are idle @@ -124,7 +124,7 @@ allocation function and detect impending timeouts, then pack up cleanup work req or mark points for cancellation. The remaining values above are useful for efficient filtering of H values -(e.g., ``sim_ended_count``), saves a filtering an entire column of H. +(e.g., ``sim_ended_count`` saves filtering by an entire column of H.) Descriptions of included allocation functions can be found :doc:`here<../examples/alloc_funcs>`. The default allocation function is diff --git a/docs/function_guides/function_guide_index.rst b/docs/function_guides/function_guide_index.rst index 95aa107bc..621bf36d2 100644 --- a/docs/function_guides/function_guide_index.rst +++ b/docs/function_guides/function_guide_index.rst @@ -2,8 +2,8 @@ Writing User Functions ====================== -User functions typically only require some familiarity with NumPy_, but if they conform to -the :ref:`user function APIs`, they can incorporate any other machine-learning, +User functions typically require only some familiarity with NumPy_, but if they conform to +the :ref:`user function APIs`, they can incorporate methods from machine-learning, mathematics, resource management, or other libraries/applications. These guides describe common development patterns and optional components: diff --git a/docs/function_guides/generator.rst b/docs/function_guides/generator.rst index b319159eb..c8d0b3ef6 100644 --- a/docs/function_guides/generator.rst +++ b/docs/function_guides/generator.rst @@ -8,7 +8,6 @@ Generator and :ref:`Simulator functions` have relatively similar .. code-block:: python def my_generator(Input, persis_info, gen_specs, libE_info): - batch_size = gen_specs["user"]["batch_size"] Output = np.zeros(batch_size, gen_specs["out"]) @@ -75,7 +74,7 @@ Persistent Generators While non-persistent generators return after completing their calculation, persistent generators do the following in a loop: - 1. 
Receive simulation results and metadata. Exit if metadata instructs + 1. Receive simulation results and metadata; exit if metadata instructs 2. Perform analysis 3. Send subsequent simulation parameters @@ -84,7 +83,7 @@ more complicated. The :doc:`APOSMM<../examples/aposmm>` optimization generator function included with libEnsemble is persistent so it can maintain multiple local optimization subprocesses based on results from complete simulations. -Use ``gen_specs["persis_in"]`` to specify fields to send back to the generator throughout runtime. +Use ``gen_specs["persis_in"]`` to specify fields to send back to the generator throughout the run. ``gen_specs["in"]`` only describes the input fields when the function is **first called**. Functions for a persistent generator to communicate directly with the manager @@ -160,7 +159,7 @@ a worker can be initiated in *active receive* mode by the allocation function (see :ref:`start_only_persistent`). The persistent worker can then send and receive from the manager at any time. -Ensure there are no communication deadlocks in this mode. In manager/worker message exchanges, only the worker-side +Ensure there are no communication deadlocks in this mode. In manager--worker message exchanges, only the worker-side receive is blocking by default (a non-blocking option is available). Cancelling Simulations @@ -173,7 +172,7 @@ Previously submitted simulations can be cancelled by sending a message to the ma - If a generated point is cancelled by the generator **before sending** to another worker for simulation, then it won't be sent. - If that point has **already been evaluated** by a simulation, the ``cancel_requested`` field will remain ``True``. -- If that point is **currently being evaluated**, a kill signal will be sent to the corresponding worker. 
It must be manually processed in the simulation function +- If that point is **currently being evaluated**, a kill signal will be sent to the corresponding worker; it must be manually processed in the simulation function. The :doc:`Borehole Calibration tutorial<../tutorials/calib_cancel_tutorial>` gives an example of the capability to cancel pending simulations. @@ -202,7 +201,7 @@ Generator initiated shutdown If using a supporting allocation function, the generator can prompt the ensemble to shutdown by simply exiting the function (e.g., on a test for a converged value). For example, the allocation function :ref:`start_only_persistent` closes down -the ensemble as soon a persistent generator returns. The usual return values should be given. +the ensemble as soon as a persistent generator returns. The usual return values should be given. Examples -------- diff --git a/docs/function_guides/history_array.rst b/docs/function_guides/history_array.rst index f5133aa82..6820b6fae 100644 --- a/docs/function_guides/history_array.rst +++ b/docs/function_guides/history_array.rst @@ -60,7 +60,7 @@ array returned by libEnsemble. .. points are received. .. The reserved boolean field ``cancel_requested`` can also be set in a user -.. function to request that libEnsemble cancels evaluation of the point. +.. function to request that libEnsemble cancels the evaluation of the point. .. The remaining reserved fields are protected (populated by libEnsemble), and .. store information about each entry. These include boolean fields for the diff --git a/docs/function_guides/simulator.rst b/docs/function_guides/simulator.rst index 728675f4b..35da79226 100644 --- a/docs/function_guides/simulator.rst +++ b/docs/function_guides/simulator.rst @@ -8,7 +8,6 @@ Simulator and :ref:`Generator functions` have relatively similar .. 
code-block:: python def my_simulation(Input, persis_info, sim_specs, libE_info): - batch_size = sim_specs["user"]["batch_size"] Output = np.zeros(batch_size, sim_specs["out"]) @@ -78,7 +77,7 @@ Although comparatively uncommon, simulator functions can also be written in a persistent fashion. See the :ref:`here` for a general API overview of writing persistent generators, since the interface is largely identical. The only differences are to pass ``EVAL_SIM_TAG`` when instantiating a ``PersistentSupport`` -class instance, and to return ``FINISHED_PERSISTENT_SIM_TAG`` when the simulator +class instance and to return ``FINISHED_PERSISTENT_SIM_TAG`` when the simulator function returns. .. note:: diff --git a/docs/introduction.rst b/docs/introduction.rst index 0b1e49e49..049044f40 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -20,29 +20,30 @@ and an exit condition. Run the following via ``python this_file.py --comms local from libensemble.specs import ExitCriteria, GenSpecs, SimSpecs from libensemble.tools import add_unique_random_streams - sampling = Ensemble(parse_args=True) - sampling.sim_specs = SimSpecs( - sim_f=six_hump_camel, - inputs=["x"], - outputs=[("f", float)], - ) - sampling.gen_specs = GenSpecs( - gen_f=uniform_random_sample, - outputs=[("x", float, (2,))], - user={ - "gen_batch_size": 500, - "lb": np.array([-3, -2]), - "ub": np.array([3, 2]), - }, - ) - - sampling.persis_info = add_unique_random_streams({}, sampling.nworkers + 1) - sampling.exit_criteria = ExitCriteria(sim_max=101) - if __name__ == "__main__": + sampling = Ensemble(parse_args=True) + sampling.sim_specs = SimSpecs( + sim_f=six_hump_camel, + inputs=["x"], + outputs=[("f", float)], + ) + sampling.gen_specs = GenSpecs( + gen_f=uniform_random_sample, + outputs=[("x", float, (2,))], + user={ + "gen_batch_size": 500, + "lb": np.array([-3, -2]), + "ub": np.array([3, 2]), + }, + ) + + sampling.persis_info = add_unique_random_streams({}, sampling.nworkers + 1) + 
sampling.exit_criteria = ExitCriteria(sim_max=101) sampling.run() sampling.save_output(__file__) - print("Some output data:\n", sampling.H[["x", "f"]][:10]) + + if sampling.is_manager: + print("Some output data:\n", sampling.H[["x", "f"]][:10]) See the :doc:`tutorial` for a step-by-step beginners guide. diff --git a/docs/introduction_latex.rst b/docs/introduction_latex.rst index f88f6def4..c85edebbc 100644 --- a/docs/introduction_latex.rst +++ b/docs/introduction_latex.rst @@ -1,6 +1,5 @@ .. include:: introduction.rst -.. _across: https://libensemble.readthedocs.io/en/develop/platforms/platforms_index.html#funcx-remote-user-functions .. _APOSMM: https://link.springer.com/article/10.1007/s12532-017-0131-4 .. _AWA: https://link.springer.com/article/10.1007/s12532-017-0131-4 .. _Balsam: https://balsam.readthedocs.io/en/latest/ @@ -14,7 +13,6 @@ .. _DFO-LS: https://github.com/numericalalgorithmsgroup/dfols .. _ECNoise: https://www.mcs.anl.gov/~wild/cnoise/ .. _example user scripts: https://libensemble.readthedocs.io/en/main/examples/examples_index.html -.. _funcX: https://funcx.org/ .. _GitHub: https://github.com/Libensemble/libensemble .. _GitHub Actions: https://github.com/Libensemble/libensemble/actions .. _IPAC manuscript: https://doi.org/10.18429/JACoW-ICAP2018-SAPAF03 diff --git a/docs/overview_usecases.rst b/docs/overview_usecases.rst index 387820627..c711206cb 100644 --- a/docs/overview_usecases.rst +++ b/docs/overview_usecases.rst @@ -49,7 +49,7 @@ to support): * A user has a ``gen_f`` that produces meshes for a ``sim_f``. Given the ``sim_f`` output, the ``gen_f`` can refine a mesh or produce a new mesh. libEnsemble can ensure that the calculated meshes can be - used by multiple simulations without requiring movement of data. + used by multiple simulations without requiring moving data. * A user wants to evaluate a simulation ``sim_f`` with different sets of parameters, each drawn from a set of possible values. 
Some parameter values @@ -60,7 +60,7 @@ to support): * A user has a simulation ``sim_f`` that requires calculating multiple expensive quantities, some of which depend on other quantities. The ``sim_f`` - can observe intermediate quantities in order to stop related calculations and + can observe intermediate quantities to stop related calculations and preempt future calculations associated with poor parameter values. * A user has a ``sim_f`` with multiple fidelities, with the higher-fidelity @@ -83,7 +83,7 @@ Glossary ~~~~~~~~ Here we define some terms used throughout libEnsemble's code and documentation. -Although many of these terms seem straight-forward, defining such terms assists +Although many of these terms seem straightforward, defining such terms assists with keeping confusion to a minimum when communicating about libEnsemble and its capabilities. @@ -104,7 +104,7 @@ its capabilities. and ``gen_f`` functions are also commonly configured and parameterized here. * **User function**: A generator, simulator, or allocation function. These - are python functions that govern the libEnsemble workflow. They + are Python functions that govern the libEnsemble workflow. They must conform to the libEnsemble API for each respective user function, but otherwise can be created or modified by the user. libEnsemble comes with many examples of each type of user function. @@ -114,7 +114,7 @@ its capabilities. There are multiple executors including the ``MPIExecutor`` and ``BalsamExecutor``. The base ``Executor`` class allows local sub-processing of serial tasks. - * **Submit**: Enqueue or indicate that one or more jobs or tasks needs to be + * **Submit**: Enqueue or indicate that one or more jobs or tasks need to be launched. When using the libEnsemble Executor, a *submitted* task is executed immediately or queued for execution. @@ -125,13 +125,13 @@ its capabilities. 
* **Persistent**: Typically, a worker communicates with the manager before and after initiating a user ``gen_f`` or ``sim_f`` calculation. However, user functions may also be constructed to communicate directly with the manager, - for example, in order to efficiently maintain and update data structures instead of + for example, to efficiently maintain and update data structures instead of communicating them between manager and worker. These calculations and the workers assigned to them are referred to as *persistent*. * **Resource Manager** libEnsemble has a built-in resource manager that can detect (or be provided with) a set of resources (e.g., a node-list). Resources are - divided up amongst workers (using *resource sets*), and can be dynamically + divided up amongst workers (using *resource sets*) and can be dynamically reassigned. * **Resource Set**: The smallest unit of resources that can be assigned (and diff --git a/docs/platforms/platforms_index.rst b/docs/platforms/platforms_index.rst index e0cb3cfee..15e9a63d5 100644 --- a/docs/platforms/platforms_index.rst +++ b/docs/platforms/platforms_index.rst @@ -52,7 +52,7 @@ If the argument ``libE_specs["dedicated_mode"]=True`` is used when initializing that is running a libEnsemble manager or worker will be removed from the node-list available to the workers, ensuring libEnsemble has dedicated nodes. -To run in central mode using a 5 node allocation with 4 workers: From the head node +To run in central mode using a 5-node allocation with 4 workers: From the head node of the allocation:: mpirun -np 5 python myscript.py @@ -85,11 +85,11 @@ Systems with Launch/MOM Nodes Some large systems have a 3-tier node setup. That is, they have a separate set of launch nodes (known as MOM nodes on Cray Systems). User batch jobs or interactive sessions run on a launch node. 
-Most such systems supply a special MPI runner which has some application-level scheduling +Most such systems supply a special MPI runner that has some application-level scheduling capability (e.g., ``aprun``, ``jsrun``). MPI applications can only be submitted from these nodes. Examples -of these systems include: Summit, Sierra and Theta. +of these systems include: Summit, Sierra, and Theta. -There are two ways of running libEnsemble on these kind of systems. The first, and simplest, +There are two ways of running libEnsemble on these kinds of systems. The first, and simplest, is to run libEnsemble on the launch nodes. This is often sufficient if the worker's simulation or generation functions are not doing much work (other than launching applications). This approach is inherently centralized. The entire node allocation is available for the worker-launched @@ -115,7 +115,7 @@ or *to entirely different systems*. (New) Multi-System: libEnsemble + BalsamExecutor -Submission scripts for running on launch/MOM nodes and for using Balsam, can be be found in +Submission scripts for running on launch/MOM nodes and for using Balsam, can be found in the :doc:`examples`. Mapping Tasks to Resources @@ -144,7 +144,7 @@ libE_specs option. When using the MPI Executor, it is possible to override the detected information using the `custom_info` argument. See the :doc:`MPI Executor<../executor/mpi_executor>` for more. -.. _funcx_ref: +.. 
_globus_compute_ref: Globus Compute - Remote User Functions -------------------------------------- @@ -152,10 +152,10 @@ Globus Compute - Remote User Functions *Alternatively to much of the above*, if libEnsemble is running on some resource with internet access (laptops, login nodes, other servers, etc.), workers can be instructed to launch generator or simulator user function instances to separate resources from -themselves via `Globus Compute`_, a distributed, high-performance function-as-a-service platform: +themselves via `Globus Compute`_ (formerly funcX), a distributed, high-performance function-as-a-service platform: .. image:: ../images/funcxmodel.png - :alt: running_with_funcx + :alt: running_with_globus_compute :scale: 50 :align: center @@ -196,7 +196,7 @@ argument. For example:: globus_compute_endpoint = "3af6dc24-3f27-4c49-8d11-e301ade15353", ) -See the ``libensemble/tests/scaling_tests/funcx_forces`` directory for a complete +See the ``libensemble/tests/scaling_tests/globus_compute_forces`` directory for a complete remote-simulation example. Instructions for Specific Platforms diff --git a/docs/platforms/srun.rst b/docs/platforms/srun.rst index 254574308..5ec8a6483 100644 --- a/docs/platforms/srun.rst +++ b/docs/platforms/srun.rst @@ -4,7 +4,7 @@ libEnsemble with SLURM SLURM is a popular open-source workload manager. -libEnsemble is able to read SLURM node lists and partition these to workers. By +libEnsemble can read SLURM node lists and partition these to workers. By default this is done by :ref:`reading an environment variable`. Example SLURM submission scripts for various systems are given in the @@ -50,12 +50,12 @@ when assigning more than one worker to any given node. .. 
dropdown:: **GTL_DEBUG: [0] cudaHostRegister: no CUDA-capable device is detected** If using the environment variable ``MPICH_GPU_SUPPORT_ENABLED``, then ``srun`` commands may - expect an option for allocating GPUs (e.g., ``--gpus-per-task=1`` would + expect an option for allocating GPUs (e.g., ``--gpus-per-task=1`` would allocate one GPU to each MPI task of the MPI run). It is recommended that tasks submitted via the :doc:`MPIExecutor<../executor/mpi_executor>` specify this in the ``extra_args`` option to the ``submit`` function (rather than using an ``#SBATCH`` command). - If running the libEnsemble user calling script with ``srun``, then it is recommended that + If running the libEnsemble calling script with ``srun``, then it is recommended that ``MPICH_GPU_SUPPORT_ENABLED`` is set in the user ``sim_f`` or ``gen_f`` function where GPU runs will be submitted, instead of in the batch script. For example:: diff --git a/docs/resource_manager/overview.rst b/docs/resource_manager/overview.rst index 342e0a1c1..f28d5e77e 100644 --- a/docs/resource_manager/overview.rst +++ b/docs/resource_manager/overview.rst @@ -68,7 +68,7 @@ along with the work request (simulation). In the calling script, use a ``gen_specs["out"]`` field called ``resource_sets``: .. code-block:: python - :emphasize-lines: 4 + :emphasize-lines: 6 gen_specs = { "gen_f": gen_f, @@ -102,7 +102,7 @@ When the allocation function assigns the points to workers for evaluation, it will check if the requested number of resource sets are available for each point to evaluate. If they are not available, then the evaluation will not be given to a worker until enough resources become available. This functionality is built -into the supplied allocation functions, and generally requires no modification +into the supplied allocation functions and generally requires no modification from the user. .. image:: ../images/variable_resources2.png @@ -112,7 +112,7 @@ from the user. 
The particular nodes and slots assigned to each worker will be determined by the libEnsenble :doc:`built-in scheduler`, although users can provide an alternative scheduler via the :doc:`allocation function<../function_guides/allocator>`. -In short, the scheduler will preference fitting simulations onto a node, and using +In short, the scheduler will prefer fitting simulations onto a node, and using even splits across nodes, if necessary. Accessing resources from the simulation function @@ -133,7 +133,7 @@ For example, in *CUDA_variable_resources*, the environment variable ``CUDA_VISIBLE_DEVICES`` is set to slots: .. code-block:: python - :emphasize-lines: 3 + :emphasize-lines: 2 resources = Resources.resources.worker_resources resources.set_env_to_slots("CUDA_VISIBLE_DEVICES") # Use convenience function. @@ -169,7 +169,7 @@ and can be set by a dictionary supplied via ``libE_specs["scheduler_opts"]`` **match_slots** [boolean]: When splitting resource sets across multiple nodes, slot IDs must match. Useful if setting an environment variable such as ``CUDA_VISIBLE_DEVICES`` - to specific slots counts, which should match over multiple nodes. + to specific slot counts, which should match over multiple nodes. Default: True In the following example, assume the next simulation requires **four** resource @@ -195,17 +195,17 @@ Varying generator resources By default, generators are not allocated resources in dynamic mode. Fixed resources for the generator can be set using the *libE_specs* options -``gen_num_procs`` and ``gen_num_gpus``, which takes an integer value. -If only ``gen_num_gpus`` is set, then number of processors will match. +``gen_num_procs`` and ``gen_num_gpus``, which take integer values. +If only ``gen_num_gpus`` is set, then the number of processors will be set to match. To vary generator resources, ``persis_info`` settings can be used in allocation functions before calling the ``gen_work`` support function. 
This takes the -same options (``gen_num_procs`` and ``gen_num_gpus``) +same options (``gen_num_procs`` and ``gen_num_gpus``). Alternatively, the setting ``persis_info["gen_resources"]`` can also be set to a number of resource sets. -Note that persistent workers maintain their resources until coming out of a +Note that persistent workers maintain their resources until they come out of a persistent state. Example scenarios @@ -214,7 +214,7 @@ Example scenarios Persistent generator ^^^^^^^^^^^^^^^^^^^^ -You have *one* persistent generator and want *eight* workers for running concurrent +You have *one* persistent generator and want *eight* workers to run concurrent simulations. In this case you can run with *nine* workers. Either explicitly set eight resource sets (recommended): @@ -223,13 +223,13 @@ Either explicitly set eight resource sets (recommended): libE_specs["num_resource_sets"] = 8 -Or if the generator should always be the same worker, use one zero resource worker: +Or if the generator should always be the same worker, use one zero-resource worker: .. code-block:: python libE_specs["zero_resource_workers"] = [1] -For the second option, an allocation function supporting zero resource workers must be used. +For the second option, an allocation function supporting zero-resource workers must be used. Using the two-node example above, the initial worker mapping in this example will be: diff --git a/docs/resource_manager/zero_resource_workers.rst b/docs/resource_manager/zero_resource_workers.rst index de176e654..fe8c243eb 100644 --- a/docs/resource_manager/zero_resource_workers.rst +++ b/docs/resource_manager/zero_resource_workers.rst @@ -52,8 +52,8 @@ In general, the number of resource sets should be set to enable the maximum concurrency desired by the ensemble, taking into account generators and simulators. Users can set generator resources using the *libE_specs* options -``gen_num_procs`` and/or ``gen_num_gpus``, which take an integer values. 
-If only ``gen_num_gpus`` is set, then number of processors will match. +``gen_num_procs`` and/or ``gen_num_gpus``, which take integer values. +If only ``gen_num_gpus`` is set, then the number of processors is set to match. To vary generator resources, ``persis_info`` settings can be used in allocation functions before calling the ``gen_work`` support function. This takes the @@ -71,7 +71,7 @@ the other will have two. Placing zero-resource functions on a fixed worker ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If the generator must must always be on worker one, then instead of using +If the generator must always be on worker one, then instead of using ``num_resource_sets``, use the ``zero_resource_workers`` *libE_specs* option: .. code-block:: python diff --git a/docs/rst_formatting_guidelines b/docs/rst_formatting_guidelines index d94ee7a55..0bc7933c2 100644 --- a/docs/rst_formatting_guidelines +++ b/docs/rst_formatting_guidelines @@ -1,37 +1,44 @@ -Put variables (e.g., gen_f or H) in between double ticks ``stuff here`` (no quotes) +GENERAL +------- -Code or monospaced values that contain quotes should only use double-quotes ("stuff_here") + Put variables (e.g., gen_f or H) in between double ticks ``stuff here`` (no quotes). -Documented classes and functions should be linked to using :class: and :meth: respectively. Ensure the entire module path is used. + Code or monospaced values that contain quotes should only use double-quotes ("stuff_here"). -Links to other pages should *preferably* use :ref:, so references persist even when files are moved. + Use italics for emphasis except within links. -Put short code in between double ticks ``stuff here`` (no quotes) + Put short code, file-names, and environment variables within ``double ticks`` (no quotes). -Put referenced file names in between double ticks ``stuff here`` (no quotes) + "where" should be lowercased when describing variables in a formula or code sample. 
-Put environment variables in between double ticks ``stuff here`` (no quotes) + Use tabs to compare multiple approaches or categorize content. -Use .. note:: and .. warning:: directives + Use dropdowns for multiple long-content sections on the same page. -Put breakout code snippets into a box using two colons :: + Abbreviation for "example": "e.g.," (note the final period and comma). Abbreviation for "number of": "No. of" (note the period after "No"). -No italics on text of links + Use semicolons; they're helpful for two smaller sentences describing the same topic. -Use italics for emphasis +CODE DOCSTRINGS AND TYPE HINTS +------------------------------ -Ensure parameter types, return types, and contents of :obj:`label`, :class:`label`, and other instances are "resolvable" (importable/parsable by Python) to the best of your ability. If not, append to "nitpicky" + Ensure parameter types, return types, and contents of :obj:`label`, :class:`label`, and other instances are "resolvable" (importable/parsable by Python) to the best of your ability. If not, append to "nitpicky". -Ensure global types in docstrings match their builtin/typing class or name; "string" should be "str", "optional" should be "Optional" + Ensure global types in docstrings match their builtin/typing class or name; "string" should be "str", "optional" should be "Optional". -"list of object" should be "List[object]" + "List of object" should be "List[object]". -Docstrings for parameters or fields should end with periods + List-items of parameters, fields, or other items should end with periods. -Subsections should be Title Cased +DOCS PAGES +---------- -"where" should be lowercased when describing variables in a formula or code sample + Subsections should be Title Cased. -Use tabs to compare multiple approaches or categorize content + Documented classes and functions should be linked to using :class: and :meth: respectively. Ensure the entire module path is used. 
-Use dropdowns for multiple long-content sections on the same page. + Links to other pages should *preferably* use :ref: so references persist when files are moved. + + Use .. note:: and .. warning:: directives to emphasize an edge-case. + + Put breakout code snippets into a box using two colons ::. diff --git a/docs/running_libE.rst b/docs/running_libE.rst index 94dc50dab..bda358275 100644 --- a/docs/running_libE.rst +++ b/docs/running_libE.rst @@ -19,7 +19,7 @@ The default is ``mpi``. .. note:: You do not need the ``mpi`` communication mode to use the - :doc:`MPI Executor`. The communications modes described + :doc:`MPI Executor`. The communication modes described here only refer to how the libEnsemble manager and workers communicate. .. tab-set:: @@ -83,7 +83,7 @@ The default is ``mpi``. **Limitations of local mode** - - Workers cannot be :doc:`distributed` across nodes + - Workers cannot be :doc:`distributed` across nodes. - In some scenarios, any import of ``mpi4py`` will cause this to break. - Does not have the potential scaling of MPI mode, but is sufficient for most users. @@ -100,7 +100,7 @@ The default is ``mpi``. Set ``comms`` to ``ssh`` to launch workers on remote ssh-accessible systems. This colocates workers, functions, and any applications. User functions can also be persistent, unlike when launching remote functions via - :ref:`Globus Compute`. + :ref:`Globus Compute`. The remote working directory and Python need to be specified. This may resemble:: @@ -110,7 +110,7 @@ The default is ``mpi``. - There cannot be two calls to ``libE()`` or ``Ensemble.run()`` in the same script. -Further command line options +Further Command Line Options ---------------------------- See the **parse_args()** function in :doc:`Convenience Tools` for @@ -258,7 +258,7 @@ whenever the worker receives work. A persistent worker is one that continues to generator or simulation function between work units, maintaining the local data environment. 
A common use-case consists of a persistent generator (such as :doc:`persistent_aposmm`) -that maintains optimization data, while generating new simulation inputs. The persistent generator runs +that maintains optimization data while generating new simulation inputs. The persistent generator runs on a dedicated worker while in persistent mode. This requires an appropriate :doc:`allocation function` that will run the generator as persistent. @@ -283,9 +283,9 @@ For example:: os.environ["OMP_NUM_THREADS"] = 4 -set in your simulation script before the Executor submit command will export the setting to your run. -For running a bash script in a sub environment when use the Executor, see the ``env_script`` -option to the :doc:`MPI Executor`. +set in your simulation script before the Executor *submit* command will export the setting +to your run. For running a bash script in a sub environment when using the Executor, see +the ``env_script`` option to the :doc:`MPI Executor`. Further Run Information ----------------------- diff --git a/docs/tutorials/calib_cancel_tutorial.rst b/docs/tutorials/calib_cancel_tutorial.rst index d2f03f0e3..1c0fcff3c 100644 --- a/docs/tutorials/calib_cancel_tutorial.rst +++ b/docs/tutorials/calib_cancel_tutorial.rst @@ -8,11 +8,11 @@ Introduction - Calibration with libEnsemble and a Regression Model This tutorial demonstrates libEnsemble's capability to selectively cancel pending simulations based on instructions from a calibration Generator Function. This capability is desirable, especially when evaluations are expensive, since -compute resources may then be more effectively applied towards critical evaluations. +compute resources may then be more effectively applied toward critical evaluations. 
For a somewhat different approach than libEnsemble's :doc:`other tutorials`, we'll emphasize the settings, functions, and data fields within the calling script, -:ref:`persistent generator`, Manager, and :ref:`sim_f` +:ref:`persistent generator`, manager, and :ref:`sim_f` that make this capability possible, rather than outlining a step-by-step process. The libEnsemble regression test ``test_persistent_surmise_calib.py`` demonstrates @@ -33,7 +33,7 @@ is to find some parameter :math:`\theta_0` such that :math:`f(\theta_0, x)` clos resembles data collected from a physical experiment. For example, a (simple) physical experiment may involve dropping a ball at different heights to study the gravitational constant, and the corresponding computer model could be the set of -differential equations that governs the drop. In a case where the computation of +differential equations that govern the drop. In a case where the computation of the computer model is relatively expensive, we employ a fast surrogate model to approximate the model and to inform good parameters to test next. Here the computer model :math:`f(\theta, x)` is accessible only through performing :ref:`sim_f` @@ -139,7 +139,7 @@ be processed. The manager will send kill signals to workers that are already pro cancelled points. These signals can be caught and acted on by the user ``sim_f``; otherwise they will be ignored. -Allocation function and Cancellation configuration +Allocation Function and Cancellation Configuration -------------------------------------------------- The allocation function used in this example is the *only_persistent_gens* function in the @@ -208,7 +208,7 @@ Using cancellations to kill running simulations ------------------------------------------------ If a generated point is cancelled by the generator before it has been given to a worker for evaluation, -then it will never be given. If it has already returned from simulation, then results can be returned, +then it will never be given. 
If it has already returned from the simulation, then results can be returned, but the ``cancel_requested`` field remains as True. However, if the simulation is running when the manager receives the cancellation request, a kill signal will be sent to the worker. This can be caught and acted upon by a user function, otherwise it will be ignored. To demonstrate this, the test ``test_persistent_surmise_killsims.py`` diff --git a/docs/tutorials/executor_forces_tutorial.rst b/docs/tutorials/executor_forces_tutorial.rst index f24b9ce13..ce325027a 100644 --- a/docs/tutorials/executor_forces_tutorial.rst +++ b/docs/tutorials/executor_forces_tutorial.rst @@ -60,7 +60,6 @@ generation functions and call libEnsemble. Create a Python file called from libensemble.specs import AllocSpecs, ExitCriteria, GenSpecs, LibeSpecs, SimSpecs if __name__ == "__main__": - # Initialize MPI Executor exctr = MPIExecutor() @@ -119,7 +118,7 @@ expect, and also to parameterize user functions: ensemble.gen_specs = GenSpecs( gen_f=gen_f, - inputs=[], # No input when start persistent generator + inputs=[], # No input when starting persistent generator persis_in=["sim_id"], # Return sim_ids of evaluated points to generator outputs=[("x", float, (1,))], user={ @@ -275,7 +274,7 @@ available to this worker. After submitting the "forces" app for execution, a :ref:`Task` object is returned that correlates with the launched app. -This object is roughly equivalent to a Python future, and can be polled, killed, +This object is roughly equivalent to a Python future and can be polled, killed, and evaluated in a variety of helpful ways. For now, we're satisfied with waiting for the task to complete via ``task.wait()``. 
diff --git a/docs/tutorials/forces_gpu_tutorial.rst b/docs/tutorials/forces_gpu_tutorial.rst index e672a3017..833b88c22 100644 --- a/docs/tutorials/forces_gpu_tutorial.rst +++ b/docs/tutorials/forces_gpu_tutorial.rst @@ -24,7 +24,7 @@ Simulation function ------------------- The ``sim_f`` (``forces_simf.py``) is as follows. The lines that are different -to the simple forces example are highlighted: +from the simple forces example are highlighted: .. code-block:: python :linenos: @@ -96,7 +96,7 @@ the `forces_gpu_var_resources`_ example, and skip lines 31-32. Line 37 simply prints out how the GPUs were assigned. If this is not as expected, :ref:`platform configuration` can be provided. -While this is sufficient for many/most users, note that it is possible to query +While this is sufficient for most users, note that it is possible to query the resources assigned to *this* worker (nodes and partitions of nodes), and use this information however you want. @@ -219,12 +219,12 @@ For example:: python run_libe_forces.py --comms local --nworkers 9 -See :ref:`zero resource workers` for more ways to express this. +See :ref:`zero-resource workers` for more ways to express this. Changing the number of GPUs per worker -------------------------------------- -If you want to have two GPUs per worker on the same system (four GPUs per node), +If you want to have two GPUs per worker on the same system (with four GPUs per node), you could assign only four workers. You will see that two GPUs are used for each forces run. @@ -242,11 +242,11 @@ simulation (they do not need to be passed as a ``sim_specs["in"]``). Further guidance on varying the resources assigned to workers can be found under the :doc:`resource manager<../resource_manager/resources_index>` section. -Multiple Applications +Multiple applications --------------------- Another variant of this example, forces_multi_app_, has two applications, one that -uses GPUs, and another that only uses CPUs. 
The dynamic resource management can +uses GPUs, and another that only uses CPUs. Dynamic resource management can manage both types of resources and assign these to the same nodes concurrently, for maximum efficiency. diff --git a/docs/tutorials/local_sine_tutorial.rst b/docs/tutorials/local_sine_tutorial.rst index 710ce6382..68613119f 100644 --- a/docs/tutorials/local_sine_tutorial.rst +++ b/docs/tutorials/local_sine_tutorial.rst @@ -58,7 +58,7 @@ need to write a new allocation function. for generating random numbers. * :ref:`gen_specs`: Dictionary with user-defined static fields and - parameters. Customizable parameters such as boundaries and batch + parameters. Customizable parameters such as lower and upper bounds and batch sizes are placed within the ``gen_specs["user"]`` dictionary, while input/output and other fields that libEnsemble needs to operate the generator are placed outside ``user``. @@ -74,7 +74,6 @@ need to write a new allocation function. def gen_random_sample(Input, persis_info, gen_specs): - # Pull out user parameters user_specs = gen_specs["user"] @@ -142,7 +141,6 @@ need to write a new allocation function. def sim_find_sine(Input, _, sim_specs): - # Create an output array of a single zero Output = np.zeros(1, dtype=sim_specs["out"]) @@ -154,7 +152,7 @@ need to write a new allocation function. Our simulator function is called by a worker for every work item produced by the generator function. This function calculates the sine of the passed value, - then returns it so the worker can store the result. + and then returns it so the worker can store the result. **Exercise** @@ -267,7 +265,7 @@ need to write a new allocation function. 
[(-0.37466051, 1.559+09, 2, 2, True, True, [-0.38403059], True, 0, 1.559+09) (-0.29279634, 1.559+09, 2, 3, True, True, [-2.84444261], True, 1, 1.559+09) ( 0.29358492, 1.559+09, 2, 4, True, True, [ 0.29797487], True, 2, 1.559+09) - (-0.3783986 , 1.559+09, 2, 1, True, True, [-0.38806564], True, 3, 1.559+09) + (-0.3783986, 1.559+09, 2, 1, True, True, [-0.38806564], True, 3, 1.559+09) (-0.45982062, 1.559+09, 2, 2, True, True, [-0.47779319], True, 4, 1.559+09) ... @@ -373,7 +371,7 @@ need to write a new allocation function. doesn't work, try appending ``--user`` to the end of the command. See the mpi4py_ docs for more information. - Verify that MPI has installed correctly with ``mpirun --version``. + Verify that MPI has been installed correctly with ``mpirun --version``. **Modifying the script** diff --git a/docs/utilities.rst b/docs/utilities.rst index 6043c7779..525119d46 100644 --- a/docs/utilities.rst +++ b/docs/utilities.rst @@ -14,7 +14,7 @@ Convenience Tools and Functions .. _p_gen_routines: These routines are commonly used within persistent generator functions - like ``persistent_aposmm`` in ``libensemble/gen_funcs/`` for intermediate + such as ``persistent_aposmm`` in ``libensemble/gen_funcs/`` for intermediate communication with the manager. Persistent simulator functions are also supported. .. automodule:: persistent_support diff --git a/docs/welcome.rst b/docs/welcome.rst index e068748a2..e85aac87f 100644 --- a/docs/welcome.rst +++ b/docs/welcome.rst @@ -35,9 +35,9 @@ libEnsemble A complete toolkit for dynamic ensembles of calculations - - New to libEnsemble? Start :doc:`here`. + - New to libEnsemble? :doc:`Start here`. - Try out libEnsemble with a :doc:`tutorial`. - - Go in depth by reading the :doc:`Overview`. + - Go in depth by reading the :doc:`full overview`. - See the :doc:`FAQ` for common questions and answers, errors, and resolutions. - Check us out on `GitHub`_. 
diff --git a/libensemble/ensemble.py b/libensemble/ensemble.py index 3eaae0b6c..e79947b91 100644 --- a/libensemble/ensemble.py +++ b/libensemble/ensemble.py @@ -33,7 +33,7 @@ class Ensemble: """ The primary object for a libEnsemble workflow. - Parses and validates settings, sets up logging, maintains output. + Parses and validates settings, sets up logging, and maintains output. .. dropdown:: Example :open: @@ -343,7 +343,7 @@ def run(self) -> (npt.NDArray, dict, int): .. dropdown:: MPI/comms Notes - Manager-worker intercommunications are parsed from the ``comms`` key of + Manager--worker intercommunications are parsed from the ``comms`` key of :ref:`libE_specs`. An MPI runtime is assumed by default if ``--comms local`` wasn't specified on the command-line or in ``libE_specs``. @@ -526,7 +526,7 @@ def add_random_streams(self, num_streams: int = 0, seed: str = ""): seed: str, Optional - Seed for NumPy's RNG + Seed for NumPy's RNG. """ if num_streams: @@ -540,7 +540,7 @@ def add_random_streams(self, num_streams: int = 0, seed: str = ""): def save_output(self, file: str): """ Writes out History array and persis_info to files. - If using a workflow_dir, will place with specified filename in that directory + If using a workflow_dir, will place with specified filename in that directory. Format: ``_results_History_length=_evals=_ranks=`` """ diff --git a/libensemble/gen_funcs/persistent_sampling_var_resources.py b/libensemble/gen_funcs/persistent_sampling_var_resources.py index af72512e6..c635b2a07 100644 --- a/libensemble/gen_funcs/persistent_sampling_var_resources.py +++ b/libensemble/gen_funcs/persistent_sampling_var_resources.py @@ -3,7 +3,7 @@ Each function generates points uniformly over the domain defined by ``gen_specs["user"]["ub"]`` and ``gen_specs["user"]["lb"]``. 
-Most functions use a random request of resources over a range, setting num_procs, num_gpus or +Most functions use a random request of resources over a range, setting num_procs, num_gpus, or resource sets. The function ``uniform_sample_with_var_gpus`` uses the ``x`` value to determine the number of GPUs requested. """ diff --git a/libensemble/libE.py b/libensemble/libE.py index 955e9b579..644a2e447 100644 --- a/libensemble/libE.py +++ b/libensemble/libE.py @@ -66,7 +66,6 @@ from libensemble.tools import add_unique_random_streams if __name__ == "__main__": - nworkers, is_manager, libE_specs, _ = parse_args() libE_specs["save_every_k_gens"] = 20 diff --git a/libensemble/resources/platforms.py b/libensemble/resources/platforms.py index e7457862a..3999082e9 100644 --- a/libensemble/resources/platforms.py +++ b/libensemble/resources/platforms.py @@ -1,10 +1,10 @@ """Module for platform specification This module defines the Platform class which can be used to determine a platform -(computing system) attributes. A number of known systems are provided. +(computing system) attributes. Many known systems are provided. These can be specified by the libE_specs options ``platform_specs`` (recommended). -If may also be specified, for known systems, via a string in the ``platform`` +It may also be specified, for known systems, via a string in the ``platform`` option or the environment variable ``LIBE_PLATFORM``. """ @@ -52,7 +52,7 @@ class Platform(BaseModel): Must take one of the following string options. - ``"runner_default"``: Use default setting for MPI runner (same as if not set). - - ``"env"``: Use an environment variable (comma separated list of slots) + - ``"env"``: Use an environment variable (comma-separated list of slots) - ``"option_gpus_per_node"``: Expresses GPUs per node on MPI runner command line. - ``"option_gpus_per_task"``: Expresses GPUs per task on MPI runner command line. 
@@ -100,9 +100,9 @@ class Platform(BaseModel): "gpu_env_fallback" = "ROCR_VISIBLE_DEVICES" This example will use the MPI runner default settings when using an MPI runner, but - will otherwise use ROCR_VISIBLE_DEVICES (e.g. if setting via function set_env_to_gpus). + will otherwise use ROCR_VISIBLE_DEVICES (e.g., if setting via function set_env_to_gpus). - If this is not set, the default is "CUDA_VISIBLE_DEVICES" + If this is not set, the default is "CUDA_VISIBLE_DEVICES". """ @@ -116,8 +116,8 @@ class Platform(BaseModel): Useful if setting an environment variable such as ``CUDA_VISIBLE_DEVICES``, where the value should match on each node of an MPI run (choose **True**). - When using command-line options just as ``--gpus-per-node``, which allow the systems - application level scheduler to manager GPUs, then ``match_slots`` can be **False** + When using command-line options such as ``--gpus-per-node``, which allow the system's + application-level scheduler to manage GPUs, then ``match_slots`` can be **False** (allowing for more efficient scheduling when MPI runs cross nodes). """ @@ -264,7 +264,7 @@ class Known_platforms(BaseModel): export LIBE_PLATFORM="perlmutter_g" - If the platform is not specified, libEnsemble will attempt detect known + If the platform is not specified, libEnsemble will attempt to detect known platforms (this is not guaranteed). **Note**: libEnsemble should work on any platform, and detects most @@ -307,7 +307,7 @@ def known_envs(): def known_system_detect(cmd="hostname -d"): """Detect known systems - This is a function attempts to detect if on a known system, but users + This function attempts to detect if on a known system, but users should specify systems to be sure. """ run_cmd = cmd.split() @@ -321,7 +321,7 @@ def known_system_detect(cmd="hostname -d"): def get_platform(libE_specs): - """Return platform as a dictionary from relevant libE_specs option. 
For internal use, return a platform as a dictionary from either platform name or platform_specs. diff --git a/libensemble/resources/resources.py b/libensemble/resources/resources.py index e6c5782c2..f7fca66ce 100644 --- a/libensemble/resources/resources.py +++ b/libensemble/resources/resources.py @@ -39,11 +39,11 @@ class Resources: The following are set up after manager/worker fork. - The resource manager is set up only on the manaager, while the worker resources object is set + The resource manager is set up only on the manager, while the worker resources object is set up on workers. :ivar ResourceManager resource_manager: An object that manages resource set assignment to workers. - :ivar WorkerResources worker_resources: An object that contains worker specific resources. + :ivar WorkerResources worker_resources: An object that contains worker-specific resources. """ resources = None @@ -116,7 +116,7 @@ def __init__(self, libE_specs: dict, platform_info: dict = {}, top_level_dir: st dedicated_mode: bool, Optional If true, then dedicate nodes to running libEnsemble. Dedicated mode means that any nodes running libE processes (manager and workers), - will not be available to worker launched tasks (user applications). They will + will not be available to worker-launched tasks (user applications). They will be removed from the nodelist (if present), before dividing into resource sets. zero_resource_workers: List[int], Optional diff --git a/libensemble/resources/scheduler.py b/libensemble/resources/scheduler.py index f4ad7e902..04de87e77 100644 --- a/libensemble/resources/scheduler.py +++ b/libensemble/resources/scheduler.py @@ -55,7 +55,7 @@ def __init__(self, user_resources=None, sched_opts={}): "match_slots" [Boolean]: When splitting resource sets across multiple nodes, slot IDs must match. Useful if setting an environment variable such as ``CUDA_VISIBLE_DEVICES`` - to specific slots counts, which should match over multiple nodes. 
+ to specific slots, which should match over multiple nodes. Default: True """ diff --git a/libensemble/sim_funcs/var_resources.py b/libensemble/sim_funcs/var_resources.py index 2f222a94e..63d2b86c3 100644 --- a/libensemble/sim_funcs/var_resources.py +++ b/libensemble/sim_funcs/var_resources.py @@ -3,7 +3,7 @@ ``six_hump_camel`` and ``helloworld`` python scripts are used as example applications, but these could be any MPI application. -Each simulation function use the resources assigned to this worker to set CPU +Each simulation function uses the resources assigned to this worker to set CPU count and, in some functions, specify GPU usage. GPUs are not used for the six_hump_camel function, but these tests check the @@ -39,9 +39,9 @@ def gpu_variable_resources(H, persis_info, sim_specs, libE_info): The six_hump_camel app does not run on the GPU, but this test demonstrates how to automatically assign the GPUs given to this worker via the MPIExecutor. - The method used to assign GPUs will be determined by the MPI runner or user - provided configuration (e.g., by setting the ``platform`` or ``platform_specs`` - options or the LIBE_PLATFORM environment variable). + The method used to assign GPUs will be determined by the MPI runner or + user-provided configuration (e.g., by setting the ``platform`` or + ``platform_specs`` options or the LIBE_PLATFORM environment variable). """ x = H["x"][0] @@ -190,7 +190,7 @@ def multi_points_with_variable_resources(H, _, sim_specs, libE_info): nodes. Note that this is also an example that is capable of handling multiple - points (sim ids) in one each call. + points (sim ids) in each call. .. 
seealso:: `test_uniform_sampling_with_variable_resources.py `_ # noqa diff --git a/libensemble/specs.py b/libensemble/specs.py index 3ae3eaa82..881be5263 100644 --- a/libensemble/specs.py +++ b/libensemble/specs.py @@ -54,7 +54,7 @@ class SimSpecs(BaseModel): persis_in: Optional[List[str]] = [] """ List of **field names** to send to a persistent simulation function - throughout runtime, following initialization. + throughout the run, following initialization. """ # list of tuples for dtype construction @@ -116,7 +116,7 @@ class GenSpecs(BaseModel): persis_in: Optional[List[str]] = [] """ List of **field names** to send to a persistent generator function - throughout runtime, following initialization. + throughout the run, following initialization. """ outputs: List[Union[Tuple[str, Any], Tuple[str, Any, Union[int, Tuple]]]] = Field([], alias="out") @@ -262,13 +262,13 @@ class LibeSpecs(BaseModel): ensemble_dir_path: Optional[Union[str, Path]] = Path("ensemble") """ Path to main ensemble directory. Can serve - as single working directory for workers, or contain calculation directories + as a single working directory for workers, or contain calculation directories """ ensemble_copy_back: Optional[bool] = False """ Whether to copy back contents of ``ensemble_dir_path`` to launch - location. Useful if ``ensemble_dir_path`` located on node-local storage. + location. Useful if ``ensemble_dir_path`` is located on node-local storage. """ use_worker_dirs: Optional[bool] = False @@ -421,7 +421,7 @@ class LibeSpecs(BaseModel): final_gen_send: Optional[bool] = False """ - Send final simulations results to persistent generators before shutdown. + Send final simulation results to persistent generators before shutdown. The results will be sent along with the ``PERSIS_STOP`` tag. 
""" @@ -453,7 +453,7 @@ class LibeSpecs(BaseModel): enforce_worker_core_bounds: Optional[bool] = False """ - If ``False``, the Executor will permit submission of tasks with a + If ``False``, the Executor will permit the submission of tasks with a higher processor count than the CPUs available to the worker as detected by the resource manager. Larger node counts are not allowed. When ``"disable_resource_manager"`` is ``True``, @@ -531,7 +531,7 @@ def set_workflow_dir(cls, values): class _EnsembleSpecs(BaseModel): - """An all-encompasing model for a libEnsemble workflow.""" + """An all-encompassing model for a libEnsemble workflow.""" H0: Optional[Any] = None # np.ndarray - avoids sphinx issue """ A previous or preformatted libEnsemble History array to prepend. """ diff --git a/libensemble/tests/functionality_tests/test_GPU_gen_resources.py b/libensemble/tests/functionality_tests/test_GPU_gen_resources.py index fbefb98c5..6e692dfa2 100644 --- a/libensemble/tests/functionality_tests/test_GPU_gen_resources.py +++ b/libensemble/tests/functionality_tests/test_GPU_gen_resources.py @@ -6,16 +6,16 @@ while also requiring resources itself. The resources required by a sim must not be larger than what remains once the generator resources are assigned. -The sim_f (gpu_variable_resources_from_gen) asserts that GPUs assignment +The sim_f (gpu_variable_resources_from_gen) asserts that the GPU assignment is correct for the default method for the MPI runner. GPUs are not actually -used for default application. Four GPUs per node is mocked up below (if this line +used for the default application. Four GPUs per node is mocked up below (if this line is removed, libEnsemble will detect any GPUs available). A dry_run option is provided. This can be set in the calling script, and will just print run-lines and GPU settings. This may be used for testing run-lines produced and GPU settings for different MPI runners. -Execute via one of the following commands (e.g. 
4 workers): +Execute via one of the following commands (e.g., 4 workers): mpiexec -np 5 python test_GPU_gen_resources.py python test_GPU_gen_resources.py --comms local --nworkers 4 diff --git a/libensemble/tests/functionality_tests/test_mpi_gpu_settings_env.py b/libensemble/tests/functionality_tests/test_mpi_gpu_settings_env.py index 3757333cc..2265f7175 100644 --- a/libensemble/tests/functionality_tests/test_mpi_gpu_settings_env.py +++ b/libensemble/tests/functionality_tests/test_mpi_gpu_settings_env.py @@ -1,14 +1,14 @@ """ -Tests options to run an an application in a bash specified environment, without +Tests options to run an application in a bash-specified environment, without affecting the parent environment. This is based on the variable resource detection and automatic GPU assignment test. -This test uses the dry_run option to test correct runline and GPU settings for -different mocked up systems. Test assertions are in the sim function via the -check_mpi_runner and check_gpu_setting functions. +This test uses the dry_run option to test the correct runline and GPU settings +for different mocked-up systems. Test assertions are in the sim function via +the check_mpi_runner and check_gpu_setting functions. -Execute via one of the following commands (e.g. 
5 workers): +Execute via one of the following commands (e.g., 5 workers): mpiexec -np 6 python test_mpi_gpu_settings_env.py python test_mpi_gpu_settings_env.py --comms local --nworkers 5 diff --git a/libensemble/tests/functionality_tests/test_persistent_uniform_sampling_running_mean.py b/libensemble/tests/functionality_tests/test_persistent_uniform_sampling_running_mean.py index 42095ea76..dc56d4013 100644 --- a/libensemble/tests/functionality_tests/test_persistent_uniform_sampling_running_mean.py +++ b/libensemble/tests/functionality_tests/test_persistent_uniform_sampling_running_mean.py @@ -2,7 +2,7 @@ Tests the ability of libEnsemble to - give back history entries from the a shutting-down persistent gen -Execute via one of the following commands (e.g. 3 workers): +Execute via one of the following commands (e.g., 3 workers): mpiexec -np 4 python test_persistent_uniform_sampling_adv.py python test_persistent_uniform_sampling_running_mean.py --nworkers 3 --comms local python test_persistent_uniform_sampling_running_mean.py --nworkers 3 --comms tcp diff --git a/libensemble/tests/regression_tests/test_1d_sampling.py b/libensemble/tests/regression_tests/test_1d_sampling.py index 3a197a345..ad668d6c3 100644 --- a/libensemble/tests/regression_tests/test_1d_sampling.py +++ b/libensemble/tests/regression_tests/test_1d_sampling.py @@ -26,8 +26,16 @@ # Main block is necessary only when using local comms with spawn start method (default on macOS and Windows). 
if __name__ == "__main__": sampling = Ensemble(parse_args=True) - sampling.libE_specs = LibeSpecs(save_every_k_gens=300, safe_mode=False, disable_log_files=True) - sampling.sim_specs = SimSpecs(sim_f=sim_f, inputs=["x"], outputs=[("f", float)]) + sampling.libE_specs = LibeSpecs( + save_every_k_gens=300, + safe_mode=False, + disable_log_files=True, + ) + sampling.sim_specs = SimSpecs( + sim_f=sim_f, + inputs=["x"], + outputs=[("f", float)], + ) sampling.gen_specs = GenSpecs( gen_f=gen_f, outputs=[("x", float, (1,))], diff --git a/libensemble/tests/regression_tests/test_persistent_surmise_calib.py b/libensemble/tests/regression_tests/test_persistent_surmise_calib.py index 21d3cd757..de8f15597 100644 --- a/libensemble/tests/regression_tests/test_persistent_surmise_calib.py +++ b/libensemble/tests/regression_tests/test_persistent_surmise_calib.py @@ -79,8 +79,8 @@ persis_in=[o[0] for o in gen_out] + ["f", "sim_ended", "sim_id"], out=gen_out, user={ - "n_init_thetas": n_init_thetas, # Num thetas in initial batch - "num_x_vals": n_x, # Num x points to create + "n_init_thetas": n_init_thetas, # No. of thetas in initial batch + "num_x_vals": n_x, # No. of x points to create "step_add_theta": step_add_theta, # No. of thetas to generate per step "n_explore_theta": n_explore_theta, # No. of thetas to explore each step "obsvar": obsvar, # Variance for generating noise in obs @@ -109,7 +109,7 @@ print("Cancelled sims", H["sim_id"][H["cancel_requested"]]) sims_done = np.count_nonzero(H["sim_ended"]) test.save_output(__file__) - assert sims_done == max_evals, f"Num of completed simulations should be {max_evals}. Is {sims_done}" + assert sims_done == max_evals, f"No. of completed simulations should be {max_evals}. 
Is {sims_done}" # The following line is only to cover parts of tstd2theta tstd2theta(H[0]["thetas"].squeeze(), hard=False) diff --git a/libensemble/tests/regression_tests/test_persistent_surmise_killsims.py b/libensemble/tests/regression_tests/test_persistent_surmise_killsims.py index 31984bee8..fb074fae2 100644 --- a/libensemble/tests/regression_tests/test_persistent_surmise_killsims.py +++ b/libensemble/tests/regression_tests/test_persistent_surmise_killsims.py @@ -76,7 +76,7 @@ libE_specs["sim_dirs_make"] = True # To keep all - make sim dirs # libE_specs["use_worker_dirs"] = True # To overwrite - make worker dirs only - # Rename ensemble dir for non-inteference with other regression tests + # Rename ensemble dir for non-interference with other regression tests libE_specs["ensemble_dir_path"] = "ensemble_calib_kills" sim_specs = { diff --git a/libensemble/tests/regression_tests/test_with_app_persistent_aposmm_tao_nm.py b/libensemble/tests/regression_tests/test_with_app_persistent_aposmm_tao_nm.py index 87c152dbf..acffba6c7 100644 --- a/libensemble/tests/regression_tests/test_with_app_persistent_aposmm_tao_nm.py +++ b/libensemble/tests/regression_tests/test_with_app_persistent_aposmm_tao_nm.py @@ -5,13 +5,13 @@ This is to test the scenario, where OpenMPI will fail due to nested MPI, if PETSc is imported at global level. -Execute via one of the following commands (e.g. 3 workers): +Execute via one of the following commands (e.g., 3 workers): mpiexec -np 4 python test_persistent_aposmm_tao_nm.py python test_with_app_persistent_aposmm_tao_nm.py --nworkers 3 --comms local python test_with_app_persistent_aposmm_tao_nm.py --nworkers 3 --comms tcp When running with the above commands, the number of concurrent evaluations of -the objective function will be 2, as one of the three workers will be the +the objective function will be 2, since one of the three workers will be the persistent generator. 
""" @@ -35,7 +35,7 @@ from libensemble.sim_funcs.var_resources import multi_points_with_variable_resources as sim_f from libensemble.tools import add_unique_random_streams, parse_args -# For OpenMPI must not have these lines, allowing PETSc to import in function. +# For OpenMPI the following lines cannot be used, thus allowing PETSc to import. # import libensemble.gen_funcs # libensemble.gen_funcs.rc.aposmm_optimizers = "petsc" diff --git a/libensemble/tests/scaling_tests/forces/forces_adv/forces_simf.py b/libensemble/tests/scaling_tests/forces/forces_adv/forces_simf.py index 2348aac50..25097205d 100644 --- a/libensemble/tests/scaling_tests/forces/forces_adv/forces_simf.py +++ b/libensemble/tests/scaling_tests/forces/forces_adv/forces_simf.py @@ -2,6 +2,7 @@ import time import numpy as np + from libensemble.message_numbers import TASK_FAILED, WORKER_DONE, WORKER_KILL MAX_SEED = 32767 diff --git a/libensemble/tests/scaling_tests/forces/forces_gpu/readme.md b/libensemble/tests/scaling_tests/forces/forces_gpu/readme.md index 7099ad971..c81833ea8 100644 --- a/libensemble/tests/scaling_tests/forces/forces_gpu/readme.md +++ b/libensemble/tests/scaling_tests/forces/forces_gpu/readme.md @@ -30,7 +30,7 @@ see `forces_gpu_var_resources`). Naive Electrostatics Code Test This is a synthetic, highly configurable simulation function. This test aims -to show libEnsemble's capability to set assign GPU resources as needed by each +to show libEnsemble's capability to assign GPU resources as needed by each worker and to launch application instances via the `MPIExecutor`. 
### Forces Mini-App diff --git a/libensemble/tests/scaling_tests/forces/forces_gpu/run_libe_forces.py b/libensemble/tests/scaling_tests/forces/forces_gpu/run_libe_forces.py index f6addea54..932709d57 100644 --- a/libensemble/tests/scaling_tests/forces/forces_gpu/run_libe_forces.py +++ b/libensemble/tests/scaling_tests/forces/forces_gpu/run_libe_forces.py @@ -11,7 +11,7 @@ and confirm it is running on the GPU (this is given clearly in the output). To mock on a non-GPU system, uncomment the resource_info line in libE_specs. You -will compile forces without -DGPU option. It is recommended that the lb/ub for +will compile forces without -DGPU option. It is recommended that the ub and/or lb for particle counts are reduced for CPU performance. """ @@ -84,7 +84,7 @@ ensemble.run() if ensemble.is_manager: - # Note, this will change if change sim_max, nworkers, lb/ub etc... + # Note, this will change if changing sim_max, nworkers, lb, ub, etc. if ensemble.exit_criteria.sim_max == 8: chksum = np.sum(ensemble.H["energy"]) assert np.isclose(chksum, 96288744.35136001), f"energy check sum is {chksum}" diff --git a/libensemble/tests/scaling_tests/forces/forces_gpu_var_resources/readme.md b/libensemble/tests/scaling_tests/forces/forces_gpu_var_resources/readme.md index 86d7141ef..b76aeed83 100644 --- a/libensemble/tests/scaling_tests/forces/forces_gpu_var_resources/readme.md +++ b/libensemble/tests/scaling_tests/forces/forces_gpu_var_resources/readme.md @@ -1,6 +1,6 @@ ## Tutorial -This example referred to in the tutorial **Executor - Assign GPUs**. +This example is referred to in the tutorial **Executor - Assign GPUs**. When the generator creates parameters for each simulation, it sets a number of GPUs required for the simulation. 
Resources are dynamically assigned to diff --git a/libensemble/tests/scaling_tests/forces/forces_gpu_var_resources/run_libe_forces.py b/libensemble/tests/scaling_tests/forces/forces_gpu_var_resources/run_libe_forces.py index e89d02fdd..8eec289a3 100644 --- a/libensemble/tests/scaling_tests/forces/forces_gpu_var_resources/run_libe_forces.py +++ b/libensemble/tests/scaling_tests/forces/forces_gpu_var_resources/run_libe_forces.py @@ -6,7 +6,7 @@ The forces.c application should be built by setting the GPU preprocessor condition (usually -DGPU) in addition to openMP GPU flags for the given system. See examples in ../forces_app/build_forces.sh. We recommend running forces.x standalone first -and confirm it is running on the GPU (this is given clearly in the output). +and confirming it is running on the GPU (this is given clearly in the output). A number of GPUs is requested based on the number of particles (randomly chosen from the range for each simulation). For simplicity, the number of GPUs requested @@ -14,7 +14,7 @@ count. To mock on a non-GPU system, uncomment the resource_info line in libE_specs. You -will compile forces without -DGPU option. It is recommended that the lb/ub for +will compile forces without the -DGPU option. It is recommended that the ub and/or lb for particle counts are reduced for CPU performance. """ @@ -82,7 +82,7 @@ }, ) - # Instruct libEnsemble to exit after this many simulations + # Instruct libEnsemble to exit after this many simulations. ensemble.exit_criteria = ExitCriteria(sim_max=8) # Seed random streams for each worker, particularly for gen_f @@ -92,10 +92,10 @@ ensemble.run() if ensemble.is_manager: - # Note, this will change if change sim_max, nworkers, lb/ub etc... + # Note, this will change if changing sim_max, nworkers, lb, ub, etc. 
if ensemble.exit_criteria.sim_max == 8: chksum = np.sum(ensemble.H["energy"]) assert np.isclose(chksum, 96288744.35136001), f"energy check sum is {chksum}" print("Checksum passed") else: - print("Run complete. A checksum has not been provided for the given sim_max") + print("Run complete; a checksum has not been provided for the given sim_max") diff --git a/libensemble/tests/scaling_tests/forces/forces_multi_app/run_libe_forces.py b/libensemble/tests/scaling_tests/forces/forces_multi_app/run_libe_forces.py index e0a991fde..235aa9227 100644 --- a/libensemble/tests/scaling_tests/forces/forces_multi_app/run_libe_forces.py +++ b/libensemble/tests/scaling_tests/forces/forces_multi_app/run_libe_forces.py @@ -1,8 +1,8 @@ #!/usr/bin/env python """ -This example runs two difference applications, one that uses only CPUs and one -that uses GPUs. Both uses a variable number of processors. The GPU application +This example runs two different applications, one that uses only CPUs and one +that uses GPUs. Both use a variable number of processors. The GPU application uses one GPU per processor. As the generator creates simulations, it randomly assigns between one and max_proc processors to each simulation, and also randomly assigns which application is to be run. 
@@ -15,10 +15,10 @@ It is recommended to run this test such that: ((nworkers - 1) - gpus_on_node) >= gen_specs["user"][max_procs] -E.g, if running on one node with four GPUs, then use: +E.g., if running on one node with four GPUs, then use: python run_libE_forces.py --comms local --nworkers 9 -E.g, if running on one node with eight GPUs, then use: +E.g., if running on one node with eight GPUs, then use: python run_libE_forces.py --comms local --nworkers 17 """ @@ -70,12 +70,12 @@ ensemble.gen_specs = GenSpecs( gen_f=gen_f, - inputs=[], # No input when start persistent generator + inputs=[], # No input when starting persistent generator persis_in=["sim_id"], # Return sim_ids of evaluated points to generator outputs=[ ("x", float, (1,)), - ("num_procs", int), # num_procs auto given to sim when use MPIExecutor - ("num_gpus", int), # num_gpus auto given to sim when use MPIExecutor + ("num_procs", int), # num_procs auto given to sim when using MPIExecutor + ("num_gpus", int), # num_gpus auto given to sim when using MPIExecutor ("app_type", "S10"), # select app type (cpu_app or gpu_app) ], user={ @@ -94,17 +94,17 @@ }, ) - # Instruct libEnsemble to exit after this many simulations + # Instruct libEnsemble to exit after this many simulations. ensemble.exit_criteria = ExitCriteria(sim_max=nsim_workers * 2) - # Seed random streams for each worker, particularly for gen_f + # Seed random streams for each worker, particularly for gen_f. ensemble.add_random_streams() # Run ensemble ensemble.run() if ensemble.is_manager: - # Note, this will change if change sim_max, nworkers, lb/ub etc... + # Note, this will change if changing sim_max, nworkers, lb, ub, etc. 
chksum = np.sum(ensemble.H["energy"]) print(f"Final energy checksum: {chksum}") diff --git a/libensemble/tests/scaling_tests/forces/forces_simple/run_libe_forces.py b/libensemble/tests/scaling_tests/forces/forces_simple/run_libe_forces.py index b5cb58a5c..a70477748 100644 --- a/libensemble/tests/scaling_tests/forces/forces_simple/run_libe_forces.py +++ b/libensemble/tests/scaling_tests/forces/forces_simple/run_libe_forces.py @@ -69,5 +69,5 @@ ensemble.run() if ensemble.is_manager: - # Note, this will change if change sim_max, nworkers, lb/ub etc... + # Note, this will change if changing sim_max, nworkers, lb, ub, etc. print(f'Final energy checksum: {np.sum(ensemble.H["energy"])}') diff --git a/libensemble/tests/scaling_tests/forces/funcx_forces/cleanup.sh b/libensemble/tests/scaling_tests/forces/globus_compute_forces/cleanup.sh similarity index 100% rename from libensemble/tests/scaling_tests/forces/funcx_forces/cleanup.sh rename to libensemble/tests/scaling_tests/forces/globus_compute_forces/cleanup.sh diff --git a/libensemble/tests/scaling_tests/forces/funcx_forces/forces_simf.py b/libensemble/tests/scaling_tests/forces/globus_compute_forces/forces_simf.py similarity index 100% rename from libensemble/tests/scaling_tests/forces/funcx_forces/forces_simf.py rename to libensemble/tests/scaling_tests/forces/globus_compute_forces/forces_simf.py diff --git a/libensemble/tests/scaling_tests/forces/funcx_forces/globus_compute_forces.yaml b/libensemble/tests/scaling_tests/forces/globus_compute_forces/globus_compute_forces.yaml similarity index 88% rename from libensemble/tests/scaling_tests/forces/funcx_forces/globus_compute_forces.yaml rename to libensemble/tests/scaling_tests/forces/globus_compute_forces/globus_compute_forces.yaml index 35545979b..c7c0463bc 100644 --- a/libensemble/tests/scaling_tests/forces/funcx_forces/globus_compute_forces.yaml +++ b/libensemble/tests/scaling_tests/forces/globus_compute_forces/globus_compute_forces.yaml @@ -6,7 +6,7 @@ exit_criteria: 
sim_max: 8 sim_specs: - sim_f: libensemble.tests.scaling_tests.forces.funcx_forces.forces_simf.run_forces_globus_compute + sim_f: libensemble.tests.scaling_tests.forces.globus_compute_forces.forces_simf.run_forces_globus_compute inputs: - x outputs: diff --git a/libensemble/tests/scaling_tests/forces/funcx_forces/readme.md b/libensemble/tests/scaling_tests/forces/globus_compute_forces/readme.md similarity index 76% rename from libensemble/tests/scaling_tests/forces/funcx_forces/readme.md rename to libensemble/tests/scaling_tests/forces/globus_compute_forces/readme.md index 3cda13f82..b5f6250ec 100644 --- a/libensemble/tests/scaling_tests/forces/funcx_forces/readme.md +++ b/libensemble/tests/scaling_tests/forces/globus_compute_forces/readme.md @@ -18,25 +18,20 @@ See below. ### Running with libEnsemble. -On the remote machine: - - pip install funcx-endpoint - funcx-endpoint configure forces - -Configure the endpoint's `config.py` to include your project information and +On the remote machine, configure the endpoint's `config.py` to include your project information and match the machine's specifications. See [here](https://funcx.readthedocs.io/en/latest/endpoints.html#theta-alcf) for an example ALCF Theta configuration. Then to run with local comms (multiprocessing) with one manager and `N` workers: - python run_libe_forces_funcx.py --comms local --nworkers N + python run_libe_forces_globus_compute.py --comms local --nworkers N To run with MPI comms using one manager and `N-1` workers: - mpirun -np N python run_libe_forces.py + mpirun -np N python run_libe_forces_globus_compute.py -Application parameters can be adjusted in `funcx_forces.yaml`. +Application parameters can be adjusted in `globus_compute_forces.yaml`. Note that each function and path must be accessible and/or importable on the remote machine. Absolute paths are recommended. 
diff --git a/libensemble/tests/scaling_tests/forces/funcx_forces/run_libe_forces_funcx.py b/libensemble/tests/scaling_tests/forces/globus_compute_forces/run_libe_forces_globus_compute.py similarity index 100% rename from libensemble/tests/scaling_tests/forces/funcx_forces/run_libe_forces_funcx.py rename to libensemble/tests/scaling_tests/forces/globus_compute_forces/run_libe_forces_globus_compute.py diff --git a/libensemble/tests/standalone_tests/kill_test/log.autotest.txt b/libensemble/tests/standalone_tests/kill_test/log.autotest.txt index 71bda9a90..fa4fbd64e 100644 --- a/libensemble/tests/standalone_tests/kill_test/log.autotest.txt +++ b/libensemble/tests/standalone_tests/kill_test/log.autotest.txt @@ -21,13 +21,13 @@ Either run on local node - or create an allocation of nodes and run:: where kill_type currently is 1 or 2. [1 is the original kill - 2 is using group ID approach] -E.g Single node with 4 processes: +Single node with 4 processes: -------------------------------- kill 1: python killtest.py 1 1 4 kill 2: python killtest.py 2 1 4 -------------------------------- -E.g Two nodes with 2 processes each: +Two nodes with 2 processes each: -------------------------------- kill 1: python killtest.py 1 2 2 kill 2: python killtest.py 2 2 2 diff --git a/libensemble/tests/standalone_tests/kill_test/readme.txt b/libensemble/tests/standalone_tests/kill_test/readme.txt index b8bc112bf..77fae9012 100644 --- a/libensemble/tests/standalone_tests/kill_test/readme.txt +++ b/libensemble/tests/standalone_tests/kill_test/readme.txt @@ -35,13 +35,13 @@ Either run on local node - or create an allocation of nodes and run:: where kill_type currently is 1 or 2. 
[1 is the original kill - 2 is using group ID approach] -E.g Single node with 4 processes: +Single node with 4 processes: -------------------------------- kill 1: python killtest.py 1 1 4 kill 2: python killtest.py 2 1 4 -------------------------------- -E.g Two nodes with 2 processes each: +Two nodes with 2 processes each: -------------------------------- kill 1: python killtest.py 1 2 2 kill 2: python killtest.py 2 2 2 diff --git a/libensemble/tests/standalone_tests/node_concurrency_test/readme.txt b/libensemble/tests/standalone_tests/node_concurrency_test/readme.txt index 60d025e99..028911385 100644 --- a/libensemble/tests/standalone_tests/node_concurrency_test/readme.txt +++ b/libensemble/tests/standalone_tests/node_concurrency_test/readme.txt @@ -9,7 +9,7 @@ The standard timing study does two sets of two batches, and then two sets of four batches and produces a file time.out inside the output directory (which is the platform name appended by datetime). -Be sure to modify project code in submission script as required. +Be sure to modify the project code in the submission script as required. Instructions for Perlmutter --------------------------- diff --git a/libensemble/tests/standalone_tests/node_concurrency_test/run/forces.c b/libensemble/tests/standalone_tests/node_concurrency_test/run/forces.c index 57a7b79a5..aa6498d9c 100755 --- a/libensemble/tests/standalone_tests/node_concurrency_test/run/forces.c +++ b/libensemble/tests/standalone_tests/node_concurrency_test/run/forces.c @@ -1,5 +1,5 @@ /* -------------------------------------------------------------------- - Naive Electrostatics Code Example + Basic Electrostatics Code Example This is designed only as an artificial, highly configurable test code for a libEnsemble sim func. @@ -12,7 +12,7 @@ Added OpenMP options for CPU and GPU. Jan 2022: - Use GPU preprocessor option to compile for GPU (e.g. -DGPU). + Use GPU preprocessor option to compile for GPU (e.g., -DGPU). 
Run executable on N procs: diff --git a/libensemble/tests/unit_tests/test_executor.py b/libensemble/tests/unit_tests/test_executor.py index 4e0555170..85f932408 100644 --- a/libensemble/tests/unit_tests/test_executor.py +++ b/libensemble/tests/unit_tests/test_executor.py @@ -1,5 +1,5 @@ # !/usr/bin/env python -# Integration Test of executor module for libensemble +# Integration test of executor module for libensemble # Test does not require running full libensemble import os import platform @@ -67,7 +67,7 @@ def build_simfuncs(): subprocess.check_call(buildstring.split()) -# This would typically be in the user calling script +# This would typically be in the user calling script. def setup_executor(): """Set up an MPI Executor with sim app""" from libensemble.executors.mpi_executor import MPIExecutor @@ -135,7 +135,7 @@ def is_ompi(): # ----------------------------------------------------------------------------- -# The following would typically be in the user sim_func +# The following would typically be in the user sim_func. def polling_loop(exctr, task, timeout_sec=1, delay=0.05): """Iterate over a loop, polling for an exit condition""" start = time.time() @@ -143,7 +143,7 @@ def polling_loop(exctr, task, timeout_sec=1, delay=0.05): while time.time() - start < timeout_sec: time.sleep(delay) - # Check output file for error + # Check output file for error. if task.stdout_exists(): if "Error" in task.read_stdout() or "error" in task.read_stdout(): print("Found(deliberate) Error in output file - cancelling task") @@ -421,7 +421,7 @@ def test_procs_and_machinefile_logic(): assert task.finished, "task.finished should be True. Returned " + str(task.finished) assert task.state == "FINISHED", "task.state should be FINISHED. Returned " + str(task.state) - # Testing num_procs not num_nodes*procs_per_node (should fail) + # Testing num_procs not num_nodes*procs_per_node (should fail). 
try: task = exctr.submit(calc_type="sim", num_procs=9, num_nodes=2, procs_per_node=5, app_args=args_for_sim) except MPIResourcesException as e: @@ -429,7 +429,7 @@ def test_procs_and_machinefile_logic(): else: assert 0 - # Testing no num_procs (shouldn't fail) + # Testing no num_procs (should not fail). if is_ompi(): task = exctr.submit( calc_type="sim", @@ -445,7 +445,7 @@ def test_procs_and_machinefile_logic(): assert task.finished, "task.finished should be True. Returned " + str(task.finished) assert task.state == "FINISHED", "task.state should be FINISHED. Returned " + str(task.state) - # Testing nothing given (should fail) + # Testing nothing given (should fail). try: task = exctr.submit(calc_type="sim", app_args=args_for_sim) except MPIResourcesException as e: @@ -453,7 +453,7 @@ def test_procs_and_machinefile_logic(): else: assert 0 - # Testing no num_nodes (shouldn't fail) + # Testing no num_nodes (should not fail). task = exctr.submit(calc_type="sim", num_procs=2, procs_per_node=2, app_args=args_for_sim) assert 1 task = polling_loop(exctr, task, delay=0.05) @@ -471,9 +471,9 @@ def test_procs_and_machinefile_logic(): @pytest.mark.extra @pytest.mark.timeout(20) def test_doublekill(): - """Test attempt to kill already killed task + """Test attempt to kill already killed task. - Kill should have no effect (except warning message) and should remain in state killed + Kill should have no effect (except warning message) and should remain in state killed. """ print(f"\nTest: {sys._getframe().f_code.co_name}\n") setup_executor() @@ -495,9 +495,9 @@ def test_doublekill(): @pytest.mark.extra @pytest.mark.timeout(20) def test_finish_and_kill(): - """Test attempt to kill already finished task + """Test attempt to kill already finished task. - Kill should have no effect (except warning message) and should remain in state FINISHED + Kill should have no effect (except warning message) and should remain in state FINISHED. 
""" print(f"\nTest: {sys._getframe().f_code.co_name}\n") setup_executor() @@ -548,7 +548,7 @@ def test_launch_as_gen(): cores = NCORES args_for_sim = "sleep 0.1" - # Try launching as gen when not registered as gen + # Try launching as gen when not registered as gen. try: task = exctr.submit(calc_type="gen", num_procs=cores, app_args=args_for_sim) except ExecutorException as e: @@ -562,7 +562,7 @@ def test_launch_as_gen(): assert task.finished, "task.finished should be True. Returned " + str(task.finished) assert task.state == "FINISHED", "task.state should be FINISHED. Returned " + str(task.state) - # Try launching as 'alloc' which is not a type + # Try launching as 'alloc', which is not a type. try: task = exctr.submit(calc_type="alloc", num_procs=cores, app_args=args_for_sim) except ExecutorException as e: @@ -608,7 +608,7 @@ def test_kill_task_with_no_submit(): else: assert 0 - # Create a task directly with no submit (Not supported for users) + # Create a task directly with no submit (not supported for users). # Debatably make taskID 0 as executor should be deleted if use setup function. # But this allows any task ID. exp_msg = "task libe_task_my_simtask.x_.+has no process ID - check task has been launched" @@ -631,7 +631,7 @@ def test_poll_task_with_no_submit(): setup_executor() exctr = Executor.executor - # Create a task directly with no submit (Not supported for users) + # Create a task directly with no submit (Not supported for users). 
exp_msg = "task libe_task_my_simtask.x_.+has no process ID - check task has been launched" exp_re = re.compile(exp_msg) myapp = exctr.sim_default_app diff --git a/libensemble/version.py b/libensemble/version.py index 2999d005b..5becc17c0 100644 --- a/libensemble/version.py +++ b/libensemble/version.py @@ -1 +1 @@ -__version__ = "0.10.2+dev" +__version__ = "1.0.0" diff --git a/scripts/print_fields.py b/scripts/print_fields.py index f6e4a28c6..5e408f8db 100755 --- a/scripts/print_fields.py +++ b/scripts/print_fields.py @@ -11,7 +11,7 @@ If no fields are supplied the whole array is printed. - You can filter by using conditions, but only Boolean are supported currently e.g: + You can filter by using conditions, but only Booleans are supported currently, e.g.: ./print_fields.py out1.npy -f sim_id x f -c given ~returned would show lines where given is True and returned is False. diff --git a/scripts/readme.rst b/scripts/readme.rst index 0c4513163..516a03a05 100644 --- a/scripts/readme.rst +++ b/scripts/readme.rst @@ -7,12 +7,12 @@ The following scripts must be run in the directory with the ``libE_stats.txt`` file. They extract and plot information from that file. * ``plot_libe_calcs_util_v_time.py``: Extracts worker utilization vs. time plot - (with one-second sampling). Shows number of workers running user sim or gen - functions over time. + (with one-second sampling). Shows the number of workers running user sim or + gen functions over time. * ``plot_libe_tasks_util_v_time.py``: Extracts launched task utilization v time - plot (with one second sampling). Shows number of workers with active tasks, - launched via the executor, over time. + plot (with one-second sampling). Shows the number of workers with active + tasks, launched via the executor, over time. * ``plot_libe_histogram.py``: Creates histogram showing the number of completed/killed/failed user calculations binned by run time.
diff --git a/setup.py b/setup.py index 0ec185370..681f40b81 100644 --- a/setup.py +++ b/setup.py @@ -96,7 +96,7 @@ def run_tests(self): "scripts/libesubmit", ], classifiers=[ - "Development Status :: 4 - Beta", + "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License",