Skip to content

Commit

Permalink
Merge pull request #88 from optimas-org/feature/libE_1.10.0
Browse files Browse the repository at this point in the history
Updating for new libE interface
  • Loading branch information
AngelFP authored Jul 27, 2023
2 parents 65a4562 + fcf20c8 commit 808051e
Show file tree
Hide file tree
Showing 14 changed files with 134 additions and 112 deletions.
2 changes: 1 addition & 1 deletion optimas/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = '0.1.1'
__version__ = '0.2.0'
33 changes: 30 additions & 3 deletions optimas/evaluators/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,16 +13,34 @@ class Evaluator:
sim_function : callable
The simulation function (as defined in libEnsemble) to be used for
carrying out the evaluations.
n_procs : int, optional
The number of processes that will be used for each evaluation. By
default, ``n_procs=1`` if ``n_gpus`` is not given. Otherwise, the
default behavior is to match the number of processes to the number
of GPUs, i.e., ``n_procs=n_gpus``.
n_gpus : int, optional
The number of GPUs that will be made available for each evaluation. BY
default, 1.
The number of GPUs that will be made available for each evaluation. By
default, 0.
"""
def __init__(
self,
sim_function: Callable,
n_gpus: Optional[int] = 1
n_procs: Optional[int] = None,
n_gpus: Optional[int] = None
) -> None:
self.sim_function = sim_function
# If no resources are specified, use 1 CPU an 0 GPUs.
if n_procs is None and n_gpus is None:
n_procs = 1
n_gpus = 0
# If `n_gpus` is given without specifying `n_procs`, match processes
# to GPUs.
elif n_procs is None:
n_procs = n_gpus
# If `n_procs` is given without specifying `n_gpus`, do not use GPUs.
elif n_gpus is None:
n_gpus = 0
self._n_procs = n_procs
self._n_gpus = n_gpus
self._initialized = False

Expand Down Expand Up @@ -55,6 +73,7 @@ def get_sim_specs(
+ [(var.name, float) for var in varying_parameters]
),
'user': {
'n_procs': self._n_procs,
'n_gpus': self._n_gpus,
}
}
Expand All @@ -67,6 +86,14 @@ def get_libe_specs(self) -> Dict:
libE_specs = {}
return libE_specs

def get_run_params(self) -> Dict:
"""Return run parameters for this evaluator."""
run_params = {
'num_procs': self._n_procs,
'num_gpus': self._n_gpus
}
return run_params

def initialize(self) -> None:
"""Initialize the evaluator."""
if not self._initialized:
Expand Down
16 changes: 12 additions & 4 deletions optimas/evaluators/function_evaluator.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,18 +15,26 @@ class FunctionEvaluator(Evaluator):
----------
function : callable
The function to be evaluated.
n_procs : int, optional
The number of processes that will be used for each evaluation. By
default, ``n_procs=1`` if ``n_gpus`` is not given. Otherwise, the
default behavior is to match the number of processes to the number
of GPUs, i.e., ``n_procs=n_gpus``.
n_gpus : int, optional
The number of GPUs that will be made available for each evaluation. BY
default, 1.
The number of GPUs that will be made available for each evaluation. By
default, 0.
"""
def __init__(
self,
function: Callable,
n_gpus: Optional[int] = 1
n_procs: Optional[int] = None,
n_gpus: Optional[int] = None
) -> None:
super().__init__(
sim_function=run_function,
n_gpus=n_gpus)
n_procs=n_procs,
n_gpus=n_gpus
)
self.function = function

def get_sim_specs(
Expand Down
7 changes: 7 additions & 0 deletions optimas/evaluators/multitask_evaluator.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,13 @@ def get_libe_specs(self) -> Dict:
# Use only the combined specs.
return libE_specs_1

def get_run_params(self) -> Dict:
"""Return run parameters for this evaluator."""
run_params = {}
for task, evaluator in zip(self.tasks, self.task_evaluators):
run_params[task.name] = evaluator.get_run_params()
return run_params

def _initialize(self) -> None:
"""Initialize the evaluator."""
if isinstance(self.task_evaluators[0], TemplateEvaluator):
Expand Down
30 changes: 18 additions & 12 deletions optimas/evaluators/template_evaluator.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,30 +27,33 @@ class TemplateEvaluator(Evaluator):
sim_files : list of str, optional.
List of files that are needed to carry out the simulation and that
will be copied to the simulation directory.
n_procs : int, optional
The number of processes that will be used for each evaluation. By
default, ``n_procs=1`` if ``n_gpus`` is not given. Otherwise, the
default behavior is to match the number of processes to the number
of GPUs, i.e., ``n_procs=n_gpus``.
n_gpus : int, optional
The number of GPUs that will be made available for each simulation. By
default, 1.
n_proc : int, optional
The number of processes that will be made used for each simulation. By
default, 1. (Currently unused)
The number of GPUs that will be made available for each evaluation. By
default, 0.
"""
def __init__(
self,
sim_template: str,
analysis_func: Callable,
executable: Optional[str] = None,
sim_files: Optional[List[str]] = None,
n_gpus: Optional[int] = 1,
n_proc: Optional[int] = 1
n_procs: Optional[int] = None,
n_gpus: Optional[int] = None
) -> None:
super().__init__(
sim_function=run_template_simulation,
n_gpus=n_gpus)
n_procs=n_procs,
n_gpus=n_gpus
)
self.sim_template = sim_template
self.analysis_func = analysis_func
self.executable = executable
self.sim_files = [] if sim_files is None else sim_files
self.n_proc = n_proc
self._app_name = 'sim'

@property
Expand Down Expand Up @@ -80,16 +83,19 @@ def get_sim_specs(
sim_specs['user']['analysis_func'] = self.analysis_func
sim_specs['user']['sim_template'] = os.path.basename(self.sim_template)
sim_specs['user']['app_name'] = self._app_name
sim_specs['user']['n_proc'] = self.n_proc
return sim_specs

def get_libe_specs(self) -> Dict:
"""Get a dictionary with the ``libE_specs`` as expected
by ``libEnsemble``
"""
libE_specs = super().get_libe_specs()
# Add sim_template and sim_files to the list of files to be copied
libE_specs['sim_dir_copy_files'] = [self.sim_template] + self.sim_files
# Add sim_template and sim_files to the list of files to be copied.
# Use the absolute path to the files to get around a libEnsemble bug
# when using a workflow dir.
sim_files = [self.sim_template] + self.sim_files
sim_files = [os.path.abspath(file) for file in sim_files]
libE_specs['sim_dir_copy_files'] = sim_files
# Force libEnsemble to create a directory for each simulation
# default value, if not defined
libE_specs['sim_dirs_make'] = True
Expand Down
38 changes: 34 additions & 4 deletions optimas/explorations/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,9 @@
from libensemble.tools import save_libE_output, add_unique_random_streams
from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens
from libensemble.executors.mpi_executor import MPIExecutor
from libensemble.resources.resources import Resources
from libensemble.executors.executor import Executor
from libensemble.logger import LogConfig

from optimas.generators.base import Generator
from optimas.evaluators.base import Evaluator
Expand Down Expand Up @@ -88,11 +91,15 @@ def run(self) -> None:
persis_info = add_unique_random_streams({}, self.sim_workers + 2)

# If specified, allocate dedicated resources for the generator.
if self.generator.dedicated_resources:
if self.generator.dedicated_resources and self.generator.use_cuda:
persis_info['gen_resources'] = 1
persis_info['gen_use_gpus'] = True
else:
self.libE_specs['zero_resource_workers'] = [1]

# Get gen_specs and sim_specs.
gen_specs = self.generator.get_gen_specs(self.sim_workers)
run_params = self.evaluator.get_run_params()
gen_specs = self.generator.get_gen_specs(self.sim_workers, run_params)
sim_specs = self.evaluator.get_sim_specs(
self.generator.varying_parameters,
self.generator.objectives,
Expand Down Expand Up @@ -127,7 +134,12 @@ def run(self) -> None:

# Save history.
if is_master:
save_libE_output(history, persis_info, __file__, nworkers)
save_libE_output(
history, persis_info, __file__, nworkers,
dest_path=os.path.abspath(self.exploration_dir_path))

# Reset state of libEnsemble.
self._reset_libensemble()

def _create_executor(self) -> None:
"""Create libEnsemble executor."""
Expand Down Expand Up @@ -189,7 +201,9 @@ def _set_default_libe_specs(self) -> None:
+ " not recognized. Possible values are 'local' or 'mpi'."
)
# Set exploration directory path.
libE_specs['ensemble_dir_path'] = self.exploration_dir_path
libE_specs['ensemble_dir_path'] = 'evaluations'
libE_specs['use_workflow_dir'] = True
libE_specs['workflow_dir_path'] = self.exploration_dir_path

# get specs from generator and evaluator
gen_libE_specs = self.generator.get_libe_specs()
Expand All @@ -205,3 +219,19 @@ def _create_alloc_specs(self) -> None:
'async_return': self.run_async
}
}

def _reset_libensemble(self) -> None:
"""Reset the state of libEnsemble.
After calling `libE`, some libEnsemble attributes do not come back to
their original states. This leads to issues if another `Exploration`
run is launched within the same script. This method resets the
necessary libEnsemble attributes to their original state.
"""
if Resources.resources is not None:
del Resources.resources
Resources.resources = None
if Executor.executor is not None:
del Executor.executor
Executor.executor = None
LogConfig.config.logger_set = False
12 changes: 8 additions & 4 deletions optimas/gen_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ def persistent_generator(H, persis_info, gen_specs, libE_info):
# GPU. This GPU will only be used for the generator and will not be
# available for the simulation workers.
else:
resources.set_env_to_slots('CUDA_VISIBLE_DEVICES')
resources.set_env_to_gpus('CUDA_VISIBLE_DEVICES')

# Get generator, objectives, and parameters to analyze.
generator = gen_specs['user']['generator']
Expand Down Expand Up @@ -57,15 +57,19 @@ def persistent_generator(H, persis_info, gen_specs, libE_info):
for var, val in zip(trial.varying_parameters,
trial.parameter_values):
H_o[var.name][i] = val
run_params = gen_specs['user']['run_params']
if 'task' in H_o.dtype.names:
H_o['task'][i] = trial.trial_type
run_params = run_params[trial.trial_type]
if trial.custom_parameters is not None:
for par in trial.custom_parameters:
H_o[par.save_name][i] = getattr(trial, par.name)
H_o['trial_index'][i] = trial.index
H_o['resource_sets'][i] = 1
n_failed_gens = np.sum(H_o['resource_sets'] == 0)
H_o = H_o[H_o['resource_sets'] > 0]
H_o['num_procs'][i] = run_params["num_procs"]
H_o['num_gpus'][i] = run_params["num_gpus"]

n_failed_gens = np.sum(H_o['num_procs'] == 0)
H_o = H_o[H_o['num_procs'] > 0]

# Send data and get results from finished simulation
# Blocking call: waits for simulation results to be sent by the manager
Expand Down
5 changes: 3 additions & 2 deletions optimas/generators/ax/developer/multitask.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,11 +122,12 @@ def __init__(

def get_gen_specs(
self,
sim_workers: int
sim_workers: int,
run_params: dict
) -> Dict:
"""Get the libEnsemble gen_specs."""
# Get base specs.
gen_specs = super().get_gen_specs(sim_workers)
gen_specs = super().get_gen_specs(sim_workers, run_params)
# Add task to output parameters.
max_length = max([len(self.lofi_task.name), len(self.hifi_task.name)])
gen_specs['out'].append(('task', str, max_length))
Expand Down
10 changes: 7 additions & 3 deletions optimas/generators/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -243,7 +243,8 @@ def save_model_to_file(self) -> None:

def get_gen_specs(
self,
sim_workers: int
sim_workers: int,
run_params: dict
) -> Dict:
"""Get the libEnsemble gen_specs.
Expand All @@ -265,7 +266,8 @@ def get_gen_specs(
),
'out': (
[(var.name, var.dtype) for var in self._varying_parameters] +
[('resource_sets', int), ('trial_index', int)] +
[('num_procs', int), ('num_gpus', int)] +
[('trial_index', int)] +
[(par.save_name, par.dtype)
for par in self._custom_trial_parameters]
),
Expand All @@ -277,7 +279,9 @@ def get_gen_specs(
# Allow generator to run on GPU.
'use_cuda': self._use_cuda,
# GPU in which to run generator.
'gpu_id': self._gpu_id
'gpu_id': self._gpu_id,
                # Number of processes and GPUs required per evaluation.
'run_params': run_params
}
}
return gen_specs
Expand Down
Loading

0 comments on commit 808051e

Please sign in to comment.