From d41225ae2ccc48a7eb5a50a646bdfcae59a69ab1 Mon Sep 17 00:00:00 2001
From: Artur Niederfahrenhorst
Date: Fri, 17 Mar 2023 01:28:08 +0100
Subject: [PATCH] [RLlib] Remove all default config objects and rllib/agents
 (#33242)

Signed-off-by: Artur Niederfahrenhorst
Signed-off-by: elliottower
---
 rllib/algorithms/a2c/__init__.py | 4 +-
 rllib/algorithms/a2c/a2c.py | 18 -------
 rllib/algorithms/a3c/__init__.py | 4 +-
 rllib/algorithms/a3c/a3c.py | 18 -------
 rllib/algorithms/algorithm_config.py | 6 +--
 rllib/algorithms/alpha_star/__init__.py | 2 -
 rllib/algorithms/alpha_star/alpha_star.py | 18 -------
 rllib/algorithms/alpha_zero/__init__.py | 2 -
 rllib/algorithms/alpha_zero/alpha_zero.py | 19 +-------
 rllib/algorithms/apex_ddpg/__init__.py | 2 -
 rllib/algorithms/apex_ddpg/apex_ddpg.py | 19 +-------
 rllib/algorithms/apex_dqn/__init__.py | 2 -
 rllib/algorithms/apex_dqn/apex_dqn.py | 19 +-------
 rllib/algorithms/appo/__init__.py | 3 +-
 rllib/algorithms/appo/appo.py | 18 -------
 rllib/algorithms/ars/__init__.py | 3 +-
 rllib/algorithms/ars/ars.py | 18 -------
 rllib/algorithms/ars/ars_torch_policy.py | 2 +-
 rllib/algorithms/bandit/bandit.py | 18 -------
 rllib/algorithms/bandit/bandit_tf_policy.py | 2 +-
 rllib/algorithms/bc/__init__.py | 4 +-
 rllib/algorithms/bc/bc.py | 18 -------
 rllib/algorithms/cql/__init__.py | 3 +-
 rllib/algorithms/cql/cql.py | 18 -------
 rllib/algorithms/cql/cql_tf_policy.py | 2 +-
 rllib/algorithms/cql/cql_torch_policy.py | 2 +-
 rllib/algorithms/ddpg/__init__.py | 3 +-
 rllib/algorithms/ddpg/ddpg.py | 18 -------
 rllib/algorithms/ddppo/__init__.py | 3 +-
 rllib/algorithms/ddppo/ddppo.py | 18 -------
 rllib/algorithms/dqn/__init__.py | 3 +-
 rllib/algorithms/dqn/dqn.py | 17 -------
 rllib/algorithms/dqn/dqn_tf_policy.py | 2 +-
 rllib/algorithms/dqn/dqn_torch_policy.py | 2 +-
 rllib/algorithms/dreamer/__init__.py | 2 -
 rllib/algorithms/dreamer/dreamer.py | 18 -------
 rllib/algorithms/es/__init__.py | 4 +-
 rllib/algorithms/es/es.py | 17 -------
 rllib/algorithms/es/es_torch_policy.py | 2 +-
 rllib/algorithms/impala/__init__.py | 3 +-
 rllib/algorithms/impala/impala.py | 18 -------
 rllib/algorithms/maddpg/__init__.py | 3 +-
 rllib/algorithms/maddpg/maddpg.py | 19 +-------
 rllib/algorithms/maml/__init__.py | 3 +-
 rllib/algorithms/maml/maml.py | 19 +-------
 rllib/algorithms/maml/maml_torch_policy.py | 2 +-
 rllib/algorithms/marwil/__init__.py | 3 --
 rllib/algorithms/marwil/marwil.py | 19 +-------
 rllib/algorithms/mbmpo/__init__.py | 3 +-
 rllib/algorithms/mbmpo/mbmpo.py | 19 +-------
 rllib/algorithms/pg/__init__.py | 4 +-
 rllib/algorithms/pg/pg.py | 18 -------
 rllib/algorithms/ppo/__init__.py | 3 +-
 rllib/algorithms/ppo/ppo.py | 20 +-------
 rllib/algorithms/qmix/__init__.py | 4 +-
 rllib/algorithms/qmix/qmix.py | 18 -------
 rllib/algorithms/r2d2/__init__.py | 3 +-
 rllib/algorithms/r2d2/r2d2.py | 18 -------
 rllib/algorithms/r2d2/r2d2_tf_policy.py | 2 +-
 rllib/algorithms/r2d2/r2d2_torch_policy.py | 2 +-
 rllib/algorithms/sac/__init__.py | 10 +---
 rllib/algorithms/sac/rnnsac.py | 18 +------
 rllib/algorithms/sac/rnnsac_torch_policy.py | 2 +-
 rllib/algorithms/sac/sac.py | 18 -------
 rllib/algorithms/sac/sac_tf_policy.py | 2 +-
 rllib/algorithms/sac/sac_torch_policy.py | 2 +-
 rllib/algorithms/simple_q/__init__.py | 2 -
 rllib/algorithms/simple_q/simple_q.py | 19 +-------
 rllib/algorithms/slateq/__init__.py | 2 -
 rllib/algorithms/slateq/slateq.py | 19 +-------
 rllib/algorithms/slateq/slateq_tf_policy.py | 2 +-
 .../algorithms/slateq/slateq_torch_policy.py | 2 +-
 rllib/algorithms/td3/__init__.py | 3 +-
 rllib/algorithms/td3/td3.py | 18 -------
 .../backward_compat/test_backward_compat.py | 28 -----------
 rllib/tests/test_gpus.py | 31 ++++++------
 rllib/tests/test_local.py | 14 +++---
 rllib/tests/test_nested_action_spaces.py | 10 ++--
 rllib/tests/test_placement_groups.py | 47 +++++++++++--------
 79 files changed, 108 insertions(+), 697 deletions(-)

diff --git a/rllib/algorithms/a2c/__init__.py b/rllib/algorithms/a2c/__init__.py
index dfc7de5a6fa9..8509d972b0b2 100644
--- a/rllib/algorithms/a2c/__init__.py
+++ b/rllib/algorithms/a2c/__init__.py
@@ -1,3 +1,3 @@
-from ray.rllib.algorithms.a2c.a2c import A2CConfig, A2C, A2C_DEFAULT_CONFIG
+from ray.rllib.algorithms.a2c.a2c import A2CConfig, A2C
 
-__all__ = ["A2CConfig", "A2C", "A2C_DEFAULT_CONFIG"]
+__all__ = ["A2CConfig", "A2C"]
diff --git a/rllib/algorithms/a2c/a2c.py b/rllib/algorithms/a2c/a2c.py
index c2b666b99325..92ebcee5f99a 100644
--- a/rllib/algorithms/a2c/a2c.py
+++ b/rllib/algorithms/a2c/a2c.py
@@ -10,7 +10,6 @@
 )
 from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
 from ray.rllib.utils.annotations import override
-from ray.rllib.utils.deprecation import Deprecated
 from ray.rllib.utils.metrics import (
     APPLY_GRADS_TIMER,
     COMPUTE_GRADS_TIMER,
@@ -241,20 +240,3 @@ def training_step(self) -> ResultDict:
             train_results = {DEFAULT_POLICY_ID: info}
 
         return train_results
-
-
-# Deprecated: Use ray.rllib.algorithms.a2c.A2CConfig instead!
-class _deprecated_default_config(dict):
-    def __init__(self):
-        super().__init__(A2CConfig().to_dict())
-
-    @Deprecated(
-        old="ray.rllib.agents.a3c.a2c.A2C_DEFAULT_CONFIG",
-        new="ray.rllib.algorithms.a2c.a2c.A2CConfig(...)",
-        error=True,
-    )
-    def __getitem__(self, item):
-        return super().__getitem__(item)
-
-
-A2C_DEFAULT_CONFIG = _deprecated_default_config()
diff --git a/rllib/algorithms/a3c/__init__.py b/rllib/algorithms/a3c/__init__.py
index 415afda039c7..e003029d470f 100644
--- a/rllib/algorithms/a3c/__init__.py
+++ b/rllib/algorithms/a3c/__init__.py
@@ -1,3 +1,3 @@
-from ray.rllib.algorithms.a3c.a3c import A3CConfig, A3C, DEFAULT_CONFIG
+from ray.rllib.algorithms.a3c.a3c import A3CConfig, A3C
 
-__all__ = ["A3CConfig", "A3C", "DEFAULT_CONFIG"]
+__all__ = ["A3CConfig", "A3C"]
diff --git a/rllib/algorithms/a3c/a3c.py b/rllib/algorithms/a3c/a3c.py
index e6a119dd10c6..4440266fa200 100644
--- a/rllib/algorithms/a3c/a3c.py
+++ b/rllib/algorithms/a3c/a3c.py
@@ -6,7 +6,6 @@
 from ray.rllib.evaluation.rollout_worker import RolloutWorker
 from ray.rllib.policy.policy import Policy
 from ray.rllib.utils.annotations import override
-from ray.rllib.utils.deprecation import Deprecated
 from ray.rllib.utils.metrics import (
     APPLY_GRADS_TIMER,
     GRAD_WAIT_TIMER,
@@ -251,20 +250,3 @@ def sample_and_compute_grads(worker: RolloutWorker) -> Dict[str, Any]:
             )
 
         return learner_info_builder.finalize()
-
-
-# Deprecated: Use ray.rllib.algorithms.a3c.A3CConfig instead!
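
With A2C_DEFAULT_CONFIG and the a3c DEFAULT_CONFIG dicts removed, callers construct an A2CConfig (or A3CConfig) object instead. A minimal sketch of the replacement pattern, assuming the standard training() keys lr and train_batch_size and the CartPole-v1 env already used elsewhere in this patch:

    from ray.rllib.algorithms.a2c import A2CConfig

    # Old: config = A2C_DEFAULT_CONFIG.copy(); config["num_workers"] = 2
    # New: chain the typed setter methods on the config object.
    config = (
        A2CConfig()
        .environment("CartPole-v1")
        .rollouts(num_rollout_workers=2)
        .training(lr=0.001, train_batch_size=500)
    )
    algo = config.build()  # replaces A2C(config, env="CartPole-v1")
    print(algo.train())
    algo.stop()

Code paths that still expect a plain dict can call config.to_dict(), which is exactly what the removed _deprecated_default_config shims wrapped.
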
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(A3CConfig().to_dict()) - - @Deprecated( - old="ray.rllib.agents.a3c.a3c.DEFAULT_CONFIG", - new="ray.rllib.algorithms.a3c.a3c.A3CConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index bc5009060261..293a5d56551e 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -159,9 +159,9 @@ def from_dict(cls, config_dict: dict) -> "AlgorithmConfig": """Creates an AlgorithmConfig from a legacy python config dict. Examples: - >>> from ray.rllib.algorithms.ppo.ppo import DEFAULT_CONFIG, PPOConfig - >>> ppo_config = PPOConfig.from_dict(DEFAULT_CONFIG) - >>> ppo = ppo_config.build(env="Pendulum-v1") + >>> from ray.rllib.algorithms.ppo.ppo import PPOConfig # doctest: +SKIP + >>> ppo_config = PPOConfig.from_dict({...}) # doctest: +SKIP + >>> ppo = ppo_config.build(env="Pendulum-v1") # doctest: +SKIP Args: config_dict: The legacy formatted python config dict for some algorithm. diff --git a/rllib/algorithms/alpha_star/__init__.py b/rllib/algorithms/alpha_star/__init__.py index b85e5dcf3632..53e1e3563561 100644 --- a/rllib/algorithms/alpha_star/__init__.py +++ b/rllib/algorithms/alpha_star/__init__.py @@ -1,11 +1,9 @@ from ray.rllib.algorithms.alpha_star.alpha_star import ( AlphaStar, AlphaStarConfig, - DEFAULT_CONFIG, ) __all__ = [ "AlphaStar", "AlphaStarConfig", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/alpha_star/alpha_star.py b/rllib/algorithms/alpha_star/alpha_star.py index e829bde1900a..02d05fcc4324 100644 --- a/rllib/algorithms/alpha_star/alpha_star.py +++ b/rllib/algorithms/alpha_star/alpha_star.py @@ -21,7 +21,6 @@ from ray.rllib.policy.sample_batch import MultiAgentBatch from ray.rllib.utils import deep_update from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated from ray.rllib.utils.from_config import from_config from ray.rllib.utils.metrics import ( LAST_TARGET_UPDATE_TS, @@ -635,20 +634,3 @@ def __setstate__(self, state: dict) -> None: state_copy = state.copy() self.league_builder.__setstate__(state.pop("league_builder", {})) super().__setstate__(state_copy) - - -# Deprecated: Use ray.rllib.algorithms.alpha_star.AlphaStarConfig instead! 
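
Going the other direction, the updated from_dict() docstring above is the supported bridge for code that still produces legacy python config dicts. A sketch, assuming a couple of ordinary legacy keys for illustration:

    from ray.rllib.algorithms.ppo import PPOConfig

    legacy_dict = {"num_workers": 2, "train_batch_size": 4000}  # assumed legacy keys
    ppo_config = PPOConfig.from_dict(legacy_dict)
    ppo = ppo_config.build(env="Pendulum-v1")
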
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(AlphaStarConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.alpha_star.alpha_star.DEFAULT_CONFIG", - new="ray.rllib.algorithms.alpha_star.alpha_star.AlphaStarConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/alpha_zero/__init__.py b/rllib/algorithms/alpha_zero/__init__.py index e334948951d4..6fbfd3af71bb 100644 --- a/rllib/algorithms/alpha_zero/__init__.py +++ b/rllib/algorithms/alpha_zero/__init__.py @@ -1,7 +1,6 @@ from ray.rllib.algorithms.alpha_zero.alpha_zero import ( AlphaZero, AlphaZeroConfig, - DEFAULT_CONFIG, ) from ray.rllib.algorithms.alpha_zero.alpha_zero_policy import AlphaZeroPolicy @@ -9,5 +8,4 @@ "AlphaZero", "AlphaZeroConfig", "AlphaZeroPolicy", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/alpha_zero/alpha_zero.py b/rllib/algorithms/alpha_zero/alpha_zero.py index 2bec09df274e..0119cd0af7b3 100644 --- a/rllib/algorithms/alpha_zero/alpha_zero.py +++ b/rllib/algorithms/alpha_zero/alpha_zero.py @@ -16,7 +16,7 @@ from ray.rllib.models.torch.torch_action_dist import TorchCategorical from ray.rllib.policy.policy import Policy from ray.rllib.policy.sample_batch import concat_samples -from ray.rllib.utils.annotations import Deprecated, override +from ray.rllib.utils.annotations import override from ray.rllib.utils.deprecation import DEPRECATED_VALUE from ray.rllib.utils.framework import try_import_torch from ray.rllib.utils.metrics import ( @@ -400,20 +400,3 @@ def training_step(self) -> ResultDict: # Return all collected metrics for the iteration. return train_results - - -# Deprecated: Use ray.rllib.algorithms.alpha_zero.AlphaZeroConfig instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(AlphaZeroConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.alpha_zero.alpha_zero.DEFAULT_CONFIG", - new="ray.rllib.algorithms.alpha_zero.alpha_zero.AlphaZeroConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/apex_ddpg/__init__.py b/rllib/algorithms/apex_ddpg/__init__.py index 9d4e62225f15..e5de02cf71bc 100644 --- a/rllib/algorithms/apex_ddpg/__init__.py +++ b/rllib/algorithms/apex_ddpg/__init__.py @@ -1,11 +1,9 @@ from ray.rllib.algorithms.apex_ddpg.apex_ddpg import ( ApexDDPG, ApexDDPGConfig, - APEX_DDPG_DEFAULT_CONFIG, ) __all__ = [ "ApexDDPG", "ApexDDPGConfig", - "APEX_DDPG_DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/apex_ddpg/apex_ddpg.py b/rllib/algorithms/apex_ddpg/apex_ddpg.py index c3358e7a4295..0900de2a2921 100644 --- a/rllib/algorithms/apex_ddpg/apex_ddpg.py +++ b/rllib/algorithms/apex_ddpg/apex_ddpg.py @@ -4,7 +4,7 @@ from ray.rllib.algorithms.apex_dqn.apex_dqn import ApexDQN from ray.rllib.algorithms.ddpg.ddpg import DDPG, DDPGConfig from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import DEPRECATED_VALUE, Deprecated +from ray.rllib.utils.deprecation import DEPRECATED_VALUE from ray.rllib.utils.typing import ( ResultDict, ) @@ -147,20 +147,3 @@ def setup(self, config: AlgorithmConfig): def training_step(self) -> ResultDict: """Use APEX-DQN's training iteration function.""" return ApexDQN.training_step(self) - - -# Deprecated: Use ray.rllib.algorithms.apex_ddpg.ApexDDPGConfig instead! 
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(ApexDDPGConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.ddpg.apex.APEX_DDPG_DEFAULT_CONFIG", - new="ray.rllib.algorithms.apex_ddpg.apex_ddpg::ApexDDPGConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -APEX_DDPG_DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/apex_dqn/__init__.py b/rllib/algorithms/apex_dqn/__init__.py index 57e718b7295e..e8385b3e302e 100644 --- a/rllib/algorithms/apex_dqn/__init__.py +++ b/rllib/algorithms/apex_dqn/__init__.py @@ -1,11 +1,9 @@ from ray.rllib.algorithms.apex_dqn.apex_dqn import ( ApexDQN, ApexDQNConfig, - APEX_DEFAULT_CONFIG, ) __all__ = [ "ApexDQN", "ApexDQNConfig", - "APEX_DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/apex_dqn/apex_dqn.py b/rllib/algorithms/apex_dqn/apex_dqn.py index 5eea96eccc8e..498113fef90e 100644 --- a/rllib/algorithms/apex_dqn/apex_dqn.py +++ b/rllib/algorithms/apex_dqn/apex_dqn.py @@ -29,7 +29,7 @@ from ray.rllib.utils.actor_manager import FaultTolerantActorManager from ray.rllib.utils.actors import create_colocated_actors from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import DEPRECATED_VALUE, Deprecated +from ray.rllib.utils.deprecation import DEPRECATED_VALUE from ray.rllib.utils.metrics import ( LAST_TARGET_UPDATE_TS, NUM_AGENT_STEPS_SAMPLED, @@ -753,20 +753,3 @@ def default_resource_request( ), strategy=cf.placement_strategy, ) - - -# Deprecated: Use ray.rllib.algorithms.apex_dqn.ApexDQNConfig instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(ApexDQNConfig().to_dict()) - - @Deprecated( - old="ray.rllib.agents.dqn.apex.APEX_DEFAULT_CONFIG", - new="ray.rllib.algorithms.apex_dqn.apex_dqn.ApexDQNConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -APEX_DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/appo/__init__.py b/rllib/algorithms/appo/__init__.py index cc03908e2a3d..acefcd6c95ae 100644 --- a/rllib/algorithms/appo/__init__.py +++ b/rllib/algorithms/appo/__init__.py @@ -1,4 +1,4 @@ -from ray.rllib.algorithms.appo.appo import APPO, APPOConfig, DEFAULT_CONFIG +from ray.rllib.algorithms.appo.appo import APPO, APPOConfig from ray.rllib.algorithms.appo.appo_tf_policy import APPOTF1Policy, APPOTF2Policy from ray.rllib.algorithms.appo.appo_torch_policy import APPOTorchPolicy @@ -8,5 +8,4 @@ "APPOTF1Policy", "APPOTF2Policy", "APPOTorchPolicy", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/appo/appo.py b/rllib/algorithms/appo/appo.py index 46b948c83bc3..eba5fdbb01b5 100644 --- a/rllib/algorithms/appo/appo.py +++ b/rllib/algorithms/appo/appo.py @@ -18,7 +18,6 @@ from ray.rllib.execution.common import _get_shared_metrics, STEPS_SAMPLED_COUNTER from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated from ray.rllib.utils.metrics import ( LAST_TARGET_UPDATE_TS, NUM_AGENT_STEPS_SAMPLED, @@ -294,20 +293,3 @@ def get_default_policy_class( from ray.rllib.algorithms.appo.appo_tf_policy import APPOTF2Policy return APPOTF2Policy - - -# Deprecated: Use ray.rllib.algorithms.appo.APPOConfig instead! 
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(APPOConfig().to_dict()) - - @Deprecated( - old="ray.rllib.agents.ppo.appo::DEFAULT_CONFIG", - new="ray.rllib.algorithms.appo.appo::APPOConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/ars/__init__.py b/rllib/algorithms/ars/__init__.py index 92997a1ce125..bdac9d1752a6 100644 --- a/rllib/algorithms/ars/__init__.py +++ b/rllib/algorithms/ars/__init__.py @@ -1,4 +1,4 @@ -from ray.rllib.algorithms.ars.ars import ARS, ARSConfig, DEFAULT_CONFIG +from ray.rllib.algorithms.ars.ars import ARS, ARSConfig from ray.rllib.algorithms.ars.ars_tf_policy import ARSTFPolicy from ray.rllib.algorithms.ars.ars_torch_policy import ARSTorchPolicy @@ -7,5 +7,4 @@ "ARSConfig", "ARSTFPolicy", "ARSTorchPolicy", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/ars/ars.py b/rllib/algorithms/ars/ars.py index 96c97279b9ff..1d27a13f0963 100644 --- a/rllib/algorithms/ars/ars.py +++ b/rllib/algorithms/ars/ars.py @@ -21,7 +21,6 @@ from ray.rllib.utils import FilterManager from ray.rllib.utils.actor_manager import FaultAwareApply from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated from ray.rllib.utils.metrics import ( NUM_AGENT_STEPS_SAMPLED, NUM_AGENT_STEPS_TRAINED, @@ -605,20 +604,3 @@ def __setstate__(self, state): FilterManager.synchronize( {DEFAULT_POLICY_ID: self.policy.observation_filter}, self.workers ) - - -# Deprecated: Use ray.rllib.algorithms.ars.ARSConfig instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(ARSConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.ars.ars.DEFAULT_CONFIG", - new="ray.rllib.algorithms.ars.ars.ARSConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/ars/ars_torch_policy.py b/rllib/algorithms/ars/ars_torch_policy.py index aae154035506..b5d6497894f1 100644 --- a/rllib/algorithms/ars/ars_torch_policy.py +++ b/rllib/algorithms/ars/ars_torch_policy.py @@ -13,7 +13,7 @@ name="ARSTorchPolicy", framework="torch", loss_fn=None, - get_default_config=lambda: ray.rllib.algorithms.ars.ars.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.ars.ars.ARSConfig(), before_init=before_init, after_init=after_init, make_model_and_action_dist=make_model_and_action_dist, diff --git a/rllib/algorithms/bandit/bandit.py b/rllib/algorithms/bandit/bandit.py index 2129a719e2a4..b5e278bd090f 100644 --- a/rllib/algorithms/bandit/bandit.py +++ b/rllib/algorithms/bandit/bandit.py @@ -7,7 +7,6 @@ from ray.rllib.algorithms.bandit.bandit_torch_policy import BanditTorchPolicy from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated logger = logging.getLogger(__name__) @@ -121,20 +120,3 @@ def get_default_policy_class( return BanditTFPolicy else: raise NotImplementedError("Only `framework=[torch|tf2]` supported!") - - -# Deprecated: Use ray.rllib.algorithms.bandit.BanditLinUCBConfig instead! 
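
The bandit DEFAULT_CONFIG dict is replaced by one config class per exploration variant, BanditLinUCBConfig and BanditLinTSConfig, as the removed deprecation message above spells out. A sketch using the module path from that message; the env id here is purely illustrative:

    from ray.rllib.algorithms.bandit.bandit import BanditLinUCBConfig

    config = (
        BanditLinUCBConfig()
        .environment("WheelBanditEnv")  # hypothetical env id
        .framework("torch")
    )
    algo = config.build()
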
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(BanditLinUCBConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.bandit.bandit.DEFAULT_CONFIG", - new="ray.rllib.algorithms.bandit.bandit.BanditLin[UCB|TS]Config(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/bandit/bandit_tf_policy.py b/rllib/algorithms/bandit/bandit_tf_policy.py index a9fde50dc17e..8527407ec57f 100644 --- a/rllib/algorithms/bandit/bandit_tf_policy.py +++ b/rllib/algorithms/bandit/bandit_tf_policy.py @@ -149,7 +149,7 @@ def after_init(policy, *args): BanditTFPolicy = build_tf_policy( name="BanditTFPolicy", - get_default_config=lambda: ray.rllib.algorithms.bandit.bandit.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.bandit.bandit.BanditConfig(), validate_spaces=validate_spaces, make_model=make_model, loss_fn=None, diff --git a/rllib/algorithms/bc/__init__.py b/rllib/algorithms/bc/__init__.py index 5f2a3ed086e8..d746e04e508f 100644 --- a/rllib/algorithms/bc/__init__.py +++ b/rllib/algorithms/bc/__init__.py @@ -1,8 +1,6 @@ -from ray.rllib.algorithms.bc.bc import BCConfig, BC, BC_DEFAULT_CONFIG +from ray.rllib.algorithms.bc.bc import BCConfig, BC __all__ = [ "BCConfig", "BC", - # Deprecated. - "BC_DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/bc/bc.py b/rllib/algorithms/bc/bc.py index 905c2813fc80..97e546b5a62c 100644 --- a/rllib/algorithms/bc/bc.py +++ b/rllib/algorithms/bc/bc.py @@ -1,7 +1,6 @@ from ray.rllib.algorithms.algorithm_config import AlgorithmConfig from ray.rllib.algorithms.marwil.marwil import MARWIL, MARWILConfig from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated class BCConfig(MARWILConfig): @@ -74,20 +73,3 @@ class BC(MARWIL): @override(MARWIL) def get_default_config(cls) -> AlgorithmConfig: return BCConfig() - - -# Deprecated: Use ray.rllib.algorithms.bc.BCConfig instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(BCConfig().to_dict()) - - @Deprecated( - old="ray.rllib.agents.marwil.bc::DEFAULT_CONFIG", - new="ray.rllib.algorithms.bc.bc::BCConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -BC_DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/cql/__init__.py b/rllib/algorithms/cql/__init__.py index 10c7c3e7450c..91b0cc69acd7 100644 --- a/rllib/algorithms/cql/__init__.py +++ b/rllib/algorithms/cql/__init__.py @@ -1,9 +1,8 @@ -from ray.rllib.algorithms.cql.cql import CQL, DEFAULT_CONFIG, CQLConfig +from ray.rllib.algorithms.cql.cql import CQL, CQLConfig from ray.rllib.algorithms.cql.cql_torch_policy import CQLTorchPolicy __all__ = [ "CQL", "CQLTorchPolicy", "CQLConfig", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/cql/cql.py b/rllib/algorithms/cql/cql.py index 262be7c17c18..c4aeaae91aad 100644 --- a/rllib/algorithms/cql/cql.py +++ b/rllib/algorithms/cql/cql.py @@ -20,7 +20,6 @@ from ray.rllib.utils.deprecation import ( DEPRECATED_VALUE, deprecation_warning, - Deprecated, ) from ray.rllib.utils.framework import try_import_tf, try_import_tfp from ray.rllib.utils.metrics import ( @@ -213,20 +212,3 @@ def training_step(self) -> ResultDict: # Return all collected metrics for the iteration. 
return train_results - - -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(CQLConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.cql.cql::DEFAULT_CONFIG", - new="ray.rllib.algorithms.cql.cql::CQLConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() -CQL_DEFAULT_CONFIG = DEFAULT_CONFIG diff --git a/rllib/algorithms/cql/cql_tf_policy.py b/rllib/algorithms/cql/cql_tf_policy.py index a134ad7540d4..2aaecf01e2be 100644 --- a/rllib/algorithms/cql/cql_tf_policy.py +++ b/rllib/algorithms/cql/cql_tf_policy.py @@ -411,7 +411,7 @@ def apply_gradients_fn(policy, optimizer, grads_and_vars): CQLTFPolicy = build_tf_policy( name="CQLTFPolicy", loss_fn=cql_loss, - get_default_config=lambda: ray.rllib.algorithms.cql.cql.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.cql.cql.CQLConfig(), validate_spaces=validate_spaces, stats_fn=cql_stats, postprocess_fn=postprocess_trajectory, diff --git a/rllib/algorithms/cql/cql_torch_policy.py b/rllib/algorithms/cql/cql_torch_policy.py index 7a6bf60e77e2..ec8b1ab5a5e9 100644 --- a/rllib/algorithms/cql/cql_torch_policy.py +++ b/rllib/algorithms/cql/cql_torch_policy.py @@ -390,7 +390,7 @@ def apply_gradients_fn(policy, gradients): name="CQLTorchPolicy", framework="torch", loss_fn=cql_loss, - get_default_config=lambda: ray.rllib.algorithms.cql.cql.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.cql.cql.CQLConfig(), stats_fn=cql_stats, postprocess_fn=postprocess_trajectory, extra_grad_process_fn=apply_grad_clipping, diff --git a/rllib/algorithms/ddpg/__init__.py b/rllib/algorithms/ddpg/__init__.py index 04639c7c3079..cd6e7a9c6e50 100644 --- a/rllib/algorithms/ddpg/__init__.py +++ b/rllib/algorithms/ddpg/__init__.py @@ -1,8 +1,7 @@ -from ray.rllib.algorithms.ddpg.ddpg import DDPG, DDPGConfig, DEFAULT_CONFIG +from ray.rllib.algorithms.ddpg.ddpg import DDPG, DDPGConfig __all__ = [ "DDPG", "DDPGConfig", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/ddpg/ddpg.py b/rllib/algorithms/ddpg/ddpg.py index fe3cbb07cc1e..2baaa3be1922 100644 --- a/rllib/algorithms/ddpg/ddpg.py +++ b/rllib/algorithms/ddpg/ddpg.py @@ -6,7 +6,6 @@ from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override from ray.rllib.utils.deprecation import DEPRECATED_VALUE -from ray.rllib.utils.deprecation import Deprecated logger = logging.getLogger(__name__) @@ -312,20 +311,3 @@ def get_default_policy_class( from ray.rllib.algorithms.ddpg.ddpg_tf_policy import DDPGTF2Policy return DDPGTF2Policy - - -# Deprecated: Use ray.rllib.algorithms.ddpg.DDPGConfig instead! 
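
The BC and CQL changes above follow the same recipe; for offline algorithms the config object also carries the dataset location. A sketch for BC, assuming the usual offline_data() setter and a JSON output path that is purely illustrative:

    from ray.rllib.algorithms.bc import BCConfig

    config = (
        BCConfig()
        .environment("CartPole-v1")
        .offline_data(input_="/tmp/cartpole-out")  # hypothetical dataset path
        .training(lr=0.0008)
    )
    algo = config.build()
    print(algo.train())
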
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(DDPGConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.ddpg.ddpg::DEFAULT_CONFIG", - new="ray.rllib.algorithms.ddpg.ddpg.DDPGConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/ddppo/__init__.py b/rllib/algorithms/ddppo/__init__.py index b33460d78c4c..7f50fd14cafb 100644 --- a/rllib/algorithms/ddppo/__init__.py +++ b/rllib/algorithms/ddppo/__init__.py @@ -1,7 +1,6 @@ -from ray.rllib.algorithms.ddppo.ddppo import DDPPOConfig, DDPPO, DEFAULT_CONFIG +from ray.rllib.algorithms.ddppo.ddppo import DDPPOConfig, DDPPO __all__ = [ "DDPPOConfig", "DDPPO", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/ddppo/ddppo.py b/rllib/algorithms/ddppo/ddppo.py index ab19148c68b3..f849d58292a9 100644 --- a/rllib/algorithms/ddppo/ddppo.py +++ b/rllib/algorithms/ddppo/ddppo.py @@ -25,7 +25,6 @@ from ray.rllib.evaluation.postprocessing import Postprocessing from ray.rllib.evaluation.rollout_worker import RolloutWorker from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated from ray.rllib.utils.metrics import ( LEARN_ON_BATCH_TIMER, NUM_AGENT_STEPS_SAMPLED, @@ -361,20 +360,3 @@ def _sample_and_train_torch_distributed(worker: RolloutWorker): "sample_time": sample_time, "learn_on_batch_time": learn_on_batch_time, } - - -# Deprecated: Use ray.rllib.algorithms.ddppo.DDPPOConfig instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(DDPPOConfig().to_dict()) - - @Deprecated( - old="ray.rllib.agents.ppo.ddppo::DEFAULT_CONFIG", - new="ray.rllib.algorithms.ddppo.ddppo::DDPPOConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/dqn/__init__.py b/rllib/algorithms/dqn/__init__.py index 1a1b949de105..f6046b585028 100644 --- a/rllib/algorithms/dqn/__init__.py +++ b/rllib/algorithms/dqn/__init__.py @@ -1,4 +1,4 @@ -from ray.rllib.algorithms.dqn.dqn import DQN, DQNConfig, DEFAULT_CONFIG +from ray.rllib.algorithms.dqn.dqn import DQN, DQNConfig from ray.rllib.algorithms.dqn.dqn_tf_policy import DQNTFPolicy from ray.rllib.algorithms.dqn.dqn_torch_policy import DQNTorchPolicy @@ -7,5 +7,4 @@ "DQNConfig", "DQNTFPolicy", "DQNTorchPolicy", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/dqn/dqn.py b/rllib/algorithms/dqn/dqn.py index 00ece2b5790d..cef8eb27814e 100644 --- a/rllib/algorithms/dqn/dqn.py +++ b/rllib/algorithms/dqn/dqn.py @@ -477,23 +477,6 @@ def training_step(self) -> ResultDict: return train_results -# Deprecated: Use ray.rllib.algorithms.dqn.DQNConfig instead! 
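
For DQN (and DDPPO above), call sites that used to merge an override dict into DEFAULT_CONFIG can use update_from_dict() on the config object, which AlgorithmConfig provides in this codebase; treat the exact keys below as illustrative:

    from ray.rllib.algorithms.dqn import DQNConfig

    # Old: config = dict(DEFAULT_CONFIG, **{"train_batch_size": 64})
    config = DQNConfig().update_from_dict({"train_batch_size": 64, "gamma": 0.95})
    algo = config.build(env="CartPole-v1")
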
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(DQNConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.dqn.dqn.DEFAULT_CONFIG", - new="ray.rllib.algorithms.dqn.dqn.DQNConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() - - @Deprecated(new="Sub-class directly from `DQN` and override its methods", error=True) class GenericOffPolicyTrainer(SimpleQ): pass diff --git a/rllib/algorithms/dqn/dqn_tf_policy.py b/rllib/algorithms/dqn/dqn_tf_policy.py index 70111d613e70..43a0482b1349 100644 --- a/rllib/algorithms/dqn/dqn_tf_policy.py +++ b/rllib/algorithms/dqn/dqn_tf_policy.py @@ -479,7 +479,7 @@ def postprocess_nstep_and_prio( DQNTFPolicy = build_tf_policy( name="DQNTFPolicy", - get_default_config=lambda: ray.rllib.algorithms.dqn.dqn.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.dqn.dqn.DQNConfig(), make_model=build_q_model, action_distribution_fn=get_distribution_inputs_and_class, loss_fn=build_q_losses, diff --git a/rllib/algorithms/dqn/dqn_torch_policy.py b/rllib/algorithms/dqn/dqn_torch_policy.py index ae745a39c035..a711792b4039 100644 --- a/rllib/algorithms/dqn/dqn_torch_policy.py +++ b/rllib/algorithms/dqn/dqn_torch_policy.py @@ -487,7 +487,7 @@ def extra_action_out_fn( name="DQNTorchPolicy", framework="torch", loss_fn=build_q_losses, - get_default_config=lambda: ray.rllib.algorithms.dqn.dqn.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.dqn.dqn.DQNConfig(), make_model_and_action_dist=build_q_model_and_distribution, action_distribution_fn=get_distribution_inputs_and_class, stats_fn=build_q_stats, diff --git a/rllib/algorithms/dreamer/__init__.py b/rllib/algorithms/dreamer/__init__.py index 6b8f49436704..6e5bc9b6403e 100644 --- a/rllib/algorithms/dreamer/__init__.py +++ b/rllib/algorithms/dreamer/__init__.py @@ -1,11 +1,9 @@ from ray.rllib.algorithms.dreamer.dreamer import ( Dreamer, DreamerConfig, - DEFAULT_CONFIG, ) __all__ = [ "Dreamer", "DreamerConfig", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/dreamer/dreamer.py b/rllib/algorithms/dreamer/dreamer.py index bb6114ed3071..63b3d2243800 100644 --- a/rllib/algorithms/dreamer/dreamer.py +++ b/rllib/algorithms/dreamer/dreamer.py @@ -19,7 +19,6 @@ synchronous_parallel_sample, ) from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated from ray.rllib.utils.metrics import ( NUM_AGENT_STEPS_SAMPLED, NUM_ENV_STEPS_SAMPLED, @@ -397,20 +396,3 @@ def training_step(self) -> ResultDict: self.local_replay_buffer.add(batch) return fetches - - -# Deprecated: Use ray.rllib.algorithms.dreamer.DreamerConfig instead! 
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(DreamerConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.dreamer.dreamer.DEFAULT_CONFIG", - new="ray.rllib.algorithms.dreamer.dreamer.DreamerConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/es/__init__.py b/rllib/algorithms/es/__init__.py index fa10d6289735..3533ac47c7c5 100644 --- a/rllib/algorithms/es/__init__.py +++ b/rllib/algorithms/es/__init__.py @@ -1,5 +1,5 @@ -from ray.rllib.algorithms.es.es import ES, ESConfig, DEFAULT_CONFIG +from ray.rllib.algorithms.es.es import ES, ESConfig from ray.rllib.algorithms.es.es_tf_policy import ESTFPolicy from ray.rllib.algorithms.es.es_torch_policy import ESTorchPolicy -__all__ = ["ES", "ESConfig", "ESTFPolicy", "ESTorchPolicy", "DEFAULT_CONFIG"] +__all__ = ["ES", "ESConfig", "ESTFPolicy", "ESTorchPolicy"] diff --git a/rllib/algorithms/es/es.py b/rllib/algorithms/es/es.py index fd6febc1144c..18f58897746e 100644 --- a/rllib/algorithms/es/es.py +++ b/rllib/algorithms/es/es.py @@ -605,20 +605,3 @@ def __setstate__(self, state): FilterManager.synchronize( {DEFAULT_POLICY_ID: self.policy.observation_filter}, self.workers ) - - -# Deprecated: Use ray.rllib.algorithms.es.ESConfig instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(ESConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.es.es.DEFAULT_CONFIG", - new="ray.rllib.algorithms.es.es.ESConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/es/es_torch_policy.py b/rllib/algorithms/es/es_torch_policy.py index 12b61f7f8af2..4028702e0ef2 100644 --- a/rllib/algorithms/es/es_torch_policy.py +++ b/rllib/algorithms/es/es_torch_policy.py @@ -125,7 +125,7 @@ def make_model_and_action_dist(policy, observation_space, action_space, config): name="ESTorchPolicy", framework="torch", loss_fn=None, - get_default_config=lambda: ray.rllib.algorithms.es.es.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.es.es.ESConfig(), before_init=before_init, after_init=after_init, make_model_and_action_dist=make_model_and_action_dist, diff --git a/rllib/algorithms/impala/__init__.py b/rllib/algorithms/impala/__init__.py index 626022747bf0..408d069c6ac5 100644 --- a/rllib/algorithms/impala/__init__.py +++ b/rllib/algorithms/impala/__init__.py @@ -1,4 +1,4 @@ -from ray.rllib.algorithms.impala.impala import Impala, ImpalaConfig, DEFAULT_CONFIG +from ray.rllib.algorithms.impala.impala import Impala, ImpalaConfig from ray.rllib.algorithms.impala.impala_tf_policy import ( ImpalaTF1Policy, ImpalaTF2Policy, @@ -11,5 +11,4 @@ "ImpalaTF1Policy", "ImpalaTF2Policy", "ImpalaTorchPolicy", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/impala/impala.py b/rllib/algorithms/impala/impala.py index c8d95bb93141..006a6f726807 100644 --- a/rllib/algorithms/impala/impala.py +++ b/rllib/algorithms/impala/impala.py @@ -36,7 +36,6 @@ from ray.rllib.utils.metrics import ALL_MODULES from ray.rllib.utils.deprecation import ( DEPRECATED_VALUE, - Deprecated, deprecation_warning, ) from ray.rllib.utils.metrics import ( @@ -1159,20 +1158,3 @@ def process_episodes(self, batch: SampleBatchType) -> SampleBatchType: def get_host(self) -> str: return platform.node() - - -# Deprecated: Use ray.rllib.algorithms.impala.ImpalaConfig instead! 
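
ES and IMPALA migrate identically. A sketch for IMPALA with one of its algorithm-specific training() keys; vtrace is an ImpalaConfig field, and the values are illustrative:

    from ray.rllib.algorithms.impala import ImpalaConfig

    config = (
        ImpalaConfig()
        .environment("CartPole-v1")
        .rollouts(num_rollout_workers=4)
        .resources(num_gpus=0)
        .training(vtrace=True, train_batch_size=500)
    )
    algo = config.build()
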
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(ImpalaConfig().to_dict()) - - @Deprecated( - old="ray.rllib.agents.impala.impala::DEFAULT_CONFIG", - new="ray.rllib.algorithms.impala.impala::IMPALAConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/maddpg/__init__.py b/rllib/algorithms/maddpg/__init__.py index c6636817f43f..1722f775f1ef 100644 --- a/rllib/algorithms/maddpg/__init__.py +++ b/rllib/algorithms/maddpg/__init__.py @@ -1,7 +1,6 @@ from ray.rllib.algorithms.maddpg.maddpg import ( MADDPG, MADDPGConfig, - DEFAULT_CONFIG, ) -__all__ = ["MADDPGConfig", "MADDPG", "DEFAULT_CONFIG"] +__all__ = ["MADDPGConfig", "MADDPG"] diff --git a/rllib/algorithms/maddpg/maddpg.py b/rllib/algorithms/maddpg/maddpg.py index 6ed5f4c25efd..d9cc96fa88e3 100644 --- a/rllib/algorithms/maddpg/maddpg.py +++ b/rllib/algorithms/maddpg/maddpg.py @@ -17,7 +17,7 @@ from ray.rllib.algorithms.maddpg.maddpg_tf_policy import MADDPGTFPolicy from ray.rllib.policy.policy import Policy from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch -from ray.rllib.utils.annotations import Deprecated, override +from ray.rllib.utils.annotations import override from ray.rllib.utils.deprecation import DEPRECATED_VALUE logger = logging.getLogger(__name__) @@ -310,20 +310,3 @@ def get_default_policy_class( cls, config: AlgorithmConfig ) -> Optional[Type[Policy]]: return MADDPGTFPolicy - - -# Deprecated: Use ray.rllib.algorithms.maddpg.MADDPG instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(MADDPGConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.maddpg.maddpg.DEFAULT_CONFIG", - new="ray.rllib.algorithms.maddpg.maddpg.MADDPGConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/maml/__init__.py b/rllib/algorithms/maml/__init__.py index e7c844068f4a..0fb24a5499e5 100644 --- a/rllib/algorithms/maml/__init__.py +++ b/rllib/algorithms/maml/__init__.py @@ -1,7 +1,6 @@ -from ray.rllib.algorithms.maml.maml import MAML, MAMLConfig, DEFAULT_CONFIG +from ray.rllib.algorithms.maml.maml import MAML, MAMLConfig __all__ = [ "MAML", "MAMLConfig", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/maml/maml.py b/rllib/algorithms/maml/maml.py index 0c871f0f68e0..fc9488e72d03 100644 --- a/rllib/algorithms/maml/maml.py +++ b/rllib/algorithms/maml/maml.py @@ -20,7 +20,7 @@ from ray.rllib.execution.metric_ops import CollectMetrics from ray.rllib.evaluation.metrics import collect_metrics from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated, DEPRECATED_VALUE +from ray.rllib.utils.deprecation import DEPRECATED_VALUE from ray.rllib.utils.metrics.learner_info import LEARNER_INFO from ray.rllib.utils.sgd import standardized from ray.util.iter import from_actors, LocalIterator @@ -378,20 +378,3 @@ def inner_adaptation_steps(itr): ) ) return train_op - - -# Deprecated: Use ray.rllib.algorithms.qmix.qmix.QMixConfig instead! 
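
The MAML policy change above leans on the fact that an AlgorithmConfig can be expanded like a mapping, which is what keeps dict(MAMLConfig(), **config) working without the old dict shim. A sketch of that equivalence, using attribute names that appear elsewhere in this patch:

    from ray.rllib.algorithms.maml.maml import MAMLConfig

    cfg = MAMLConfig()
    as_dict = dict(cfg)                    # config objects support the mapping protocol
    assert as_dict["gamma"] == cfg.gamma
    merged = dict(cfg, **{"gamma": 0.95})  # per-policy overrides win, as in the policy above
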
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(MAMLConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.maml.maml.DEFAULT_CONFIG", - new="ray.rllib.algorithms.maml.maml.MAMLConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/maml/maml_torch_policy.py b/rllib/algorithms/maml/maml_torch_policy.py index 5eb68f0c5e03..028534762402 100644 --- a/rllib/algorithms/maml/maml_torch_policy.py +++ b/rllib/algorithms/maml/maml_torch_policy.py @@ -300,7 +300,7 @@ class MAMLTorchPolicy(ValueNetworkMixin, KLCoeffMixin, TorchPolicyV2): """PyTorch policy class used with MAML.""" def __init__(self, observation_space, action_space, config): - config = dict(ray.rllib.algorithms.maml.maml.DEFAULT_CONFIG, **config) + config = dict(ray.rllib.algorithms.maml.maml.MAMLConfig(), **config) validate_config(config) TorchPolicyV2.__init__( diff --git a/rllib/algorithms/marwil/__init__.py b/rllib/algorithms/marwil/__init__.py index 02fe7b01d09b..7a6e5d6b0720 100644 --- a/rllib/algorithms/marwil/__init__.py +++ b/rllib/algorithms/marwil/__init__.py @@ -1,5 +1,4 @@ from ray.rllib.algorithms.marwil.marwil import ( - DEFAULT_CONFIG, MARWIL, MARWILConfig, ) @@ -15,6 +14,4 @@ "MARWILTF1Policy", "MARWILTF2Policy", "MARWILTorchPolicy", - # Deprecated. - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/marwil/marwil.py b/rllib/algorithms/marwil/marwil.py index 74c222abafb6..d46ee82b5bff 100644 --- a/rllib/algorithms/marwil/marwil.py +++ b/rllib/algorithms/marwil/marwil.py @@ -11,7 +11,7 @@ ) from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated, deprecation_warning +from ray.rllib.utils.deprecation import deprecation_warning from ray.rllib.utils.metrics import ( NUM_AGENT_STEPS_SAMPLED, NUM_ENV_STEPS_SAMPLED, @@ -266,20 +266,3 @@ def training_step(self) -> ResultDict: self.workers.local_worker().set_global_vars(global_vars) return train_results - - -# Deprecated: Use ray.rllib.algorithms.marwil.MARWILConfig instead! 
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(MARWILConfig().to_dict()) - - @Deprecated( - old="ray.rllib.agents.marwil.marwil::DEFAULT_CONFIG", - new="ray.rllib.algorithms.marwil.marwil::MARWILConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/mbmpo/__init__.py b/rllib/algorithms/mbmpo/__init__.py index 90eb1c43a905..16401cbf8364 100644 --- a/rllib/algorithms/mbmpo/__init__.py +++ b/rllib/algorithms/mbmpo/__init__.py @@ -1,7 +1,6 @@ -from ray.rllib.algorithms.mbmpo.mbmpo import MBMPO, MBMPOConfig, DEFAULT_CONFIG +from ray.rllib.algorithms.mbmpo.mbmpo import MBMPO, MBMPOConfig __all__ = [ "MBMPO", "MBMPOConfig", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/mbmpo/mbmpo.py b/rllib/algorithms/mbmpo/mbmpo.py index cf03cebdce88..cc35b5703d36 100644 --- a/rllib/algorithms/mbmpo/mbmpo.py +++ b/rllib/algorithms/mbmpo/mbmpo.py @@ -29,7 +29,7 @@ concat_samples, convert_ma_batch_to_sample_batch, ) -from ray.rllib.utils.annotations import Deprecated, override +from ray.rllib.utils.annotations import override from ray.rllib.utils.deprecation import DEPRECATED_VALUE from ray.rllib.utils.metrics.learner_info import LEARNER_INFO from ray.rllib.utils.sgd import standardized @@ -598,20 +598,3 @@ def validate_env(env: EnvType, env_context: EnvContext) -> None: f"Env {env} doest not have a `reward()` method, needed for " "MB-MPO! This `reward()` method should return " ) - - -# Deprecated: Use ray.rllib.algorithms.mbmpo.MBMPOConfig instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(MBMPOConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.mbmpo.mbmpo.DEFAULT_CONFIG", - new="ray.rllib.algorithms.mbmpo.mbmpo.MBMPOConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/pg/__init__.py b/rllib/algorithms/pg/__init__.py index 945c492c8168..19115c7becda 100644 --- a/rllib/algorithms/pg/__init__.py +++ b/rllib/algorithms/pg/__init__.py @@ -1,11 +1,10 @@ -from ray.rllib.algorithms.pg.pg import PG, PGConfig, DEFAULT_CONFIG +from ray.rllib.algorithms.pg.pg import PG, PGConfig from ray.rllib.algorithms.pg.pg_tf_policy import PGTF1Policy, PGTF2Policy from ray.rllib.algorithms.pg.pg_torch_policy import PGTorchPolicy from ray.rllib.algorithms.pg.utils import post_process_advantages __all__ = [ - "DEFAULT_CONFIG", "post_process_advantages", "PG", "PGConfig", @@ -13,5 +12,4 @@ "PGTF2Policy", "PGTorchPolicy", "post_process_advantages", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/pg/pg.py b/rllib/algorithms/pg/pg.py index dab6c3d38463..508c1ceb51e4 100644 --- a/rllib/algorithms/pg/pg.py +++ b/rllib/algorithms/pg/pg.py @@ -4,7 +4,6 @@ from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated class PGConfig(AlgorithmConfig): @@ -132,20 +131,3 @@ def get_default_policy_class( from ray.rllib.algorithms.pg.pg_tf_policy import PGTF2Policy return PGTF2Policy - - -# Deprecated: Use ray.rllib.algorithms.pg.PGConfig instead! 
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(PGConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.pg.default_config::DEFAULT_CONFIG", - new="ray.rllib.algorithms.pg.pg::PGConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/ppo/__init__.py b/rllib/algorithms/ppo/__init__.py index c592b38f782d..a54946f41ebc 100644 --- a/rllib/algorithms/ppo/__init__.py +++ b/rllib/algorithms/ppo/__init__.py @@ -1,4 +1,4 @@ -from ray.rllib.algorithms.ppo.ppo import PPOConfig, PPO, DEFAULT_CONFIG +from ray.rllib.algorithms.ppo.ppo import PPOConfig, PPO from ray.rllib.algorithms.ppo.ppo_tf_policy import PPOTF1Policy, PPOTF2Policy from ray.rllib.algorithms.ppo.ppo_torch_policy import PPOTorchPolicy @@ -8,5 +8,4 @@ "PPOTF2Policy", "PPOTorchPolicy", "PPO", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/ppo/ppo.py b/rllib/algorithms/ppo/ppo.py index 0d59f3d8d40e..18a06a5d660b 100644 --- a/rllib/algorithms/ppo/ppo.py +++ b/rllib/algorithms/ppo/ppo.py @@ -30,7 +30,6 @@ from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override from ray.rllib.utils.deprecation import ( - Deprecated, DEPRECATED_VALUE, deprecation_warning, ) @@ -215,7 +214,7 @@ def training( """ if vf_share_layers != DEPRECATED_VALUE: deprecation_warning( - old="ppo.DEFAULT_CONFIG['vf_share_layers']", + old="PPOConfig().vf_share_layers", new="PPOConfig().training(model={'vf_share_layers': ...})", error=True, ) @@ -517,20 +516,3 @@ def training_step(self) -> ResultDict: self.workers.local_worker().set_global_vars(global_vars) return train_results - - -# Deprecated: Use ray.rllib.algorithms.ppo.PPOConfig instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(PPOConfig().to_dict()) - - @Deprecated( - old="ray.rllib.agents.ppo.ppo::DEFAULT_CONFIG", - new="ray.rllib.algorithms.ppo.ppo::PPOConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/qmix/__init__.py b/rllib/algorithms/qmix/__init__.py index 77033456c122..9781e470668e 100644 --- a/rllib/algorithms/qmix/__init__.py +++ b/rllib/algorithms/qmix/__init__.py @@ -1,3 +1,3 @@ -from ray.rllib.algorithms.qmix.qmix import QMix, QMixConfig, DEFAULT_CONFIG +from ray.rllib.algorithms.qmix.qmix import QMix, QMixConfig -__all__ = ["QMix", "QMixConfig", "DEFAULT_CONFIG"] +__all__ = ["QMix", "QMixConfig"] diff --git a/rllib/algorithms/qmix/qmix.py b/rllib/algorithms/qmix/qmix.py index 79f02332b8ce..5c00a6a4ac9d 100644 --- a/rllib/algorithms/qmix/qmix.py +++ b/rllib/algorithms/qmix/qmix.py @@ -13,7 +13,6 @@ ) from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated from ray.rllib.utils.metrics import ( LAST_TARGET_UPDATE_TS, NUM_AGENT_STEPS_SAMPLED, @@ -321,20 +320,3 @@ def training_step(self) -> ResultDict: # Return all collected metrics for the iteration. return train_results - - -# Deprecated: Use ray.rllib.algorithms.qmix.qmix.QMixConfig instead! 
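
The PPO deprecation message above also shows where vf_share_layers went: it is a model option now, set through training(model=...). A sketch combining it with two ordinary PPO training keys:

    from ray.rllib.algorithms.ppo import PPOConfig

    config = (
        PPOConfig()
        .environment("CartPole-v1")
        .training(
            model={"vf_share_layers": True},
            lambda_=0.95,
            clip_param=0.2,
        )
    )
    algo = config.build()
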
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(QMixConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.qmix.qmix.DEFAULT_CONFIG", - new="ray.rllib.algorithms.qmix.qmix.QMixConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/r2d2/__init__.py b/rllib/algorithms/r2d2/__init__.py index d19bdec5affc..a5b6044d35f0 100644 --- a/rllib/algorithms/r2d2/__init__.py +++ b/rllib/algorithms/r2d2/__init__.py @@ -1,4 +1,4 @@ -from ray.rllib.algorithms.r2d2.r2d2 import R2D2, R2D2Config, R2D2_DEFAULT_CONFIG +from ray.rllib.algorithms.r2d2.r2d2 import R2D2, R2D2Config from ray.rllib.algorithms.r2d2.r2d2_tf_policy import R2D2TFPolicy from ray.rllib.algorithms.r2d2.r2d2_torch_policy import R2D2TorchPolicy @@ -7,5 +7,4 @@ "R2D2Config", "R2D2TFPolicy", "R2D2TorchPolicy", - "R2D2_DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/r2d2/r2d2.py b/rllib/algorithms/r2d2/r2d2.py index a67d0071f301..545f9469c47a 100644 --- a/rllib/algorithms/r2d2/r2d2.py +++ b/rllib/algorithms/r2d2/r2d2.py @@ -7,7 +7,6 @@ from ray.rllib.algorithms.r2d2.r2d2_torch_policy import R2D2TorchPolicy from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated from ray.rllib.utils.deprecation import DEPRECATED_VALUE logger = logging.getLogger(__name__) @@ -221,20 +220,3 @@ def get_default_policy_class( return R2D2TorchPolicy else: return R2D2TFPolicy - - -# Deprecated: Use ray.rllib.algorithms.r2d2.r2d2.R2D2Config instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(R2D2Config().to_dict()) - - @Deprecated( - old="ray.rllib.agents.dqn.r2d2::R2D2_DEFAULT_CONFIG", - new="ray.rllib.algorithms.r2d2.r2d2::R2D2Config(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -R2D2_DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/r2d2/r2d2_tf_policy.py b/rllib/algorithms/r2d2/r2d2_tf_policy.py index 0c1b22ec397f..299513bd43b1 100644 --- a/rllib/algorithms/r2d2/r2d2_tf_policy.py +++ b/rllib/algorithms/r2d2/r2d2_tf_policy.py @@ -333,7 +333,7 @@ def setup_late_mixins( R2D2TFPolicy = build_tf_policy( name="R2D2TFPolicy", loss_fn=r2d2_loss, - get_default_config=lambda: ray.rllib.algorithms.r2d2.r2d2.R2D2_DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.r2d2.r2d2.R2D2Config(), postprocess_fn=postprocess_nstep_and_prio, stats_fn=build_q_stats, make_model=build_r2d2_model, diff --git a/rllib/algorithms/r2d2/r2d2_torch_policy.py b/rllib/algorithms/r2d2/r2d2_torch_policy.py index abfcc8c9f0dd..0e4f6cc4ac41 100644 --- a/rllib/algorithms/r2d2/r2d2_torch_policy.py +++ b/rllib/algorithms/r2d2/r2d2_torch_policy.py @@ -314,7 +314,7 @@ def extra_action_out_fn( name="R2D2TorchPolicy", framework="torch", loss_fn=r2d2_loss, - get_default_config=lambda: ray.rllib.algorithms.r2d2.r2d2.R2D2_DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.r2d2.r2d2.R2D2Config(), make_model_and_action_dist=build_r2d2_model_and_distribution, action_distribution_fn=get_distribution_inputs_and_class, stats_fn=build_q_stats, diff --git a/rllib/algorithms/sac/__init__.py b/rllib/algorithms/sac/__init__.py index 77addbb84554..222b4429b63b 100644 --- a/rllib/algorithms/sac/__init__.py +++ b/rllib/algorithms/sac/__init__.py @@ -1,11 +1,8 @@ -from ray.rllib.algorithms.sac.sac import SAC, DEFAULT_CONFIG, SACConfig 
+from ray.rllib.algorithms.sac.sac import SAC, SACConfig from ray.rllib.algorithms.sac.sac_tf_policy import SACTFPolicy from ray.rllib.algorithms.sac.sac_torch_policy import SACTorchPolicy -from ray.rllib.algorithms.sac.rnnsac import ( - RNNSAC, - DEFAULT_CONFIG as RNNSAC_DEFAULT_CONFIG, -) +from ray.rllib.algorithms.sac.rnnsac import RNNSAC from ray.rllib.algorithms.sac.rnnsac import RNNSACTorchPolicy, RNNSACConfig __all__ = [ @@ -16,7 +13,4 @@ "RNNSACTorchPolicy", "RNNSAC", "RNNSACConfig", - # Deprecated. - "DEFAULT_CONFIG", - "RNNSAC_DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/sac/rnnsac.py b/rllib/algorithms/sac/rnnsac.py index 0704a7da2b1a..176e389f4aef 100644 --- a/rllib/algorithms/sac/rnnsac.py +++ b/rllib/algorithms/sac/rnnsac.py @@ -8,7 +8,7 @@ from ray.rllib.algorithms.sac.rnnsac_torch_policy import RNNSACTorchPolicy from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import DEPRECATED_VALUE, Deprecated +from ray.rllib.utils.deprecation import DEPRECATED_VALUE class RNNSACConfig(SACConfig): @@ -124,19 +124,3 @@ def get_default_policy_class( cls, config: AlgorithmConfig ) -> Optional[Type[Policy]]: return RNNSACTorchPolicy - - -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(RNNSACConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.sac.rnnsac.DEFAULT_CONFIG", - new="ray.rllib.algorithms.sac.rnnsac.RNNSACConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/sac/rnnsac_torch_policy.py b/rllib/algorithms/sac/rnnsac_torch_policy.py index 32a562e8be61..085c287594cb 100644 --- a/rllib/algorithms/sac/rnnsac_torch_policy.py +++ b/rllib/algorithms/sac/rnnsac_torch_policy.py @@ -478,7 +478,7 @@ def reduce_mean_valid(t): RNNSACTorchPolicy = SACTorchPolicy.with_updates( name="RNNSACPolicy", - get_default_config=lambda: ray.rllib.algorithms.sac.rnnsac.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.sac.rnnsac.RNNSACConfig(), action_distribution_fn=action_distribution_fn, make_model_and_action_dist=build_sac_model_and_action_dist, loss_fn=actor_critic_loss, diff --git a/rllib/algorithms/sac/sac.py b/rllib/algorithms/sac/sac.py index b26bc26e0698..f5939edcce15 100644 --- a/rllib/algorithms/sac/sac.py +++ b/rllib/algorithms/sac/sac.py @@ -10,7 +10,6 @@ from ray.rllib.utils.deprecation import ( DEPRECATED_VALUE, deprecation_warning, - Deprecated, ) from ray.rllib.utils.framework import try_import_tf, try_import_tfp @@ -359,20 +358,3 @@ def get_default_policy_class( return SACTorchPolicy else: return SACTFPolicy - - -# Deprecated: Use ray.rllib.algorithms.sac.SACConfig instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(SACConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.sac.sac::DEFAULT_CONFIG", - new="ray.rllib.algorithms.sac.sac::SACConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/sac/sac_tf_policy.py b/rllib/algorithms/sac/sac_tf_policy.py index 32c9ec805d67..a2a72cd96f56 100644 --- a/rllib/algorithms/sac/sac_tf_policy.py +++ b/rllib/algorithms/sac/sac_tf_policy.py @@ -777,7 +777,7 @@ def validate_spaces( # above. 
SACTFPolicy = build_tf_policy( name="SACTFPolicy", - get_default_config=lambda: ray.rllib.algorithms.sac.sac.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.sac.sac.SACConfig(), make_model=build_sac_model, postprocess_fn=postprocess_trajectory, action_distribution_fn=get_distribution_inputs_and_class, diff --git a/rllib/algorithms/sac/sac_torch_policy.py b/rllib/algorithms/sac/sac_torch_policy.py index 4bb56d2825d4..aa79c7b7bd50 100644 --- a/rllib/algorithms/sac/sac_torch_policy.py +++ b/rllib/algorithms/sac/sac_torch_policy.py @@ -503,7 +503,7 @@ def setup_late_mixins( name="SACTorchPolicy", framework="torch", loss_fn=actor_critic_loss, - get_default_config=lambda: ray.rllib.algorithms.sac.sac.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.sac.sac.SACConfig(), stats_fn=stats, postprocess_fn=postprocess_trajectory, extra_grad_process_fn=apply_grad_clipping, diff --git a/rllib/algorithms/simple_q/__init__.py b/rllib/algorithms/simple_q/__init__.py index 9ea347708547..5ff44c34450c 100644 --- a/rllib/algorithms/simple_q/__init__.py +++ b/rllib/algorithms/simple_q/__init__.py @@ -1,5 +1,4 @@ from ray.rllib.algorithms.simple_q.simple_q import ( - DEFAULT_CONFIG, SimpleQ, SimpleQConfig, ) @@ -15,5 +14,4 @@ "SimpleQTF1Policy", "SimpleQTF2Policy", "SimpleQTorchPolicy", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/simple_q/simple_q.py b/rllib/algorithms/simple_q/simple_q.py index 38ec3ba48c8b..2f9a8e60bd12 100644 --- a/rllib/algorithms/simple_q/simple_q.py +++ b/rllib/algorithms/simple_q/simple_q.py @@ -24,7 +24,7 @@ from ray.rllib.policy.policy import Policy from ray.rllib.utils import deep_update from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import DEPRECATED_VALUE, Deprecated +from ray.rllib.utils.deprecation import DEPRECATED_VALUE from ray.rllib.utils.metrics import ( LAST_TARGET_UPDATE_TS, NUM_AGENT_STEPS_SAMPLED, @@ -379,20 +379,3 @@ def training_step(self) -> ResultDict: # Return all collected metrics for the iteration. return train_results - - -# Deprecated: Use ray.rllib.algorithms.simple_q.simple_q.SimpleQConfig instead! 
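
SAC loses both its DEFAULT_CONFIG and the aliased RNNSAC_DEFAULT_CONFIG; per the __init__ change above, RNNSACConfig is still exported from the same package. A sketch (RNNSAC is torch-only, hence the explicit framework call; the env is just a placeholder):

    from ray.rllib.algorithms.sac import RNNSACConfig

    config = (
        RNNSACConfig()
        .environment("Pendulum-v1")
        .framework("torch")
        .rollouts(num_rollout_workers=0)
    )
    algo = config.build()
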
-class _deprecated_default_config(dict): - def __init__(self): - super().__init__(SimpleQConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.dqn.simple_q::DEFAULT_CONFIG", - new="ray.rllib.algorithms.simple_q.simple_q::SimpleQConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/slateq/__init__.py b/rllib/algorithms/slateq/__init__.py index 10733353c83d..203fb486bcfe 100644 --- a/rllib/algorithms/slateq/__init__.py +++ b/rllib/algorithms/slateq/__init__.py @@ -1,7 +1,6 @@ from ray.rllib.algorithms.slateq.slateq import ( SlateQ, SlateQConfig, - DEFAULT_CONFIG, ) from ray.rllib.algorithms.slateq.slateq_tf_policy import SlateQTFPolicy from ray.rllib.algorithms.slateq.slateq_torch_policy import SlateQTorchPolicy @@ -11,5 +10,4 @@ "SlateQConfig", "SlateQTFPolicy", "SlateQTorchPolicy", - "DEFAULT_CONFIG", ] diff --git a/rllib/algorithms/slateq/slateq.py b/rllib/algorithms/slateq/slateq.py index a068c85634e1..c912f98c4fcc 100644 --- a/rllib/algorithms/slateq/slateq.py +++ b/rllib/algorithms/slateq/slateq.py @@ -21,7 +21,7 @@ from ray.rllib.algorithms.slateq.slateq_torch_policy import SlateQTorchPolicy from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated, DEPRECATED_VALUE +from ray.rllib.utils.deprecation import DEPRECATED_VALUE logger = logging.getLogger(__name__) @@ -241,20 +241,3 @@ def get_default_policy_class( return SlateQTorchPolicy else: return SlateQTFPolicy - - -# Deprecated: Use ray.rllib.algorithms.slateq.SlateQConfig instead! -class _deprecated_default_config(dict): - def __init__(self): - super().__init__(SlateQConfig().to_dict()) - - @Deprecated( - old="ray.rllib.algorithms.slateq.slateq::DEFAULT_CONFIG", - new="ray.rllib.algorithms.slateq.slateq::SlateQConfig(...)", - error=True, - ) - def __getitem__(self, item): - return super().__getitem__(item) - - -DEFAULT_CONFIG = _deprecated_default_config() diff --git a/rllib/algorithms/slateq/slateq_tf_policy.py b/rllib/algorithms/slateq/slateq_tf_policy.py index 91a876320308..c6145bced515 100644 --- a/rllib/algorithms/slateq/slateq_tf_policy.py +++ b/rllib/algorithms/slateq/slateq_tf_policy.py @@ -364,7 +364,7 @@ def rmsprop_optimizer( SlateQTFPolicy = build_tf_policy( name="SlateQTFPolicy", - get_default_config=lambda: ray.rllib.algorithms.slateq.slateq.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.slateq.slateq.SlateQConfig(), # Build model, loss functions, and optimizers make_model=build_slateq_model, loss_fn=build_slateq_losses, diff --git a/rllib/algorithms/slateq/slateq_torch_policy.py b/rllib/algorithms/slateq/slateq_torch_policy.py index f46ea2c86c22..ea21d062d961 100644 --- a/rllib/algorithms/slateq/slateq_torch_policy.py +++ b/rllib/algorithms/slateq/slateq_torch_policy.py @@ -421,7 +421,7 @@ def setup_late_mixins( SlateQTorchPolicy = build_policy_class( name="SlateQTorchPolicy", framework="torch", - get_default_config=lambda: ray.rllib.algorithms.slateq.slateq.DEFAULT_CONFIG, + get_default_config=lambda: ray.rllib.algorithms.slateq.slateq.SlateQConfig(), before_init=setup_early, after_init=setup_late_mixins, loss_fn=build_slateq_losses, diff --git a/rllib/algorithms/td3/__init__.py b/rllib/algorithms/td3/__init__.py index 12884c091036..e17240ba6b51 100644 --- a/rllib/algorithms/td3/__init__.py +++ b/rllib/algorithms/td3/__init__.py @@ -1,7 +1,6 @@ -from ray.rllib.algorithms.td3.td3 
diff --git a/rllib/algorithms/td3/__init__.py b/rllib/algorithms/td3/__init__.py
index 12884c091036..e17240ba6b51 100644
--- a/rllib/algorithms/td3/__init__.py
+++ b/rllib/algorithms/td3/__init__.py
@@ -1,7 +1,6 @@
-from ray.rllib.algorithms.td3.td3 import TD3, TD3Config, TD3_DEFAULT_CONFIG
+from ray.rllib.algorithms.td3.td3 import TD3, TD3Config
 
 __all__ = [
     "TD3",
     "TD3Config",
-    "TD3_DEFAULT_CONFIG",
 ]
diff --git a/rllib/algorithms/td3/td3.py b/rllib/algorithms/td3/td3.py
index 2319b3502ddc..bc8fe25d8143 100644
--- a/rllib/algorithms/td3/td3.py
+++ b/rllib/algorithms/td3/td3.py
@@ -6,7 +6,6 @@
 from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
 from ray.rllib.algorithms.ddpg.ddpg import DDPG, DDPGConfig
 from ray.rllib.utils.annotations import override
-from ray.rllib.utils.deprecation import Deprecated
 from ray.rllib.utils.deprecation import DEPRECATED_VALUE
 
 
@@ -107,20 +106,3 @@ class TD3(DDPG):
     @override(DDPG)
     def get_default_config(cls) -> AlgorithmConfig:
         return TD3Config()
-
-
-# Deprecated: Use ray.rllib.algorithms.ddpg..td3.TD3Config instead!
-class _deprecated_default_config(dict):
-    def __init__(self):
-        super().__init__(TD3Config().to_dict())
-
-    @Deprecated(
-        old="ray.rllib.algorithms.ddpg.td3::TD3_DEFAULT_CONFIG",
-        new="ray.rllib.algorithms.td3.td3::TD3Config(...)",
-        error=True,
-    )
-    def __getitem__(self, item):
-        return super().__getitem__(item)
-
-
-TD3_DEFAULT_CONFIG = _deprecated_default_config()
diff --git a/rllib/tests/backward_compat/test_backward_compat.py b/rllib/tests/backward_compat/test_backward_compat.py
index 77f421af106c..c5e2d19a46ae 100644
--- a/rllib/tests/backward_compat/test_backward_compat.py
+++ b/rllib/tests/backward_compat/test_backward_compat.py
@@ -81,34 +81,6 @@ def test_old_checkpoint_formats(self):
             print(algo.train())
             algo.stop()
 
-    def test_v1_policy_from_checkpoint(self):
-        """Tests, whether we can load Policy checkpoints for different frameworks."""
-
-        # We wouldn't need this test once we get rid of V1 policy implementations.
-
-        rllib_dir = Path(__file__).parent.parent.parent
-        print(f"rllib dir={rllib_dir} exists={os.path.isdir(rllib_dir)}")
-
-        for fw in framework_iterator(with_eager_tracing=True):
-            path_to_checkpoint = os.path.join(
-                rllib_dir,
-                "tests",
-                "backward_compat",
-                "checkpoints",
-                "v1.0",
-                "dqn_frozenlake_" + fw,
-                "policies",
-                "default_policy",
-            )
-
-            print(
-                f"path_to_checkpoint={path_to_checkpoint} "
-                f"exists={os.path.isdir(path_to_checkpoint)}"
-            )
-
-            policy = Policy.from_checkpoint(path_to_checkpoint)
-            self.assertTrue(isinstance(policy, Policy))
-
     def test_old_algorithm_config_dicts(self):
         """Tests, whether we can build Algorithm objects with old config dicts."""
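With TD3_DEFAULT_CONFIG gone, the get_default_config() classmethod kept above is the single remaining source of defaults: it returns a fresh TD3Config. A small sketch of how calling code adapts (the env id and lr value are illustrative):

    from ray.rllib.algorithms.td3 import TD3, TD3Config

    # Defaults now come from the config class / Algorithm classmethod:
    defaults = TD3.get_default_config()
    assert isinstance(defaults, TD3Config)

    # Building an Algorithm from an explicitly constructed config:
    config = TD3Config().environment("Pendulum-v1").training(lr=0.0005)
    algo = config.build()  # replaces TD3(config, env="Pendulum-v1")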
diff --git a/rllib/tests/test_gpus.py b/rllib/tests/test_gpus.py
index 984808d4b906..3d01901a5db6 100644
--- a/rllib/tests/test_gpus.py
+++ b/rllib/tests/test_gpus.py
@@ -2,7 +2,7 @@
 import ray
 from ray import air
-from ray.rllib.algorithms.a2c.a2c import A2C, A2C_DEFAULT_CONFIG
+from ray.rllib.algorithms.a2c.a2c import A2CConfig
 from ray.rllib.utils.framework import try_import_torch
 from ray.rllib.utils.test_utils import framework_iterator
 from ray import tune
@@ -18,9 +18,7 @@ def test_gpus_in_non_local_mode(self):
         actual_gpus = torch.cuda.device_count()
         print(f"Actual GPUs found (by torch): {actual_gpus}")
 
-        config = A2C_DEFAULT_CONFIG.copy()
-        config["num_workers"] = 2
-        config["env"] = "CartPole-v1"
+        config = A2CConfig().rollouts(num_rollout_workers=2).environment("CartPole-v1")
 
         # Expect errors when we run a config w/ num_gpus>0 w/o a GPU
         # and _fake_gpus=False.
@@ -32,9 +30,11 @@ def test_gpus_in_non_local_mode(self):
         )
         for num_gpus_per_worker in per_worker:
             for fake_gpus in [False] + ([] if num_gpus == 0 else [True]):
-                config["num_gpus"] = num_gpus
-                config["num_gpus_per_worker"] = num_gpus_per_worker
-                config["_fake_gpus"] = fake_gpus
+                config.resources(
+                    num_gpus=num_gpus,
+                    num_gpus_per_worker=num_gpus_per_worker,
+                    _fake_gpus=fake_gpus,
+                )
 
                 print(
                     f"\n------------\nnum_gpus={num_gpus} "
@@ -59,14 +59,14 @@ def test_gpus_in_non_local_mode(self):
                     self.assertRaisesRegex(
                         RuntimeError,
                         "Found 0 GPUs on your machine",
-                        lambda: A2C(config, env="CartPole-v1"),
+                        lambda: config.build(),
                     )
                 # If actual_gpus >= num_gpus or faked,
                 # expect no error.
                 else:
                     print("direct RLlib")
-                    trainer = A2C(config, env="CartPole-v1")
-                    trainer.stop()
+                    algo = config.build()
+                    algo.stop()
                 # Cannot run through ray.tune.Tuner().fit() w/ fake GPUs
                 # as it would simply wait infinitely for the
                 # resources to become available (even though, we
@@ -88,22 +88,19 @@ def test_gpus_in_local_mode(self):
         actual_gpus_available = torch.cuda.device_count()
 
-        config = A2C_DEFAULT_CONFIG.copy()
-        config["num_workers"] = 2
-        config["env"] = "CartPole-v1"
+        config = A2CConfig().rollouts(num_rollout_workers=2).environment("CartPole-v1")
 
         # Expect no errors in local mode.
         for num_gpus in [0, 0.1, 1, actual_gpus_available + 4]:
             print(f"num_gpus={num_gpus}")
             for fake_gpus in [False, True]:
                 print(f"_fake_gpus={fake_gpus}")
-                config["num_gpus"] = num_gpus
-                config["_fake_gpus"] = fake_gpus
+                config.resources(num_gpus=num_gpus, _fake_gpus=fake_gpus)
 
                 frameworks = ("tf", "torch") if num_gpus > 1 else ("tf2", "tf", "torch")
                 for _ in framework_iterator(config, frameworks=frameworks):
                     print("direct RLlib")
-                    trainer = A2C(config, env="CartPole-v1")
-                    trainer.stop()
+                    algo = config.build()
+                    algo.stop()
                     print("via ray.tune.Tuner().fit()")
                     tune.Tuner(
                         "A2C",
diff --git a/rllib/tests/test_local.py b/rllib/tests/test_local.py
index ec884eba20af..0d35cb06eff9 100644
--- a/rllib/tests/test_local.py
+++ b/rllib/tests/test_local.py
@@ -1,7 +1,7 @@
 import unittest
 
 import ray
-from ray.rllib.algorithms.pg import PG, DEFAULT_CONFIG
+from ray.rllib.algorithms.pg import PGConfig
 from ray.rllib.utils.test_utils import framework_iterator
 
 
@@ -13,14 +13,14 @@ def tearDown(self) -> None:
         ray.shutdown()
 
     def test_local(self):
-        cf = DEFAULT_CONFIG.copy()
-        cf["model"]["fcnet_hiddens"] = [10]
-        cf["num_workers"] = 2
+        cf = PGConfig().environment("CartPole-v1")
+        cf.model["fcnet_hiddens"] = [10]
+        cf.num_rollout_workers = 2
 
         for _ in framework_iterator(cf):
-            agent = PG(cf, "CartPole-v1")
-            print(agent.train())
-            agent.stop()
+            algo = cf.build()
+            print(algo.train())
+            algo.stop()
 
 
 if __name__ == "__main__":
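The rewritten tests above rely on the fact that AlgorithmConfig setters mutate the instance in place (and return it), so a config can be adjusted inside a sweep loop without being rebuilt; plain attribute assignment works as well, as test_local.py now does with cf.num_rollout_workers. A minimal sketch of both styles (the loop values are illustrative):

    from ray.rllib.algorithms.a2c import A2CConfig

    config = A2CConfig().environment("CartPole-v1")
    config.num_rollout_workers = 2        # direct attribute assignment
    config.model["fcnet_hiddens"] = [10]  # nested dicts stay mutable

    for num_gpus in [0, 1]:
        # resources() updates the same object on every iteration,
        # mirroring the loop in test_gpus.py above.
        config.resources(num_gpus=num_gpus, _fake_gpus=True)
        algo = config.build()
        print(algo.train())
        algo.stop()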
diff --git a/rllib/tests/test_nested_action_spaces.py b/rllib/tests/test_nested_action_spaces.py
index 054416f533eb..6943bd5a2bee 100644
--- a/rllib/tests/test_nested_action_spaces.py
+++ b/rllib/tests/test_nested_action_spaces.py
@@ -7,7 +7,7 @@
 import ray
 from ray.rllib.algorithms.bc import BC
-from ray.rllib.algorithms.pg import PG, DEFAULT_CONFIG
+from ray.rllib.algorithms.pg import PGConfig
 from ray.rllib.examples.env.random_env import RandomEnv
 from ray.rllib.offline.json_reader import JsonReader
 from ray.rllib.policy.sample_batch import convert_ma_batch_to_sample_batch
@@ -59,7 +59,7 @@ def tearDownClass(cls):
         ray.shutdown()
 
     def test_nested_action_spaces(self):
-        config = DEFAULT_CONFIG.copy()
+        config = PGConfig()
         config["env"] = RandomEnv
         # Write output to check, whether actions are written correctly.
         tmp_dir = os.popen("mktemp -d").read()[:-1]
@@ -76,7 +76,7 @@ def test_nested_action_spaces(self):
         config["actions_in_input_normalized"] = True
 
         # Remove lr schedule from config, not needed here, and not supported by BC.
-        del config["lr_schedule"]
+        config.lr_schedule = None
         for _ in framework_iterator(config):
             for name, action_space in SPACES.items():
                 config["env_config"] = {
@@ -86,7 +86,7 @@ def test_nested_action_spaces(self):
                 print(f"A={action_space} flatten={flatten}")
                 shutil.rmtree(config["output"])
                 config["_disable_action_flattening"] = not flatten
-                pg = PG(config)
+                pg = config.build()
                 pg.train()
                 pg.stop()
@@ -117,7 +117,7 @@ def test_nested_action_spaces(self):
                     ioctx.config["input_config"]["paths"], ioctx
                 )
                 config["input_config"] = {"paths": config["output"]}
-                del config["output"]
+                config.output = None
                 bc = BC(config=config)
                 bc.train()
                 bc.stop()
diff --git a/rllib/tests/test_placement_groups.py b/rllib/tests/test_placement_groups.py
index 63e52cdef168..54ca5e8f97ae 100644
--- a/rllib/tests/test_placement_groups.py
+++ b/rllib/tests/test_placement_groups.py
@@ -5,7 +5,7 @@
 from ray import air
 from ray import tune
 from ray.tune import Callback
-from ray.rllib.algorithms.pg import PG, DEFAULT_CONFIG
+from ray.rllib.algorithms.pg import PG, PGConfig
 from ray.tune.experiment import Trial
 from ray.tune.execution.placement_groups import PlacementGroupFactory
 
@@ -32,13 +32,16 @@ def tearDown(self) -> None:
         ray.shutdown()
 
     def test_overriding_default_resource_request(self):
-        config = DEFAULT_CONFIG.copy()
-        config["model"]["fcnet_hiddens"] = [10]
-        config["num_workers"] = 2
         # 3 Trials: Can only run 2 at a time (num_cpus=6; needed: 3).
-        config["lr"] = tune.grid_search([0.1, 0.01, 0.001])
-        config["env"] = "CartPole-v1"
-        config["framework"] = "tf"
+        config = (
+            PGConfig()
+            .training(
+                model={"fcnet_hiddens": [10]}, lr=tune.grid_search([0.1, 0.01, 0.001])
+            )
+            .environment("CartPole-v1")
+            .rollouts(num_rollout_workers=2)
+            .framework("tf")
+        )
 
         # Create an Algorithm with an overridden default_resource_request
         # method that returns a PlacementGroupFactory.
@@ -66,15 +69,19 @@ def default_resource_request(cls, config):
         ).fit()
 
     def test_default_resource_request(self):
-        config = DEFAULT_CONFIG.copy()
-        config["model"]["fcnet_hiddens"] = [10]
-        config["num_workers"] = 2
-        config["num_cpus_per_worker"] = 2
+        config = (
+            PGConfig()
+            .rollouts(
+                num_rollout_workers=2,
+            )
+            .training(
+                model={"fcnet_hiddens": [10]}, lr=tune.grid_search([0.1, 0.01, 0.001])
+            )
+            .environment("CartPole-v1")
+            .framework("torch")
+            .resources(placement_strategy="SPREAD", num_cpus_per_worker=2)
+        )
         # 3 Trials: Can only run 1 at a time (num_cpus=6; needed: 5).
-        config["lr"] = tune.grid_search([0.1, 0.01, 0.001])
-        config["env"] = "CartPole-v1"
-        config["framework"] = "torch"
-        config["placement_strategy"] = "SPREAD"
 
         tune.Tuner(
             PG,
@@ -88,10 +95,12 @@ def test_default_resource_request(self):
         ).fit()
 
     def test_default_resource_request_plus_manual_leads_to_error(self):
-        config = DEFAULT_CONFIG.copy()
-        config["model"]["fcnet_hiddens"] = [10]
-        config["num_workers"] = 0
-        config["env"] = "CartPole-v1"
+        config = (
+            PGConfig()
+            .training(model={"fcnet_hiddens": [10]})
+            .environment("CartPole-v1")
+            .rollouts(num_rollout_workers=0)
+        )
 
         try:
             tune.Tuner(
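The placement-group tests show the last piece of the migration: Tune search primitives such as tune.grid_search() now go into the config through .training() (or any other setter) rather than raw dict keys, and the config is then handed to tune.Tuner. A minimal sketch, assuming param_space is passed as a plain dict via to_dict(), since dict param spaces are where Tune resolves grid searches:

    from ray import tune
    from ray.rllib.algorithms.pg import PGConfig

    config = (
        PGConfig()
        .environment("CartPole-v1")
        .rollouts(num_rollout_workers=2)
        .training(lr=tune.grid_search([0.1, 0.01, 0.001]))
    )
    # Each of the three lr values becomes its own Tune trial:
    tuner = tune.Tuner("PG", param_space=config.to_dict())
    # results = tuner.fit()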