Skip to content

Commit

Permalink
Remove instances of random_start_time.
Browse files Browse the repository at this point in the history
  • Loading branch information
javiarrobas committed Sep 12, 2024
1 parent e736b5d commit 9b9923b
Show file tree
Hide file tree
Showing 11 changed files with 3 additions and 26 deletions.
1 change: 0 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,6 @@ env = BoptestGymEnv(
'UpperSetp[1]':(280.,310.)},
predictive_period = 24*3600,
regressive_period = 6*3600,
random_start_time = True,
max_episode_length = 24*3600,
warmup_period = 24*3600,
step_period = 3600)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1106,7 +1106,6 @@
" testcase = 'bestest_hydronic_heat_pump',\n",
" actions = ['oveHeaPumY_u'],\n",
" observations = {'reaTZon_y':(280.,310.)},\n",
" random_start_time = False,\n",
" start_time = 31*24*3600,\n",
" max_episode_length = 24*3600,\n",
" warmup_period = 24*3600,\n",
Expand All @@ -1125,8 +1124,7 @@
"- `actions`: List of strings indicating the action space.\n",
"- `observations`: Dictionary mapping observation keys to a tuple with the lower and upper bound of each observation. These bounds define the typical operational range for discretization and normalization purposes. Observation keys must belong either to the set of measurements or to the set of forecasting variables of the BOPTEST test case.\n",
"- `max_episode_length`: Maximum duration of each episode in seconds.\n",
"- `random_start_time`: Set to True if desired to use a random start time for each episode. That is typically usefull when training an RL agent to run several episodes with different boundary condition data. In our case, we set it to False and specify the start time of the episode.\n",
"- `start_time`: start time of the episode. It is specified in seconds from the beginning of the year. To be used in combination with `random_start_time=False`. \n",
"- `start_time`: start time of the episode. It is specified in seconds from the beginning of the year. If `start_time=None` (default), a random start time is used for each episode. The latter is useful when training an RL agent to run several episodes with different boundary condition data.\n",
"- `warmup_period`: Desired simulation period to initialize each episode, in seconds. In our case, we simulate the testcase for one day right before the beginning of the episode.\n",
"- `step_period`: The period of each control step, in seconds. In this case is set to one hour."
]
Expand Down Expand Up @@ -2182,7 +2180,6 @@
" testcase = 'bestest_hydronic_heat_pump',\n",
" actions = ['oveHeaPumY_u'],\n",
" observations = {'reaTZon_y':(lower_setp,upper_setp)},\n",
" random_start_time = True,\n",
" excluding_periods = excluding_periods,\n",
" max_episode_length = 2*24*3600,\n",
" warmup_period = 24*3600,\n",
Expand Down Expand Up @@ -2642,7 +2639,6 @@
" testcase = 'bestest_hydronic_heat_pump',\n",
" actions = ['oveHeaPumY_u'],\n",
" observations = {'reaTZon_y':(lower_setp,upper_setp)},\n",
" random_start_time = False,\n",
" start_time = 31*24*3600,\n",
" max_episode_length = 24*3600,\n",
" warmup_period = 24*3600,\n",
Expand Down Expand Up @@ -2834,7 +2830,6 @@
" 'UpperSetp[1]':(280.,310.)},\n",
" predictive_period = 24*3600,\n",
" regressive_period = 6*3600,\n",
" random_start_time = True,\n",
" max_episode_length = 24*3600,\n",
" warmup_period = 24*3600,\n",
" step_period = 3600)\n",
Expand Down
2 changes: 0 additions & 2 deletions examples/generate_expert_traj.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,10 +99,8 @@ def find_nearest_action(self, value):
start = '2021-02-01 00:00:00'
start_time = (pd.Timestamp(start)-pd.Timestamp(start_year)).total_seconds()
if isinstance(env,Wrapper):
env.unwrapped.random_start_time = False
env.unwrapped.start_time = start_time
else:
env.random_start_time = False
env.start_time = start_time

# Instantiate expert model. Distinguish between continuous or discrete
Expand Down
1 change: 0 additions & 1 deletion examples/run_baseline.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,6 @@ def run(envClass, wrapper=None, scenario={'electricity_price':'constant'},
env = envClass(url = url,
actions = ['oveHeaPumY_u'],
observations = {'reaTZon_y':(280.,310.)},
random_start_time = False,
start_time = 31*24*3600,
max_episode_length = 3*24*3600,
warmup_period = 3*24*3600,
Expand Down
1 change: 0 additions & 1 deletion examples/run_sample.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,6 @@ def run(envClass, wrapper=None, plot=False):
env = envClass(url = url,
actions = ['oveHeaPumY_u'],
observations = {'reaTZon_y':(280.,310.)},
random_start_time = False,
start_time = 31*24*3600,
max_episode_length = 3*24*3600,
warmup_period = 3*24*3600,
Expand Down
1 change: 0 additions & 1 deletion examples/run_save_callback.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,6 @@ def train_A2C_with_callback(start_time_tests = [31*24*3600, 304*24*3600],
env = BoptestGymEnvRewardWeightCost(url = url,
actions = ['oveHeaPumY_u'],
observations = {'reaTZon_y':(280.,310.)},
random_start_time = True,
excluding_periods = excluding_periods,
max_episode_length = 1*3600,
warmup_period = 3*3600,
Expand Down
1 change: 0 additions & 1 deletion examples/run_variable_episode.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,6 @@ def compute_truncated(self, res, reward=None,
env = BoptestGymEnvVariableEpisodeLength(url = url,
actions = ['oveHeaPumY_u'],
observations = {'reaTZon_y':(280.,310.)},
random_start_time = True,
excluding_periods = excluding_periods,
max_episode_length = 6*3600,
warmup_period = 3*3600,
Expand Down
1 change: 0 additions & 1 deletion examples/run_vectorized.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,6 @@ def _init():
scenario={'electricity_price': 'dynamic'},
predictive_period=24 * 3600,
regressive_period=6 * 3600,
random_start_time=True,
excluding_periods=[(16 * 24 * 3600, 30 * 24 * 3600), (108 * 24 * 3600, 122 * 24 * 3600)],
max_episode_length=24 * 3600,
warmup_period=24 * 3600,
Expand Down
6 changes: 2 additions & 4 deletions examples/test_and_plot.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,12 +23,10 @@ def test_agent(env, model, start_time, episode_length, warmup_period,

# Set a fixed start time
if isinstance(env,Wrapper):
env.unwrapped.random_start_time = False
env.unwrapped.start_time = start_time
env.unwrapped.max_episode_length = episode_length
env.unwrapped.warmup_period = warmup_period
else:
env.random_start_time = False
env.start_time = start_time
env.max_episode_length = episode_length
env.warmup_period = warmup_period
Expand Down Expand Up @@ -62,9 +60,9 @@ def test_agent(env, model, start_time, episode_length, warmup_period,

# Back to random start time, just in case we're testing in the loop
if isinstance(env,Wrapper):
env.unwrapped.random_start_time = True
env.unwrapped.start_time = None
else:
env.random_start_time = True
env.start_time = None

return observations, actions, rewards, kpis

Expand Down
5 changes: 0 additions & 5 deletions examples/train_RL.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,6 @@ def get_reward(self):
url = url,
actions = ['oveHeaPumY_u'],
observations = OrderedDict([('reaTZon_y',(280.,310.))]),
random_start_time = True,
excluding_periods = excluding_periods,
max_episode_length = max_episode_length,
warmup_period = warmup_period,
Expand All @@ -126,7 +125,6 @@ def get_reward(self):
('PriceElectricPowerHighlyDynamic',(-0.4,0.4))]),
scenario = {'electricity_price':'highly_dynamic'},
predictive_period = 0,
random_start_time = True,
excluding_periods = excluding_periods,
max_episode_length = max_episode_length,
warmup_period = warmup_period,
Expand All @@ -144,7 +142,6 @@ def get_reward(self):
('UpperSetp[1]',(280.,310.))]),
predictive_period = 0,
scenario = {'electricity_price':'highly_dynamic'},
random_start_time = True,
excluding_periods = excluding_periods,
max_episode_length = max_episode_length,
warmup_period = warmup_period,
Expand All @@ -162,7 +159,6 @@ def get_reward(self):
('UpperSetp[1]',(280.,310.))]),
predictive_period = 3*3600,
scenario = {'electricity_price':'highly_dynamic'},
random_start_time = True,
excluding_periods = excluding_periods,
max_episode_length = max_episode_length,
warmup_period = warmup_period,
Expand All @@ -185,7 +181,6 @@ def get_reward(self):
predictive_period = 24*3600,
regressive_period = 6*3600,
scenario = {'electricity_price':'highly_dynamic'},
random_start_time = True,
excluding_periods = excluding_periods,
max_episode_length = max_episode_length,
warmup_period = warmup_period,
Expand Down
3 changes: 0 additions & 3 deletions testing/test_boptestGymEnv.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ def setUp(self):
observations = {'reaTZon_y':(280.,310.)},
reward = ['reward'],
max_episode_length = 24*3600,
random_start_time = True,
warmup_period = 3600,
step_period = 900)

Expand Down Expand Up @@ -79,7 +78,6 @@ def test_reset_fixed(self):
'''

self.env.random_start_time = False
self.env.start_time = 14*24*3600
self.env.warmup_period = 3*3600

Expand All @@ -98,7 +96,6 @@ def test_reset_random(self):
'''

self.env.random_start_time = True
self.env.warmup_period = 1*3600
# Set the excluding periods to be the two first weeks of February
# and the two first weeks of November
Expand Down

0 comments on commit 9b9923b

Please sign in to comment.