[RLlib] Optionally don't drop last ts in v-trace calculations (APPO and IMPALA). #19601
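For context, this PR adds a `vtrace_drop_last_ts` config option so that the last timestep of each rollout no longer has to be dropped from the v-trace calculation. A minimal usage sketch (assuming the Ray 1.x `ImpalaTrainer` API; the hard-coded dropping removed by this PR corresponds to leaving the new option at its default):

```python
import ray
from ray.rllib.agents.impala import ImpalaTrainer

ray.init()

trainer = ImpalaTrainer(
    env="CartPole-v0",
    config={
        "framework": "tf",
        "num_workers": 1,
        "vtrace": True,
        # New option from this PR: set to False to keep the last timestep of
        # each rollout in the v-trace loss (True reproduces the old
        # always-drop behavior).
        "vtrace_drop_last_ts": False,
    })
print(trainer.train()["episode_reward_mean"])
```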
```diff
@@ -144,7 +144,9 @@ def reduce_mean_valid(t):
         reduce_mean_valid = tf.reduce_mean

     if policy.config["vtrace"]:
-        logger.debug("Using V-Trace surrogate loss (vtrace=True)")
+        drop_last = policy.config["vtrace_drop_last_ts"]
+        logger.debug("Using V-Trace surrogate loss (vtrace=True; "
+                     f"drop_last={drop_last})")

         # Prepare actions for loss.
         loss_actions = actions if is_multidiscrete else tf.expand_dims(
```
```diff
@@ -155,7 +157,7 @@ def reduce_mean_valid(t):

         # Prepare KL for Loss
         mean_kl = make_time_major(
-            old_policy_action_dist.multi_kl(action_dist), drop_last=True)
+            old_policy_action_dist.multi_kl(action_dist), drop_last=drop_last)

         unpacked_behaviour_logits = tf.split(
             behaviour_logits, output_hidden_shape, axis=1)
```
```diff
@@ -166,16 +168,19 @@ def reduce_mean_valid(t):
         with tf.device("/cpu:0"):
             vtrace_returns = vtrace.multi_from_logits(
                 behaviour_policy_logits=make_time_major(
-                    unpacked_behaviour_logits, drop_last=True),
+                    unpacked_behaviour_logits, drop_last=drop_last),
                 target_policy_logits=make_time_major(
-                    unpacked_old_policy_behaviour_logits, drop_last=True),
+                    unpacked_old_policy_behaviour_logits, drop_last=drop_last),
                 actions=tf.unstack(
-                    make_time_major(loss_actions, drop_last=True), axis=2),
+                    make_time_major(loss_actions, drop_last=drop_last),
+                    axis=2),
                 discounts=tf.cast(
-                    ~make_time_major(tf.cast(dones, tf.bool), drop_last=True),
+                    ~make_time_major(
+                        tf.cast(dones, tf.bool), drop_last=drop_last),
                     tf.float32) * policy.config["gamma"],
-                rewards=make_time_major(rewards, drop_last=True),
-                values=values_time_major[:-1],  # drop-last=True
+                rewards=make_time_major(rewards, drop_last=drop_last),
+                values=values_time_major[:-1]
+                if drop_last else values_time_major,
                 bootstrap_value=values_time_major[-1],
                 dist_class=Categorical if is_multidiscrete else dist_class,
                 model=model,
```

Inline review thread on the `if drop_last else values_time_major,` continuation line:

Reviewer: indentation, 4 spaces in front?

Author: Yeah, not sure. The LINTer says it's ok. You mean the continuation line?

Reviewer: Yeah, since it's a continuation of the last line. No idea, minor comment.
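The only shape bookkeeping in this hunk is whether the time-major tensors keep `T` or `T - 1` steps. A toy NumPy illustration (not RLlib code) of the `values=...` / `bootstrap_value=...` split:

```python
import numpy as np

T, B = 5, 2  # rollout length (time-major) and batch size
values_time_major = np.arange(T * B, dtype=np.float32).reshape(T, B)

for drop_last in (True, False):
    # Mirrors `values=values_time_major[:-1] if drop_last else values_time_major`.
    values = values_time_major[:-1] if drop_last else values_time_major
    # The bootstrap value is always the value estimate of the final step.
    bootstrap_value = values_time_major[-1]
    print(drop_last, values.shape, bootstrap_value.shape)
# True  (4, 2) (2,)
# False (5, 2) (2,)
```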
```diff
@@ -186,11 +191,11 @@ def reduce_mean_valid(t):
             )

             actions_logp = make_time_major(
-                action_dist.logp(actions), drop_last=True)
+                action_dist.logp(actions), drop_last=drop_last)
             prev_actions_logp = make_time_major(
-                prev_action_dist.logp(actions), drop_last=True)
+                prev_action_dist.logp(actions), drop_last=drop_last)
             old_policy_actions_logp = make_time_major(
-                old_policy_action_dist.logp(actions), drop_last=True)
+                old_policy_action_dist.logp(actions), drop_last=drop_last)

             is_ratio = tf.clip_by_value(
                 tf.math.exp(prev_actions_logp - old_policy_actions_logp), 0.0, 2.0)
```
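For reference, a toy NumPy stand-in (not RLlib code) for the importance-sampling ratio that consumes the time-major log-probs above:

```python
import numpy as np

# Log-probs of the sampled actions under the sampling ("prev") policy and
# the lagging target ("old") policy, e.g. for three timesteps.
prev_actions_logp = np.log(np.array([0.6, 0.2, 0.9]))
old_policy_actions_logp = np.log(np.array([0.5, 0.5, 0.1]))

# Same clipping as the tf.clip_by_value call above: ratio bounded to [0, 2].
is_ratio = np.clip(
    np.exp(prev_actions_logp - old_policy_actions_logp), 0.0, 2.0)
print(is_ratio)  # -> [1.2 0.4 2. ]
```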
```diff
@@ -210,7 +215,10 @@ def reduce_mean_valid(t):
             mean_policy_loss = -reduce_mean_valid(surrogate_loss)

             # The value function loss.
-            delta = values_time_major[:-1] - vtrace_returns.vs
+            if drop_last:
+                delta = values_time_major[:-1] - vtrace_returns.vs
+            else:
+                delta = values_time_major - vtrace_returns.vs
             value_targets = vtrace_returns.vs
             mean_vf_loss = 0.5 * reduce_mean_valid(tf.math.square(delta))

```
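A plain-NumPy sketch (illustrative values only) of how the two branches differ; the v-trace targets `vs` are assumed to already have the matching length:

```python
import numpy as np

def vf_loss(values_time_major, vs, drop_last):
    # With drop_last=True the final value estimate is excluded from the loss,
    # otherwise every timestep contributes (mirroring the branch above).
    preds = values_time_major[:-1] if drop_last else values_time_major
    delta = preds - vs
    return 0.5 * np.mean(np.square(delta))

values = np.array([[0.1], [0.2], [0.3], [0.4]])        # [T=4, B=1]
vs_dropped = np.array([[0.15], [0.25], [0.35]])        # [T-1, B] targets
vs_full = np.array([[0.15], [0.25], [0.35], [0.45]])   # [T, B] targets

print(vf_loss(values, vs_dropped, drop_last=True))    # uses 3 timesteps
print(vf_loss(values, vs_full, drop_last=False))      # uses all 4 timesteps
```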
```diff
@@ -294,7 +302,8 @@ def stats(policy: Policy, train_batch: SampleBatch) -> Dict[str, TensorType]:
         policy,
         train_batch.get(SampleBatch.SEQ_LENS),
         policy.model.value_function(),
-        drop_last=policy.config["vtrace"])
+        drop_last=policy.config["vtrace"]
+        and policy.config["vtrace_drop_last_ts"])

     stats_dict = {
         "cur_lr": tf.cast(policy.cur_lr, tf.float64),
```
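The stats hunk just makes the time-major value tensor follow the same rule as the loss: the last step is dropped only when v-trace is on and the new option still requests dropping. A trivial truth-table sketch of that boolean combination:

```python
for vtrace in (False, True):
    for vtrace_drop_last_ts in (False, True):
        drop_last = vtrace and vtrace_drop_last_ts
        print(vtrace, vtrace_drop_last_ts, "->", drop_last)
# Only vtrace=True together with vtrace_drop_last_ts=True drops the last step.
```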
```diff
@@ -17,3 +17,5 @@ cartpole-impala-fake-gpus:
         # Fake 2 GPUs.
         num_gpus: 2
         _fake_gpus: true
+
+        vtrace_drop_last_ts: false
```
```diff
@@ -8,3 +8,4 @@ cartpole-impala:
         # Works for both torch and tf.
         framework: tf
         num_gpus: 0
+        vtrace_drop_last_ts: false
```
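The same regression test could be written directly in Python; a sketch assuming Ray 1.x's `tune.run` and the parameters visible in these YAML hunks (the env name and stopping criterion are illustrative assumptions):

```python
from ray import tune

tune.run(
    "IMPALA",
    stop={"episode_reward_mean": 150},  # illustrative stopping criterion
    config={
        "env": "CartPole-v0",
        "framework": "tf",              # works for both torch and tf
        "num_gpus": 0,
        # Exercise the non-default code path added in this PR.
        "vtrace_drop_last_ts": False,
    },
)
```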
Reviewer: What does `~make_time_major(...)` mean?

Author: `~` means NOT. `make_time_major` transforms a tensor of shape `[B, T, ...]` into `[T, B, ...]`.
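A minimal sketch of what that answer describes (a simplified stand-in for RLlib's `make_time_major`, ignoring the RNN/sequence handling of the real helper):

```python
import tensorflow as tf

def make_time_major_sketch(tensor, drop_last=False):
    # [B, T, ...] -> [T, B, ...]; optionally drop the last timestep.
    perm = [1, 0] + list(range(2, len(tensor.shape)))
    t = tf.transpose(tensor, perm=perm)
    return t[:-1] if drop_last else t

dones = tf.constant([[False, False, True]])       # [B=1, T=3]
time_major_dones = make_time_major_sketch(dones)  # [T=3, B=1]

# `~` is element-wise logical NOT on a boolean tensor, so this yields 1.0 for
# non-terminal steps and 0.0 for terminal ones, i.e. the discount mask.
discounts = tf.cast(~time_major_dones, tf.float32) * 0.99
print(discounts.numpy())  # [[0.99], [0.99], [0.]]
```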