Detach all losses that get saved #1

Merged · 3 commits · Mar 17, 2021
Changes from 1 commit
89 changes: 50 additions & 39 deletions nequip/train/trainer.py
@@ -574,16 +574,17 @@ def batch_step(self, data, n_batches, validation=False):

self.model.train()

data = data.to(self.device)
data = AtomicData.to_AtomicDataDict(data)
if hasattr(self.model, "unscale"):
# This means that self.model is RescaleOutputs
# this will normalize the targets
# in validation (eval mode), it does nothing
# in train mode, it normalizes the targets
data = self.model.unscale(data)

out = self.model(data)
with torch.no_grad():
data = data.to(self.device)
data = AtomicData.to_AtomicDataDict(data)
if hasattr(self.model, "unscale"):
# This means that self.model is RescaleOutputs
# this will normalize the targets
# in validation (eval mode), it does nothing
# in train mode, it normalizes the targets
data = self.model.unscale(data)

out = self.model(data)

# If we're in evaluation mode (i.e. validation), then
# data's target property is unnormalized, and out's has been rescaled into the same units
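
Note on the first hunk: the device transfer, the AtomicData.to_AtomicDataDict conversion, and the target normalization via self.model.unscale now run under torch.no_grad(), so none of that preprocessing is recorded by autograd; graph construction only begins at the forward pass out = self.model(data). A minimal standalone sketch of the mechanism, using plain tensors rather than the nequip API:

    import torch

    x = torch.randn(3, requires_grad=True)

    with torch.no_grad():
        y = x * 2  # not recorded: y.grad_fn is None and y.requires_grad is False
    z = x * 2      # recorded: z.grad_fn is a MulBackward0 node

    print(y.requires_grad, z.requires_grad)  # prints: False True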
@@ -593,54 +594,64 @@ def batch_step(self, data, n_batches, validation=False):
loss, loss_contrib = self.loss(pred=out, ref=data)

if not validation:

self.optim.zero_grad()
loss.backward()
self.optim.step()

if self.lr_scheduler_name == "CosineAnnealingWarmRestarts":
self.lr_sched.step(self.iepoch + self.ibatch / n_batches)

mae, mae_contrib = self.loss.mae(pred=out, ref=data)
scaled_loss_contrib = {}
if hasattr(self.model, "scale"):
# save loss stats
with torch.no_grad():
mae, mae_contrib = self.loss.mae(pred=out, ref=data)
scaled_loss_contrib = {}
if hasattr(self.model, "scale"):

for key in mae_contrib:
mae_contrib[key] = self.model.scale(
mae_contrib[key], force_process=True, do_shift=False
)

# TO DO: this eventually needs to be removed; there is no guarantee that a loss is MSE
for key in loss_contrib:
for key in mae_contrib:
mae_contrib[key] = self.model.scale(
mae_contrib[key], force_process=True, do_shift=False
)

scaled_loss_contrib[key] = {
k: torch.clone(v) for k, v in loss_contrib[key].items()
}
# TO DO: this eventually needs to be removed; there is no guarantee that a loss is MSE
for key in loss_contrib:

scaled_loss_contrib[key] = self.model.scale(
scaled_loss_contrib[key],
force_process=True,
do_shift=False,
do_scale=True,
)
scaled_loss_contrib[key] = {
k: torch.clone(v) for k, v in loss_contrib[key].items()
}

if "mse" in type(self.loss.funcs[key].func).__name__.lower():
scaled_loss_contrib[key] = self.model.scale(
scaled_loss_contrib[key],
force_process=True,
do_shift=False,
do_scale=True,
)

self.batch_loss = loss
self.batch_scaled_loss_contrib = scaled_loss_contrib
self.batch_loss_contrib = loss_contrib
self.batch_mae = mae
self.batch_mae_contrib = mae_contrib
if "mse" in type(self.loss.funcs[key].func).__name__.lower():
scaled_loss_contrib[key] = self.model.scale(
scaled_loss_contrib[key],
force_process=True,
do_shift=False,
do_scale=True,
)

self.batch_loss = loss.detach()
self.batch_scaled_loss_contrib = {
k1: {k2: v2.detach() for k2, v2 in v1.items()}
for k1, v1 in scaled_loss_contrib.items()
}
self.batch_loss_contrib = {
k1: {k2: v2.detach() for k2, v2 in v1.items()}
for k1, v1 in loss_contrib.items()
}
self.batch_mae = mae.detach()
self.batch_mae_contrib = {
k1: {k2: v2.detach() for k2, v2 in v1.items()}
for k1, v1 in mae_contrib.items()
}

self.end_of_batch_log(validation)
for callback in self.end_of_batch_callbacks:
callback(self)
self.end_of_batch_log(validation)
for callback in self.end_of_batch_callbacks:
callback(self)

@property
def early_stop_cond(self):
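
Note on the second hunk, which is the core of this PR: the per-batch statistics are computed under torch.no_grad(), and every tensor stored on the trainer (batch_loss, batch_mae, and the nested *_contrib dicts) is saved through .detach(), so holding those attributes between batches no longer keeps each batch's autograd graph alive. A minimal sketch of the failure mode being fixed, not the nequip code itself:

    import torch

    model = torch.nn.Linear(10, 1)
    saved = []

    for _ in range(3):
        x = torch.randn(4, 10)
        loss = model(x).pow(2).mean()
        model.zero_grad()
        loss.backward()
        # saved.append(loss)         # attached: keeps the loss's grad_fn chain, and whatever
        #                            # those nodes reference, alive for as long as it is stored
        saved.append(loss.detach())  # detached: only the value is retained

For scalars, loss.item() would achieve the same effect at the cost of a host-device sync; .detach() has the advantage of applying uniformly to the nested dictionaries of per-key contributions saved here.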