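# debug.py
#
# Minimal demo of wrapping torch.autograd.detect_anomaly in a "GuruMeditation"
# context manager: when a RuntimeError surfaces during the backward pass, the
# traceback is printed and execution drops into pdb for inspection.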
import torch
import numpy as np
from torch import autograd
import pdb
import traceback
import torch.nn as nn
import torch.optim as optim


class GuruMeditation(autograd.detect_anomaly):
    """Anomaly-detection context manager that drops into pdb on backward errors."""

    def __init__(self):
        super(GuruMeditation, self).__init__()

    def __enter__(self):
        super(GuruMeditation, self).__enter__()
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        super(GuruMeditation, self).__exit__()
        if isinstance(exc_value, RuntimeError):
            traceback.print_tb(exc_traceback)
            halt(str(exc_value))


def halt(msg):
    # Print the error message, then open the debugger at the failure point.
    print(msg)
    pdb.set_trace()


class MyFunc(autograd.Function):
    @staticmethod
    def forward(ctx, inp):
        return inp.clone()

    @staticmethod
    def backward(ctx, gO):
        # Deliberately fail during the backward pass.
        raise RuntimeError("Some error in backward")
        return gO.clone()  # unreachable


class Encoder(nn.Module):
    def __init__(self):
        super(Encoder, self).__init__()
        # RGB image and a mask...
        self.fc3 = nn.Linear(10, 15, bias=False)

    def forward(self, x):
        x2 = self.fc3(x)
        # Deliberately inject a NaN so anomaly detection fires during backward.
        x2[0] = x2[0] * float("nan")
        return x2


encoderInit = Encoder()
opEncoderInit = optim.Adam(encoderInit.parameters(), lr=1e-3, betas=(0.5, 0.999))


def run_fn(a):
    # out = MyFunc.apply(a)  # alternative: raise a RuntimeError inside backward()
    out = encoderInit(a)
    return out.sum()


with GuruMeditation() as gr:
    inp = torch.rand(10, 10, requires_grad=True)
    out = run_fn(inp)
    out.backward()
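
# When backward() reaches the injected NaN (or MyFunc's RuntimeError), anomaly
# detection raises a RuntimeError and warns with the traceback of the forward
# operation that caused it; GuruMeditation then prints the error traceback and
# opens a pdb prompt so the failing tensors can be inspected.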