fix: correct code linting
j-abi authored and Hartorn committed Nov 4, 2019
1 parent b1be1f9 commit ae3098c
Showing 3 changed files with 35 additions and 35 deletions.
22 changes: 9 additions & 13 deletions pytorch_tabnet/sparsemax.py
@@ -1,13 +1,15 @@
from torch import nn
from torch.autograd import Function
-import torch

import torch.nn.functional as F

-# Other possible implementations:
-# https://github.com/KrisKorrel/sparsemax-pytorch/blob/master/sparsemax.py
-# https://github.com/msobroza/SparsemaxPytorch/blob/master/mnist/sparsemax.py
-# https://github.com/vene/sparse-structured-attention/blob/master/pytorch/torchsparseattn/sparsemax.py
+import torch

+"""
+Other possible implementations:
+https://github.com/KrisKorrel/sparsemax-pytorch/blob/master/sparsemax.py
+https://github.com/msobroza/SparsemaxPytorch/blob/master/mnist/sparsemax.py
+https://github.com/vene/sparse-structured-attention/blob/master/pytorch/torchsparseattn/sparsemax.py
+"""


# credits to Yandex https://github.com/Qwicen/node/blob/master/lib/nn_utils.py
@@ -55,7 +57,6 @@ def backward(ctx, grad_output):
grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
return grad_input, None


@staticmethod
def _threshold_and_support(input, dim=-1):
"""Sparsemax building block: compute the threshold
@@ -77,9 +78,6 @@ def _threshold_and_support(input, dim=-1):
return tau, support_size


-#sparsemax = lambda input, dim=-1: SparsemaxFunction.apply(input, dim)


sparsemax = SparsemaxFunction.apply


@@ -174,10 +172,10 @@ def _backward(output, grad_output):
return grad_input



entmax15 = Entmax15Function.apply
entmoid15 = Entmoid15.apply


class Entmax15(nn.Module):

def __init__(self, dim=-1):
@@ -188,8 +186,6 @@ def forward(self, input):
return entmax15(input, self.dim)




# Credits were lost...
# def _make_ix_like(input, dim=0):
# d = input.size(dim)
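For reference, the module exposes sparsemax as SparsemaxFunction.apply, so it can be used as a drop-in, sparse alternative to softmax. A minimal usage sketch, assuming the file is importable as pytorch_tabnet.sparsemax (the logits below are purely illustrative):

import torch
from pytorch_tabnet.sparsemax import sparsemax

# A row of logits; sparsemax returns a probability vector that sums to 1
# but is typically sparse (some entries are exactly zero).
logits = torch.tensor([[1.5, 0.2, -1.0, 1.4]], requires_grad=True)
probs = sparsemax(logits, -1)   # same as SparsemaxFunction.apply(input, dim)
probs.sum().backward()          # gradients flow through the custom backward()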
26 changes: 13 additions & 13 deletions pytorch_tabnet/tab_model.py
@@ -1,9 +1,7 @@
import torch
import numpy as np
from tqdm import tqdm
import time
from sklearn.metrics import roc_auc_score, mean_squared_error, accuracy_score
from torch.autograd import Variable
from IPython.display import clear_output
from torch.nn.utils import clip_grad_norm_
import matplotlib.pyplot as plt
@@ -165,13 +163,17 @@ def fit(self, X_train, y_train, X_valid=None, y_valid=None,
samples_weight = np.array([weights[t] for t in y_train])

samples_weight = torch.from_numpy(samples_weight)
-samples_weigth = samples_weight.double()
+samples_weight = samples_weight.double()
sampler = WeightedRandomSampler(samples_weight, len(samples_weight))
-train_dataloader = DataLoader(TorchDataset(X_train, y_train), batch_size=self.batch_size, sampler=sampler)
-valid_dataloader = DataLoader(TorchDataset(X_valid, y_valid), batch_size=self.batch_size, shuffle=False)
+train_dataloader = DataLoader(TorchDataset(X_train, y_train),
+batch_size=self.batch_size, sampler=sampler)
+valid_dataloader = DataLoader(TorchDataset(X_valid, y_valid),
+batch_size=self.batch_size, shuffle=False)

-train_dataloader = DataLoader(TorchDataset(X_train, y_train), batch_size=self.batch_size, shuffle=True)
-valid_dataloader = DataLoader(TorchDataset(X_valid, y_valid), batch_size=self.batch_size, shuffle=False)
+train_dataloader = DataLoader(TorchDataset(X_train, y_train),
+batch_size=self.batch_size, shuffle=True)
+valid_dataloader = DataLoader(TorchDataset(X_valid, y_valid),
+batch_size=self.batch_size, shuffle=False)

losses_train = []
losses_valid = []
@@ -188,7 +190,6 @@ def fit(self, X_train, y_train, X_valid=None, y_valid=None,
metrics_train.append(fit_metrics['train']['stopping_loss'])
metrics_valid.append(fit_metrics['valid']['stopping_loss'])


stopping_loss = fit_metrics['valid']['stopping_loss']
if stopping_loss < self.best_cost:
self.best_cost = stopping_loss
@@ -204,14 +205,13 @@ def fit(self, X_train, y_train, X_valid=None, y_valid=None,

if self.epoch % self.verbose == 0:
clear_output()
-fig = plt.figure(figsize=(15, 5))
+plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot(range(len(losses_train)), losses_train, label='Train')
plt.plot(range(len(losses_valid)), losses_valid, label='Valid')
plt.grid()
plt.title('Losses')
plt.legend()
-#plt.show()

plt.subplot(1, 2, 2)
plt.plot(range(len(metrics_train)), metrics_train, label='Train')
@@ -266,7 +266,7 @@ def train_epoch(self, train_loader):
values, indices = torch.max(batch_outs["y_preds"], dim=1)
y_preds.append(indices.cpu().detach().numpy())
ys.append(batch_outs["y"].cpu().detach().numpy())
-total_loss+=batch_outs["loss"]
+total_loss += batch_outs["loss"]
pbar.update(1)

y_preds = np.hstack(y_preds)
@@ -323,8 +323,8 @@ def train_batch(self, data, targets):

loss_value = loss.item()
batch_outs = {'loss': loss_value,
'y_preds': output,
'y': targets}
'y_preds': output,
'y': targets}
return batch_outs

def predict_epoch(self, loader):
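The corrected block in fit() builds a WeightedRandomSampler so that under-represented classes are drawn more often during training. A standalone sketch of that logic, assuming a classification target y_train and a per-class weights dict as in the diff (the toy labels are illustrative):

import numpy as np
import torch
from torch.utils.data import WeightedRandomSampler

y_train = np.array([0, 0, 0, 0, 1, 1, 2])        # imbalanced toy labels
class_counts = np.bincount(y_train)
weights = {t: 1.0 / class_counts[t] for t in np.unique(y_train)}

# Same three steps as in fit(): per-sample weights, cast to double, build the sampler.
samples_weight = np.array([weights[t] for t in y_train])
samples_weight = torch.from_numpy(samples_weight).double()
sampler = WeightedRandomSampler(samples_weight, len(samples_weight))
# The sampler is then passed to DataLoader(..., sampler=sampler); that branch of
# fit() therefore does not set shuffle=True, since sampler and shuffle are exclusive.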
22 changes: 13 additions & 9 deletions pytorch_tabnet/tab_network.py
@@ -8,14 +8,14 @@
def initialize_non_glu(module, input_dim, output_dim):
gain_value = np.sqrt((input_dim+output_dim)/np.sqrt(4*input_dim))
torch.nn.init.xavier_normal_(module.weight, gain=gain_value)
-#torch.nn.init.zeros_(module.bias)
+# torch.nn.init.zeros_(module.bias)
return


def initialize_glu(module, input_dim, output_dim):
gain_value = np.sqrt((input_dim+output_dim)/np.sqrt(input_dim))
torch.nn.init.xavier_normal_(module.weight, gain=gain_value)
-#torch.nn.init.zeros_(module.bias)
+# torch.nn.init.zeros_(module.bias)
return


@@ -33,7 +33,8 @@ def __init__(self, input_dim, virtual_batch_size=128, momentum=0.01, device='cpu
self.device = device

def forward(self, x):
-chunks = x.chunk(x.shape[0] // self.virtual_batch_size + ((x.shape[0] % self.virtual_batch_size) > 0))
+chunks = x.chunk(x.shape[0] // self.virtual_batch_size +
+((x.shape[0] % self.virtual_batch_size) > 0))
res = torch.Tensor([]).to(self.device)
for x_ in chunks:
y = self.bn(x_)
@@ -185,7 +186,8 @@ def forward(self, x):
for step in range(self.n_steps):
M = self.att_transformers[step](prior, att)
masks[step] = M
-M_loss += torch.mean(torch.sum(torch.mul(M, torch.log(M+self.epsilon)), dim=1)) / (self.n_steps)
+M_loss += torch.mean(torch.sum(torch.mul(M, torch.log(M+self.epsilon)),
+dim=1)) / (self.n_steps)
# update prior
prior = torch.mul(self.gamma - M, prior)
# output
@@ -220,11 +222,12 @@ def __init__(self, input_dim, output_dim, virtual_batch_size=128, momentum=0.02,
super(AttentiveTransformer, self).__init__()
self.fc = Linear(input_dim, output_dim, bias=False)
initialize_non_glu(self.fc, input_dim, output_dim)
-self.bn = GBN(output_dim, virtual_batch_size=virtual_batch_size, momentum=momentum, device=device)
+self.bn = GBN(output_dim, virtual_batch_size=virtual_batch_size,
+momentum=momentum, device=device)

# Sparsemax
self.sp_max = sparsemax.Sparsemax(dim=-1)
-#Entmax
+# Entmax
# self.sp_max = sparsemax.Entmax15(dim=-1)

def forward(self, priors, processed_feat):
@@ -257,7 +260,8 @@ def __init__(self, input_dim, output_dim, shared_blocks, n_glu,
self.shared = deepcopy(shared_blocks)
if self.shared is not None:
for l in self.shared.glu_layers:
-l.bn = GBN(2*output_dim, virtual_batch_size=virtual_batch_size, momentum=momentum, device=device)
+l.bn = GBN(2*output_dim, virtual_batch_size=virtual_batch_size,
+momentum=momentum, device=device)

if self.shared is None:
self.specifics = GLU_Block(input_dim, output_dim,
@@ -303,7 +307,6 @@ def __init__(self, input_dim, output_dim, n_glu=2, first=False,
momentum=momentum,
device=device))


def forward(self, x):
if self.first: # the first layer of the block has no scale multiplication
x = self.glu_layers[0](x)
@@ -326,7 +329,8 @@ def __init__(self, input_dim, output_dim,
self.fc = Linear(input_dim, 2*output_dim, bias=False)
initialize_glu(self.fc, input_dim, 2*output_dim)

-self.bn = GBN(2*output_dim, virtual_batch_size=virtual_batch_size, momentum=momentum, device=device)
+self.bn = GBN(2*output_dim, virtual_batch_size=virtual_batch_size,
+momentum=momentum, device=device)

def forward(self, x):
x = self.fc(x)
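Several of the re-wrapped calls construct GBN, the Ghost Batch Normalization layer used throughout tab_network.py: the incoming batch is split into virtual batches and each chunk is normalised separately by a shared BatchNorm1d. A self-contained sketch of that chunking logic (tensor sizes are illustrative; the momentum follows the GBN default shown above):

import torch
from torch.nn import BatchNorm1d

def ghost_batch_norm(x, bn, virtual_batch_size=128):
    # Same chunk count as GBN.forward: one extra chunk when the batch size
    # does not divide evenly by virtual_batch_size.
    n_chunks = x.shape[0] // virtual_batch_size + int(x.shape[0] % virtual_batch_size > 0)
    chunks = x.chunk(n_chunks)      # torch.chunk splits into n roughly equal parts
    return torch.cat([bn(chunk) for chunk in chunks], dim=0)

bn = BatchNorm1d(16, momentum=0.01)
x = torch.randn(300, 16)
out = ghost_batch_norm(x, bn)       # 3 virtual batches of 100 rows each
print(out.shape)                    # torch.Size([300, 16])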
