[Wav2Vec2-Conf / LLaMA] Style fix (huggingface#26188)
* torch.nn -> nn

* fix llama

* copies
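
For context: the fix replaces fully qualified torch.nn.* references with the nn alias that these modeling files already import, so layers are instantiated in one consistent style. A minimal sketch of the convention (the class and argument names below are illustrative, not taken from the diff):

import torch
from torch import nn  # alias used throughout the modeling files

# Before: mixed style with a fully qualified reference
class ExampleBlockOld(torch.nn.Module):
    def __init__(self, hidden_size: int):
        super().__init__()
        self.norm = nn.LayerNorm(hidden_size)

# After: consistent use of the nn alias
class ExampleBlockNew(nn.Module):
    def __init__(self, hidden_size: int):
        super().__init__()
        self.norm = nn.LayerNorm(hidden_size)

Both spellings point at the same module object, so the change is purely stylistic. The "copies" bullet presumably refers to propagating the edit into classes marked with "# Copied from ..." comments (visible in the first hunk below) via the repository's copy-consistency tooling.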
sanchit-gandhi authored and blbadger committed Nov 8, 2023
1 parent 09f407b commit becf5e9
Showing 3 changed files with 9 additions and 9 deletions.
@@ -99,7 +99,7 @@ def forward(self, hidden_states):


 # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->OpenLlama
-class OpenLlamaRotaryEmbedding(torch.nn.Module):
+class OpenLlamaRotaryEmbedding(nn.Module):
     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
         super().__init__()

src/transformers/models/llama/modeling_llama.py (2 changes: 1 addition & 1 deletion)
@@ -89,7 +89,7 @@ def forward(self, hidden_states):
         return self.weight * hidden_states.to(input_dtype)


-class LlamaRotaryEmbedding(torch.nn.Module):
+class LlamaRotaryEmbedding(nn.Module):
     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
         super().__init__()

@@ -584,16 +584,16 @@ def __init__(self, config):
         if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
             raise ValueError("`config.conv_depthwise_kernel_size` should be a odd number for 'SAME' padding")
         self.layer_norm = nn.LayerNorm(config.hidden_size)
-        self.pointwise_conv1 = torch.nn.Conv1d(
+        self.pointwise_conv1 = nn.Conv1d(
             config.hidden_size,
             2 * config.hidden_size,
             kernel_size=1,
             stride=1,
             padding=0,
             bias=False,
         )
-        self.glu = torch.nn.GLU(dim=1)
-        self.depthwise_conv = torch.nn.Conv1d(
+        self.glu = nn.GLU(dim=1)
+        self.depthwise_conv = nn.Conv1d(
             config.hidden_size,
             config.hidden_size,
             config.conv_depthwise_kernel_size,
@@ -602,17 +602,17 @@ def __init__(self, config):
             groups=config.hidden_size,
             bias=False,
         )
-        self.batch_norm = torch.nn.BatchNorm1d(config.hidden_size)
+        self.batch_norm = nn.BatchNorm1d(config.hidden_size)
         self.activation = ACT2FN[config.hidden_act]
-        self.pointwise_conv2 = torch.nn.Conv1d(
+        self.pointwise_conv2 = nn.Conv1d(
             config.hidden_size,
             config.hidden_size,
             kernel_size=1,
             stride=1,
             padding=0,
             bias=False,
         )
-        self.dropout = torch.nn.Dropout(config.conformer_conv_dropout)
+        self.dropout = nn.Dropout(config.conformer_conv_dropout)

     def forward(self, hidden_states):
         hidden_states = self.layer_norm(hidden_states)
@@ -798,7 +798,7 @@ def __init__(self, config):

         # Self-Attention
         self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
-        self.self_attn_dropout = torch.nn.Dropout(dropout)
+        self.self_attn_dropout = nn.Dropout(dropout)
         self.self_attn = Wav2Vec2ConformerSelfAttention(config)

         # Conformer Convolution
