
Commit

Add Qwen model (#182)
Co-authored-by: Casper Hansen <[email protected]>
Sanster and casper-hansen authored Nov 20, 2023
1 parent 84c8787 commit e440c7a
Showing 3 changed files with 59 additions and 2 deletions.
3 changes: 2 additions & 1 deletion awq/models/__init__.py
@@ -8,4 +8,5 @@
 from .mistral import MistralAWQForCausalLM
 from .gpt_neox import GPTNeoXAWQForCausalLM
 from .aquila import AquilaAWQForCausalLM
-from .yi import YiAWQForCausalLM
+from .yi import YiAWQForCausalLM
+from .qwen import QwenAWQForCausalLM
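With this re-export in place, the new class is importable directly from the package namespace (a quick sanity check, assuming this commit of AutoAWQ is installed):

from awq.models import QwenAWQForCausalLM

print(QwenAWQForCausalLM.layer_type)  # "QWenBlock"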
3 changes: 2 additions & 1 deletion awq/models/auto.py
@@ -16,7 +16,8 @@
     "mistral": MistralAWQForCausalLM,
     "gpt_neox": GPTNeoXAWQForCausalLM,
     "aquila": AquilaAWQForCausalLM,
-    "Yi": YiAWQForCausalLM
+    "Yi": YiAWQForCausalLM,
+    "qwen": QwenAWQForCausalLM
 }
 
 def check_and_get_model_type(model_dir, trust_remote_code=True):
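Registering the "qwen" model_type here lets check_and_get_model_type read a Qwen checkpoint's config and dispatch to QwenAWQForCausalLM through the auto class. A minimal end-to-end sketch of quantizing such a checkpoint follows; the model path, output directory, and quant_config values are illustrative placeholders, and exact keyword arguments may differ slightly between AutoAWQ versions:

from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

model_path = "Qwen/Qwen-7B-Chat"   # placeholder: any checkpoint whose config has model_type "qwen"
quant_path = "qwen-7b-chat-awq"    # placeholder output directory
quant_config = {"zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM"}

# Qwen ships custom modeling code, so trust_remote_code is required.
model = AutoAWQForCausalLM.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# Run AWQ calibration and quantize the weights.
model.quantize(tokenizer, quant_config=quant_config)

# Persist the quantized model and tokenizer.
model.save_quantized(quant_path)
tokenizer.save_pretrained(quant_path)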
55 changes: 55 additions & 0 deletions awq/models/qwen.py
@@ -0,0 +1,55 @@
from .base import BaseAWQForCausalLM


class QwenAWQForCausalLM(BaseAWQForCausalLM):
    layer_type = "QWenBlock"
    max_new_tokens_key = "seq_length"

    @staticmethod
    def get_model_layers(model):
        return model.transformer.h

    @staticmethod
    def get_act_for_scaling(module):
        return dict(is_scalable=False)

    @staticmethod
    def move_embed(model, device: str):
        model.transformer.wte = model.transformer.wte.to(device)
        model.transformer.rotary_emb = model.transformer.rotary_emb.to(device)

    @staticmethod
    def get_layers_for_scaling(module, input_feat, module_kwargs):
        layers = []

        # attention
        layers.append(
            dict(
                prev_op=module.ln_1,
                layers=[module.attn.c_attn, module.attn.c_proj],
                inp=input_feat["attn.c_attn"],
                module2inspect=module.attn,
                kwargs=module_kwargs,
            )
        )

        # mlp
        layers.append(
            dict(
                prev_op=module.ln_2,
                layers=[module.mlp.w2, module.mlp.w1],
                inp=input_feat["mlp.w2"],
                module2inspect=module.mlp,
            )
        )

        # linear 2
        layers.append(
            dict(
                prev_op=module.mlp.w1,
                layers=[module.mlp.c_proj],
                inp=input_feat["mlp.c_proj"],
            )
        )

        return layers
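Each dict returned by get_layers_for_scaling describes one group of linear layers whose inputs share an AWQ scale, together with the prev_op whose output absorbs the inverse scale: ln_1 precedes the attention projections, ln_2 precedes the two parallel MLP input projections (w2 and w1), and w1 itself precedes the down projection mlp.c_proj. get_act_for_scaling returning is_scalable=False means no separate activation-function scaling point is used for this architecture. The attribute names mirror Qwen's remote modeling code (QWenBlock with ln_1/ln_2, attn, mlp); a quick way to verify them is to print a single block. The checkpoint name below is a placeholder and the download is large:

from transformers import AutoModelForCausalLM

# "Qwen/Qwen-7B" is a placeholder; any Qwen checkpoint using the custom QWen modeling code works.
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)

# One QWenBlock should expose ln_1, attn.c_attn, attn.c_proj, ln_2, mlp.w1, mlp.w2, mlp.c_proj.
print(model.transformer.h[0])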
