Skip to content

Commit

Permalink
updated local_model settings
Browse files Browse the repository at this point in the history
  • Loading branch information
NickLennonLiu committed Aug 16, 2023
1 parent 1ea040f commit 755aa1a
Show file tree
Hide file tree
Showing 8 changed files with 110 additions and 26 deletions.
59 changes: 59 additions & 0 deletions configs/eval_oreilly_others.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
from mmengine.config import read_base
from opencompass.partitioners import SizePartitioner
from opencompass.runners import LocalRunner
# NOTE(review): OpenICLEvalTask is imported but not referenced below —
# presumably kept for a future `eval` stage; confirm before removing.
from opencompass.tasks import OpenICLInferTask, OpenICLEvalTask

# OpenCompass evaluation config: runs the O'Reilly test set against a
# hand-picked subset of locally hosted chat models.
#
# Imports inside `read_base()` are handled specially by mmengine's config
# loader (they splice other config files into this one), so they must stay
# in this `from .path import X as Y` form.
with read_base():
    # Datasets
    from .datasets.oreilly.oreilly_test_gen import oreilly_datasets as oreilly
    # Models
    from .models.gpt_4_peiqi import models as gpt_4
    from .models.gpt_3dot5_turbo_peiqi import models as chatgpt_3dot5_turbo
    from .local_models.chatglm2_6b import models as chatglm2_6b
    from .local_models.internlm_chat_7b import models as internlm_chat_7b
    from .local_models.xverse_13b import models as xverse_13b
    from .local_models.qwen_chat_7b import models as qwen_chat_7b
    from .local_models.llama2_7b_chat import models as llama2_chat_7b
    from .local_models.llama2_13b_chat import models as llama2_chat_13b
    from .local_models.baichuan_13b_chat import models as baichuan_13b_chat
    from .models.hf_llama2_7b import models as hf_llama2_7b
    from .models.hf_chatglm2_6b import models as hf_chatglm2_6b
    from .models.hf_internlm_chat_7b import models as hf_internlm_chat_7b


# Datasets to evaluate: only the O'Reilly test split in this config.
datasets = [
    *oreilly
]

# NOTE(review): this import appears unused in this file (models come from
# the read_base() configs above) — verify it is not needed for config
# resolution before deleting.
from opencompass.models import HuggingFaceCausalLM



# Active model list. Commented-out entries are models that were tried and
# parked ("demo pass" marks ones that ran the demo successfully); toggle by
# (un)commenting a starred line.
models = [
    # *gpt_4, # demo pass
    # *chatgpt_3dot5_turbo, # demo pass
    # *chatglm2_6b, # demo pass
    *internlm_chat_7b, # demo pass?
    # *xverse_13b, # demo pass?
    *qwen_chat_7b,
    # *baichuan_13b_chat,
    *llama2_chat_13b,
    # *llama2_chat_7b,
    # *hf_llama2_7b,
    # *hf_chatglm2_6b,
    # *hf_internlm_chat_7b,
]

# Inference stage: size-based task partitioning, executed by a local runner.
infer = dict(
    partitioner=dict(
        type=SizePartitioner,
        # max_task_size = 100,
        # gen_task_coef = 1,
    ),
    runner=dict(
        type=LocalRunner,
        # At most 3 concurrent tasks overall, one per GPU — presumably
        # sized to the host's GPU count; confirm against the machine.
        max_num_workers=3,
        max_workers_per_gpu=1,
        task=dict(type=OpenICLInferTask),
    ),
)
30 changes: 16 additions & 14 deletions configs/eval_test_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from .local_models.xverse_13b import models as xverse_13b
from .local_models.qwen_chat_7b import models as qwen_chat_7b
from .local_models.llama2_7b_chat import models as llama2_chat_7b
from .local_models.baichuan_7b import models as baichuan_7b
from .models.hf_llama2_7b import models as hf_llama2_7b
from .models.hf_chatglm2_6b import models as hf_chatglm2_6b
from .models.hf_internlm_chat_7b import models as hf_internlm_chat_7b
Expand All @@ -30,25 +31,26 @@
models = [
# *gpt_4, # demo pass
# *chatgpt_3dot5_turbo, # demo pass
# *baichuan_7b,
# *chatglm2_6b, # demo pass
# *internlm_chat_7b, # demo pass?
# *xverse_13b, # demo pass?
# *qwen_chat_7b,
*xverse_13b, # demo pass?
*qwen_chat_7b,
*llama2_chat_7b,
# *hf_llama2_7b,
# *hf_chatglm2_6b,
# *hf_internlm_chat_7b,
]

# infer = dict(
# partitioner=dict(
# type=SizePartitioner,
# max_task_size = 100,
# gen_task_coef = 1,
# ),
# runner=dict(
# type=LocalRunner,
# max_num_workers=4,
# task=dict(type=OpenICLInferTask),
# ),
# )
infer = dict(
partitioner=dict(
type=SizePartitioner,
# max_task_size = 100,
# gen_task_coef = 1,
),
runner=dict(
type=LocalRunner,
max_workers_per_gpu=1,
task=dict(type=OpenICLInferTask),
),
)
2 changes: 1 addition & 1 deletion configs/local_models/baichuan_13b_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,6 @@
max_seq_len=2048,
batch_size=8,
model_kwargs=dict(device_map='auto', trust_remote_code=True),
run_cfg=dict(num_gpus=1, num_procs=1),
run_cfg=dict(num_gpus=2, num_procs=1),
)
]
9 changes: 8 additions & 1 deletion configs/local_models/baichuan_13b_chat.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
from opencompass.models import HuggingFace, HuggingFaceCausalLM

_meta_template = dict(
round=[
dict(role='HUMAN', begin='<s> ', end='</s>\n'),
dict(role='BOT', begin='', end='', generate=True),
],
)

models = [
dict(
Expand All @@ -14,7 +20,8 @@
max_out_len=100,
max_seq_len=2048,
batch_size=8,
meta_template=_meta_template,
model_kwargs=dict(device_map='auto', trust_remote_code=True),
run_cfg=dict(num_gpus=1, num_procs=1),
run_cfg=dict(num_gpus=2, num_procs=1),
)
]
9 changes: 8 additions & 1 deletion configs/local_models/internlm_chat_7b.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
from opencompass.models import HuggingFace, HuggingFaceCausalLM

_meta_template = dict(
round=[
dict(role='HUMAN', begin='<|User|>:', end='<eoh>\n'),
dict(role='BOT', begin='<|Bot|>:', end='', generate=True),
],
)

models = [
dict(
Expand All @@ -14,10 +20,11 @@
max_out_len=100,
max_seq_len=2048,
batch_size=8,
meta_template=_meta_template,
model_kwargs=dict(device_map='auto', trust_remote_code=True),
# generate_kwargs=dict(
# temperature=0
# ),
run_cfg=dict(num_gpus=1, num_procs=1),
run_cfg=dict(num_gpus=2, num_procs=1),
)
]
18 changes: 10 additions & 8 deletions configs/local_models/llama2_13b_chat.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from opencompass.models import Llama2Chat
from opencompass.models import Llama2Chat, HuggingFaceCausalLM

# Please follow the instruction in the Meta AI website https://github.com/facebookresearch/llama
# and download the LLaMA-2-Chat model and tokenizer to the path './models/llama2/llama/'.
Expand All @@ -9,7 +9,7 @@
# cd llama
# pip install -e .

api_meta_template = dict(
_meta_template = dict(
round=[
dict(role="HUMAN", api_role="HUMAN"),
dict(role="BOT", api_role="BOT", generate=True),
Expand All @@ -18,14 +18,16 @@

models = [
dict(
abbr="llama-2-13b-chat",
type=Llama2Chat,
path="./models/llama2/llama/llama-2-13b-chat/",
tokenizer_path="./models/llama2/llama/tokenizer.model",
meta_template=api_meta_template,
abbr="llama-2-7b-chat",
# type=Llama2Chat,
type=HuggingFaceCausalLM,
path="/mnt/mfs/opsgpt/models/llama-hf/Llama-2-13b-chat-hf/",
tokenizer_path="/mnt/mfs/opsgpt/models/llama-hf/Llama-2-13b-chat-hf/",
meta_template=_meta_template,
max_out_len=100,
max_seq_len=2048,
batch_size=16,
run_cfg=dict(num_gpus=2, num_procs=2),
model_kwargs=dict(device_map='auto', trust_remote_code=True),
run_cfg=dict(num_gpus=2, num_procs=1),
),
]
7 changes: 7 additions & 0 deletions configs/local_models/qwen_chat_7b.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
from opencompass.models import HuggingFace, HuggingFaceCausalLM

_meta_template = dict(
round=[
dict(role='HUMAN', begin='<|im_start|> ', end='<|im_end|><|endoftext|>\n'),
dict(role='BOT', begin='<|im_start|> ', end='', generate=True),
],
)

models = [
dict(
Expand All @@ -14,6 +20,7 @@
max_out_len=100,
max_seq_len=2048,
batch_size=8,
meta_template=_meta_template,
model_kwargs=dict(device_map='auto', trust_remote_code=True),
# generate_kwargs=dict(
# temperature=0
Expand Down
2 changes: 1 addition & 1 deletion configs/local_models/xverse_13b.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,6 @@
# generate_kwargs=dict(
# temperature=0
# ),
run_cfg=dict(num_gpus=1, num_procs=1),
run_cfg=dict(num_gpus=2, num_procs=1),
)
]

0 comments on commit 755aa1a

Please sign in to comment.