-
Notifications
You must be signed in to change notification settings - Fork 2
/
run.sh
67 lines (61 loc) · 3.56 KB
/
run.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
# Example invocations of the underlying pruning scripts:
#   run dense baseline:
#     python llama.py /mnt/dolphinfs/hdd_pool/docker/user/hadoop-mtai/llm-fine-tuning/models/public/llama-7b-hf c4
#   run magnitude baseline:
#     python opt.py facebook/opt-125m c4 --sparsity .5 --gmp
#   prune to 50% uniform sparsity with SparseGPT:
#     python llama.py /mnt/dolphinfs/hdd_pool/docker/user/hadoop-mtai/llm-fine-tuning/models/public/llama-7b-hf c4 --sparsity .5
#   prune to full 2:4 sparsity with SparseGPT:
#     python opt.py facebook/opt-125m c4 --prunen 2 --prunem 4
#   prune to 50% + 4-bit with SparseGPT:
#     python opt.py facebook/opt-125m c4 --sparsity .5 --wbits 4

# Usage: run.sh <model> <sparsity_way> <model_size> <sparsity>
#   model        one of: baichuan | llama | llama2 | opt | bloom | ziya
#   sparsity_way pruning strategy, forwarded to the model script as --sparsity_way
#   model_size   size in billions of parameters; selects the checkpoint path
#   sparsity     target sparsity ratio (e.g. 0.5), forwarded as --sparsity
# Results are saved under exp/<model>/<model_size>B/<sparsity_way>_<sparsity>.

set -eu

# Original bug: the check used `-lt 1` and did not exit, so the script
# continued with empty variables. All four arguments are required.
if [ "$#" -lt 4 ]; then
  echo "usage: $0 <model> <sparsity_way> <model_size> <sparsity>" >&2
  exit 1
fi

model=$1
sparsity_way=$2
model_size=$3
sparsity=$4

echo "$model"
save_path="exp/${model}/${model_size}B/${sparsity_way}_${sparsity}"

# Shared prefix for the public checkpoint store.
public_root=/mnt/dolphinfs/hdd_pool/docker/user/hadoop-mtai/llm-fine-tuning/models/public
user_root=/mnt/dolphinfs/hdd_pool/docker/user/hadoop-mtai/users

case "$model" in
  baichuan)
    case "$model_size" in
      7)  model_path=$public_root/baichuan-7B ;;
      # previously: $public_root/Baichuan-13B-Base
      13) model_path=$user_root/shaohang02/pretrained_model/Waihu-Baichuan2-13B-Chat-ckpt ;;
      *)  echo "unsupported baichuan size: $model_size" >&2; exit 1 ;;
    esac
    python baichuan.py "$model_path" c4 --sparsity "$sparsity" --sparsity_way "$sparsity_way" --save "$save_path"
    ;;
  llama)
    case "$model_size" in
      7)  model_path=$public_root/llama-7b-hf ;;
      # previously: $public_root/llama-13b-hf
      13) model_path=$user_root/shaohang02/lm-evaluation-harness/llm_models/llama-13b-hf ;;
      30) model_path=$public_root/llama-30b-hf-new ;;
      65) model_path=$public_root/llama-65b-hf-new ;;
      *)  echo "unsupported llama size: $model_size" >&2; exit 1 ;;
    esac
    python llama.py "$model_path" c4 --sparsity "$sparsity" --sparsity_way "$sparsity_way" --save "$save_path"
    ;;
  opt)
    # opt path is fixed to the 1.3b checkpoint; model_size is ignored here.
    deepspeed --num_gpus 1 --num_nodes 1 opt.py "$public_root/opt-1.3b" c4 --sparsity "$sparsity" --sparsity_way "$sparsity_way"
    ;;
  llama2)
    case "$model_size" in
      7)  model_path=$public_root/llama2-7b-hf ;;
      13) model_path=$user_root/chencong29/llm/models/Llama-2-13b-hf ;;
      *)  echo "unsupported llama2 size: $model_size" >&2; exit 1 ;;
    esac
    python llama2.py "$model_path" c4 --sparsity "$sparsity" --sparsity_way "$sparsity_way" --save "$save_path"
    ;;
  bloom)
    case "$model_size" in
      1.7) model_path=$public_root/bloom-1b7 ;;
      7)   model_path=$public_root/bloom-7b ;;
      *)   echo "unsupported bloom size: $model_size" >&2; exit 1 ;;
    esac
    python bloom.py "$model_path" c4 --sparsity "$sparsity" --sparsity_way "$sparsity_way" --save "$save_path"
    ;;
  ziya)
    case "$model_size" in
      13) model_path=$user_root/shaohang02/pretrained_model/mrc_上清 ;;
      *)  echo "unsupported ziya size: $model_size" >&2; exit 1 ;;
    esac
    python ziya.py "$model_path" c4 --sparsity "$sparsity" --sparsity_way "$sparsity_way" --save "$save_path"
    ;;
  *)
    # Original script fell through silently on an unknown model name.
    echo "unknown model: $model (expected baichuan|llama|llama2|opt|bloom|ziya)" >&2
    exit 1
    ;;
esac