train_dpo_ring_llama.sh (from OpenRLHF/OpenRLHF)
set -x
# Ring attention requires: pip install ring_flash_attn
read -r -d '' training_commands <<EOF
openrlhf.cli.train_dpo \
   --save_path ./checkpoint/llama3-8b-ring-dpo \
   --save_steps -1 \
   --logging_steps 1 \
   --eval_steps -1 \
   --train_batch_size 256 \
   --micro_train_batch_size 1 \
   --pretrain OpenRLHF/Llama-3-8b-sft-mixture \
   --bf16 \
   --max_epochs 1 \
   --max_len 8192 \
   --zero_stage 3 \
   --learning_rate 5e-7 \
   --beta 0.1 \
   --dataset OpenRLHF/preference_dataset_mixture2_and_safe_pku \
   --apply_chat_template \
   --chosen_key chosen \
   --rejected_key rejected \
   --ring_attn_size 2 \
   --ring_head_stride 2 \
   --packing_samples \
   --flash_attn \
   --load_checkpoint \
   --gradient_checkpointing
EOF
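
# Note on the ring attention flags above (my reading of the OpenRLHF options,
# not an explanation taken from this script): --ring_attn_size 2 shards each
# packed sequence across 2 GPUs, so the effective data-parallel group size
# becomes world_size / ring_attn_size, and --ring_head_stride appears to
# control how many attention heads are processed per ring attention pass.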
# --use_wandb [WANDB_TOKENS] or True (use wandb login command)
# --ipo [for IPO]
# --label_smoothing 0.1 [for cDPO]
# --ref_offload
# --nll_loss_coef (Regularization with NLL loss)
if [[ ${1} != "slurm" ]]; then
deepspeed --module $training_commands
fi
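
# Usage sketch (based on how OpenRLHF example scripts are typically launched;
# the Slurm wrapper itself is not part of this file):
#   bash train_dpo_ring_llama.sh          # launch training locally via deepspeed
#   bash train_dpo_ring_llama.sh slurm    # only define $training_commands, so a
#                                         # separate Slurm submission script can
#                                         # reuse it to launch the job itself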