# Replace weights_directory with either:
# * the *ABSOLUTE PATH* to the directory where you store your weights, or
# * the Hugging Face repo ID that hosts your weights.
# Set probe_layer and intervene_layer according to the patching experiment.
# A commented usage sketch is at the end of this file.

[llama-13b]
weights_directory = /scratch-shared/tpungas/llama-13b
name = LLaMA-13B
tokenizer_class = LlamaTokenizer
model_class = LlamaForCausalLM
layers = model.layers
probe_layer = 13
intervene_layer = 7
noperiod = False

[llama-3-8b]
weights_directory = meta-llama/Meta-Llama-3-8B
name = LLaMA-3-8b
tokenizer_class = AutoTokenizer
model_class = AutoModelForCausalLM
layers = model.layers
probe_layer = 12
intervene_layer = 8
noperiod = False

[llama-3-70b]
weights_directory = meta-llama/Meta-Llama-3-70B
name = LLaMA-3-70b
tokenizer_class = AutoTokenizer
model_class = AutoModelForCausalLM
layers = model.layers
probe_layer = 33
intervene_layer = 21
noperiod = False

[hf_key]
# Replace the value below with your HuggingFace access token (if needed).
# The comment is kept on its own line because configparser does not strip
# inline "#" comments by default.
hf_key = ...
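
# ------------------------------------------------------------------
# Usage sketch (illustrative only, kept as comments so this file stays
# valid INI). It assumes the experiment scripts read this file with
# Python's configparser and load models via Hugging Face transformers;
# the actual loading code in this repo may differ.
#
#   import configparser
#   import transformers
#
#   config = configparser.ConfigParser()
#   config.read("config.ini")
#
#   section = config["llama-3-8b"]
#   tokenizer_cls = getattr(transformers, section["tokenizer_class"])  # e.g. AutoTokenizer
#   model_cls = getattr(transformers, section["model_class"])          # e.g. AutoModelForCausalLM
#
#   tokenizer = tokenizer_cls.from_pretrained(section["weights_directory"])
#   model = model_cls.from_pretrained(section["weights_directory"])
#
#   probe_layer = section.getint("probe_layer")          # layer whose activations are probed
#   intervene_layer = section.getint("intervene_layer")  # layer where activations are patched
# ------------------------------------------------------------------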