"""
python tokenizer.py --model "bert-base-uncased" --path "ucberkeley/experiment/run 1"
python tokenizer.py --model "distilbert-base-uncased" --path "jigsaw/experiment/run 1"
"""
import argparse
import json
import os
import pickle
from dataclasses import dataclass

import pandas as pd
# On Kaggle or Google Colab these may need installing first:
#   !pip install datasets transformers
import datasets
from transformers import AutoTokenizer

from train_utils import dictionary


@dataclass
class Config:
    # model_name is attached at runtime in main() from the --model argument
    max_seq_length: int = 128

def get_arguments():
    parser = argparse.ArgumentParser(
        description='Tokenize dataset',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '-m', '--model', help='Model whose tokenizer and pretrained version will be used',
        type=str, default='bert-base-uncased'
    )
    parser.add_argument(
        '-p', '--path', help="Experiment file's directory.",
        type=str, default=None
    )
    return parser.parse_args()

def main():
    args = get_arguments()
    experiment_folder = args.path
    Config.model_name = args.model

    with open(os.path.join(experiment_folder, 'config.json')) as inputfile:
        configDict = json.load(inputfile)

    output_folder = os.path.join(args.path, args.model)
    if not os.path.exists(output_folder):
        print(f'Creating output folder {output_folder}')
        os.makedirs(output_folder, exist_ok=True)

    text_column = configDict['text_column']
    target_column = configDict['target_column']
    id_column = configDict['id_column']

    # Load input files
    train = pd.read_csv(os.path.join(experiment_folder, 'train.csv'))
    validation = pd.read_csv(os.path.join(experiment_folder, 'validation.csv'))
    test = pd.read_csv(os.path.join(experiment_folder, 'test.csv'))

    # Keep only the columns needed downstream
    final_columns = [id_column, text_column, target_column]

    tokenizer = AutoTokenizer.from_pretrained(
        Config.model_name,
        do_lower_case=True
    )

    def tokenize_function(examples):
        # Pad/truncate every example to a fixed length so tensors stack cleanly
        return tokenizer(
            list(examples[text_column]),
            padding="max_length",
            max_length=Config.max_seq_length,
            truncation=True
        )

    def process(df, split='train'):
        # .copy() avoids pandas' SettingWithCopyWarning on the assignment below
        df = df[final_columns].copy()
        df.loc[:, text_column] = df[text_column].astype(str).values
        # Create a Hugging Face dataset from the pandas dataframe and tokenize it
        dataset = datasets.Dataset.from_pandas(df)
        tokenized = dataset.map(tokenize_function, batched=True)
        tokenized = tokenized.remove_columns([text_column])
        tokenized.set_format("torch")
        with open(os.path.join(output_folder, f'{split}.pkl'), 'wb') as output:
            pickle.dump(tokenized, output, pickle.HIGHEST_PROTOCOL)
    process(train, 'train')
    process(validation, 'validation')
    process(test, 'test')

    # Persist the merged experiment + tokenizer settings next to the pickles
    # (`dictionary` from train_utils presumably turns Config's attributes into
    # a dict; the `|` merge requires Python 3.9+)
    with open(os.path.join(output_folder, 'config.json'), 'w') as output:
        json.dump(configDict | dictionary(Config), output, indent=4)


if __name__ == '__main__':
    main()
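
# Downstream usage sketch (illustrative only; the path mirrors the usage string
# in the module docstring, the rest follows from what this script writes):
#
#   import pickle
#   with open('ucberkeley/experiment/run 1/bert-base-uncased/train.pkl', 'rb') as f:
#       train_dataset = pickle.load(f)
#   train_dataset[0]  # dict with 'input_ids', 'attention_mask', id/target columns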