-
Notifications
You must be signed in to change notification settings - Fork 1
/
val_base.py
134 lines (94 loc) · 4.7 KB
/
val_base.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
import random
from data import ImageDetectionsField,MatrixField, TextField, RawField
from data import COCO, DataLoader
import evaluation
from evaluation import PTBTokenizer, Cider
from models.transformer.transformer import Transformer
from models.transformer import MemoryAugmentedEncoder, MeshedDecoder, ScaledDotProductAttentionMemory
import torch
from tqdm import tqdm
import torch.nn as nn
import argparse, os, pickle
import numpy as np
import itertools
import multiprocessing
from shutil import copyfile
import warnings
warnings.filterwarnings("ignore")
import os, json
from torch.utils.tensorboard import SummaryWriter
from Evison import Display, show_network
import matplotlib.pyplot as plt
# Fix every RNG source (Python, PyTorch, NumPy) so evaluation runs are
# reproducible across invocations.
random.seed(1234)
torch.manual_seed(1234)
np.random.seed(1234)
def evaluate_metrics(model, dataloader, text_field):
    """Generate captions with beam search and score them against references.

    Runs the model over every batch in *dataloader*, decodes the beam-search
    output, dumps generated/reference captions to JSON under
    ``predict_caption/``, and returns the evaluation scores.

    Args:
        model: captioning model exposing ``beam_search`` and ``parameters``.
        dataloader: yields ``(images, caps_gt)`` where ``caps_gt[0]`` holds the
            adjacency-matrix pieces and ``caps_gt[1]`` the reference captions.
            NOTE(review): the reshape below assumes a batch size of 1 and a
            fixed 10x10 adjacency matrix — confirm against the dataloader.
        text_field: vocabulary/decoding helper (``vocab.stoi``, ``decode``).

    Returns:
        dict of metric name -> score from ``evaluation.compute_scores``.
    """
    model.eval()
    # Derive the device from the model itself instead of relying on the
    # module-level ``device`` global, which is only defined under __main__
    # (importing this module and calling the function would raise NameError).
    device = next(model.parameters()).device
    gen = {}
    gts = {}
    with tqdm(desc='evaluation', unit='it', total=len(dataloader)) as pbar:
        for it, (images, caps_gt) in enumerate(dataloader):
            images = images.to(device)
            # caps_gt packs (adjacency-matrix parts, ground-truth captions).
            ad_matrix = torch.cat(caps_gt[0], dim=1).reshape(1, 10, 10).to(device)
            caps_gt = caps_gt[1]
            with torch.no_grad():
                out, _ = model.beam_search(images, ad_matrix, 20,
                                           text_field.vocab.stoi['<eos>'], 5,
                                           out_size=1)
            caps_gen = text_field.decode(out, join_words=False)
            for i, (gts_i, gen_i) in enumerate(zip(caps_gt, caps_gen)):
                # Collapse consecutive duplicate tokens before joining.
                gen_i = ' '.join([k for k, g in itertools.groupby(gen_i)])
                gen['%d_%d' % (it, i)] = [gen_i, ]
                gts['%d_%d' % (it, i)] = gts_i
            pbar.update()
    # exist_ok avoids the exists()/makedirs() race of the original code.
    os.makedirs('predict_caption', exist_ok=True)
    # Use context managers so the JSON files are flushed and closed
    # (the original leaked the handles from json.dump(..., open(...))).
    with open('predict_caption/predict_caption2.json', 'w') as f:
        json.dump(gen, f)
    with open('predict_caption/original_caption2.json', 'w') as f:
        json.dump(gts, f)
    gts = evaluation.PTBTokenizer.tokenize(gts)
    gen = evaluation.PTBTokenizer.tokenize(gen)
    scores, _ = evaluation.compute_scores(gts, gen)
    return scores
if __name__ == '__main__':
    # Evaluation entry point: builds the dataset/model, restores the best
    # checkpoint, and scores the validation split.
    device = torch.device('cuda')
    parser = argparse.ArgumentParser(description='Meshed-Memory Transformer')
    parser.add_argument('--exp_name', type=str, default='m2_transformer')
    parser.add_argument('--batch_size', type=int, default=5)
    parser.add_argument('--workers', type=int, default=0)
    parser.add_argument('--m', type=int, default=48)
    parser.add_argument('--features_path', default='/data/zfzhu/lc/m2transformer/features/instruments18_caption/')  # pre-extracted image features
    # parser.add_argument('--features_path', default='/data/zfzhu/lc/m2transformer/features/instruments18_caption/')  # (alternate features path, kept for reference)
    parser.add_argument('--annotation_folder', type=str, default = 'annotations/annotations')  # caption annotations directory
    args = parser.parse_args()
    print('Meshed-Memory Transformer Evaluation')
    # Pipeline for image regions (at most 10 detections per image).
    image_field = ImageDetectionsField(detections_path=args.features_path, max_detections=10, load_in_tmp=False)
    # Pipeline for text
    text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, tokenize='spacy',
                           remove_punctuation=True, nopoints=False)
    # Adjacency-matrix field read from the same features path.
    ad_matrix_field = MatrixField(detections_path=args.features_path, max_detections=10,load_in_tmp=False)
    # Create the dataset
    dataset = COCO(image_field,ad_matrix_field,text_field, args.features_path, args.annotation_folder, args.annotation_folder)
    train_dataset, val_dataset = dataset.splits
    # Vocabulary built during training; must match the checkpoint below.
    text_field.vocab = pickle.load(open('vocab_%s.pkl' % args.exp_name, 'rb'))
    # Pre-trained memory slots for the encoder; add a leading batch axis.
    # NOTE(review): filename says 'memery' — presumably a typo kept for
    # compatibility with the training script; confirm the file exists.
    memory = np.load('memery_random48.npy')
    memory = memory[np.newaxis,:]
    # Model and dataloaders
    encoder = MemoryAugmentedEncoder(3, 0, memory, attention_module=ScaledDotProductAttentionMemory,
                                     attention_module_kwargs={'m': args.m})
    decoder = MeshedDecoder(len(text_field.vocab), 54, 3, text_field.vocab.stoi['<pad>'])
    model = Transformer(text_field.vocab.stoi['<bos>'], encoder, decoder).to(device)
    # Restore the best checkpoint saved during training.
    data = torch.load('/data/zfzhu/lc/m2transformer/saved_models/m2_transformer_best.pth')
    #print(model)
    model.load_state_dict(data['state_dict'])
    print("Epoch %d" % data['epoch'])
    print(data['best_cider'])
    # Dictionary-style validation set: one entry per image with raw captions.
    dict_dataset_val = val_dataset.image_dictionary({'image': image_field,'ad_matrix':ad_matrix_field, 'text': RawField()})
    # batch_size // 5 — presumably to account for 5 captions per image; verify.
    dict_dataloader_val = DataLoader(dict_dataset_val, batch_size=args.batch_size // 5)
    scores = evaluate_metrics(model, dict_dataloader_val, text_field)
    print(scores)