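"""Benchmark runner: executes every solver binary under ./bin against the
instance files under ./benchmarks, once per search algorithm (bfs, iddfs,
astar, idastar), parses the solver output, and writes one CSV of results
per binary directory into ./results.

Usage: python run.py [threshold_seconds]
"""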
import re
import os
import sys
import subprocess
import pandas as pd
import time

root_dir = 'src'  # currently unused
bin_dir = 'bin'
benchmark_dir = 'benchmarks'
results_dir = 'results'

if "__main__" == __name__:
if len(sys.argv) >= 2:
threshold = sys.argv[1]
else:
threshold = '900' # 15 minutes
print('No threshold provided, using default value 30, to change it run:')
print('python run.py <threshold>')
print(f'Using threshold {threshold}')
    search_algo = ['bfs', 'iddfs', 'astar', 'idastar']
    dataframes = []
    # make sure the output directory exists before writing any CSVs
    os.makedirs(results_dir, exist_ok=True)
    # now we execute each of the binaries with the corresponding benchmark
    # sorted() makes the benchmark/binary pairing deterministic; plain
    # os.listdir() order is arbitrary
    for benchmark, bin_name in zip(sorted(os.listdir(benchmark_dir)),
                                   sorted(os.listdir(bin_dir))):
        bin_files = os.listdir(f'./{bin_dir}/{bin_name}')
        benchmark_files = os.listdir(f'./{benchmark_dir}/{benchmark}')
        index = [[], []]
        bin_dt = pd.DataFrame()
        for bin_file in bin_files:
            file_dt = pd.DataFrame()
            for benchmark_file in benchmark_files:
                with open(f'./{benchmark_dir}/{benchmark}/{benchmark_file}', 'r') as f:
                    lines_no_repeats = set(f.readlines())
                for line in lines_no_repeats:
                    instance = line.strip()
                    if not instance:  # skip blank lines
                        continue
                    algo_dt = pd.DataFrame()
                    for algo in search_algo:
                        # run the solver and capture its output; astar/idastar
                        # additionally take the PDB arguments
                        if algo in ['bfs', 'iddfs']:
                            print(f'./bin/{bin_name}/{bin_file} {algo} {instance} {threshold}')
                            init_time = time.time()
                            output = subprocess.check_output(
                                [f'./{bin_dir}/{bin_name}/{bin_file}', algo, instance, threshold])
                            end_time = time.time()
                        else:
                            print(f'./bin/{bin_name}/{bin_file} {algo} {instance} {threshold} {bin_name} pdb')
                            init_time = time.time()
                            output = subprocess.check_output(
                                [f'./{bin_dir}/{bin_name}/{bin_file}', algo, instance, threshold, bin_name, 'pdb'])
                            end_time = time.time()
                        exec_time = end_time - init_time
                        # decode the raw bytes before pattern matching
                        output = output.decode('utf-8')
                        print(f'output: {output}')
                        print(f'exec_time: {exec_time}')
                        # flags parsed from the solver's report
                        found = 1 if re.search(r'Solution found!', output) else 0
                        # note: out_memory is computed but never written to the CSV
                        out_memory = 1 if re.search(r'Error: signal', output) else 0
                        # the solver is expected to print the generated count,
                        # expanded count, and max depth as its last three numbers
                        num_states = re.findall(r'\d+', output)
                        num_generated = num_states[-3]
                        num_expanded = num_states[-2]
                        max_depth = num_states[-1]
                        # one column per (binary, algorithm, statistic), one row per instance
                        algo_dt[f'{bin_file}_{algo}_f'] = [found]
                        algo_dt[f'{bin_file}_{algo}_g'] = [num_generated]
                        algo_dt[f'{bin_file}_{algo}_e'] = [num_expanded]
                        algo_dt[f'{bin_file}_{algo}_d'] = [max_depth]
                        algo_dt[f'{bin_file}_{algo}_t'] = [exec_time]
                    # record each (file, instance) pair once; the same lines are
                    # revisited for every binary in bin_files
                    if (benchmark_file, instance) not in zip(index[0], index[1]):
                        index[0].append(benchmark_file)
                        index[1].append(instance)
                    file_dt = pd.concat([file_dt, algo_dt], ignore_index=True)
                    print(file_dt)
            bin_dt = pd.concat([bin_dt, file_dt], axis=1)
            print(bin_dt)
        # rows: (benchmark file, instance); columns: (pruning variant, algorithm, statistic)
        bin_dt.index = pd.MultiIndex.from_tuples(
            list(zip(*index)), names=['file', 'instance'])
        bin_dt.columns = pd.MultiIndex.from_product(
            [bin_files, search_algo, ['f', 'g', 'e', 'd', 't']],
            names=['pruning', 'algorithm', 'info'])
        dataframes.append(bin_dt)
        bin_dt.to_csv(f'./{results_dir}/{bin_name}_{threshold}.csv')
        print(bin_dt)
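        # Usage note (not executed): with the MultiIndex columns above,
        # bin_dt.xs('astar', axis=1, level='algorithm') would select the
        # astar statistics across all pruning variants.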
    # we save the dataframes to csv files
    # for df, benchmark in zip(dataframes, os.listdir(benchmark_dir)):
    #     df.to_csv(f'./{results_dir}/{benchmark}.csv')
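    # A minimal sketch of reading a results file back with the same
    # MultiIndex shape; <bin_name> stands for whichever binary directory
    # the file was written for:
    # df = pd.read_csv(f'./{results_dir}/<bin_name>_{threshold}.csv',
    #                  header=[0, 1, 2], index_col=[0, 1])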