Skip to content

Commit e3b19ec

Browse files
committed
Initial import of code.
1 parent d862933 commit e3b19ec

File tree

108 files changed

+26008
-0
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

108 files changed

+26008
-0
lines changed

.gitmodules

+3
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
[submodule "dace"]
2+
path = dace
3+
url = https://github.com/spcl/dace.git

config_selection/load_data.py

+54
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
import os
2+
import os.path
3+
import functools
4+
import numpy as np
5+
import pandas as pd
6+
7+
8+
# Benchmark columns that hold integer parameters (converted from str below).
_int_kernel_cols = ['h', 'b', 'j', 'k', 'u', 'n', 'p']


@functools.lru_cache(maxsize=None)
def load_kernel(filename, cache=True):
    """Load a kernel benchmark log into a pandas DataFrame.

    Every line of the log is a sequence of space-separated
    ``<name> <sep> <value>`` triples followed by one trailing token (a unit
    suffix) that is skipped.  Column names are taken from the first line;
    values are lower-cased, the ``time`` column is converted to float, and
    the columns listed in ``_int_kernel_cols`` are converted to int when
    present.

    Results are memoized in-process by ``lru_cache``; when ``cache`` is
    true they are also persisted next to the input as ``<filename>.pkl``
    and reloaded from there on subsequent runs.
    """
    pickle_path = filename + '.pkl'
    if cache and os.path.exists(pickle_path):
        return pd.read_pickle(pickle_path)
    with open(filename, 'r') as f:
        lines = f.readlines()
    # Column names come from the first line; ``len - 1`` skips the trailing
    # unit token and step 3 walks the name/separator/value triples.
    header = lines[0].split(' ')
    data = {header[i].lower(): [] for i in range(0, len(header) - 1, 3)}
    for line in lines:
        parts = line.split(' ')
        for i in range(0, len(parts) - 1, 3):
            data[parts[i].lower()].append(parts[i + 2].lower())
    data['time'] = [float(t) for t in data['time']]
    # Convert the known-integer columns when the log contains them.
    for col in _int_kernel_cols:
        if col in data:
            data[col] = [int(x) for x in data[col]]
    df = pd.DataFrame(data=data)

    def translate_layouts(x):
        # Map 'n' to 'i' in string cells — presumably a layout-string
        # normalization; non-strings pass through untouched.
        if isinstance(x, str):
            x = x.translate({ord('n'): 'i'})
        return x

    # ``DataFrame.applymap`` was renamed to ``DataFrame.map`` in pandas 2.1
    # and the old name is deprecated; prefer the new name when it exists so
    # the code keeps working on newer pandas releases.
    if hasattr(df, 'map'):
        df = df.map(translate_layouts)
    else:
        df = df.applymap(translate_layouts)
    if cache:
        df.to_pickle(pickle_path)
    return df
42+
43+
44+
@functools.lru_cache(maxsize=None)
def load_tc(filename, cache=True):
    """Load a benchmark result CSV into a pandas DataFrame.

    The ``Time`` column is renamed to ``time`` for consistency with
    ``load_kernel``.  Results are memoized in-process by ``lru_cache``;
    when ``cache`` is true a pickle of the frame is kept next to the
    input as ``<filename>.pkl`` and reused on subsequent runs.
    """
    pickle_path = filename + '.pkl'
    if cache and os.path.exists(pickle_path):
        return pd.read_pickle(pickle_path)
    df = pd.read_csv(filename).rename(columns={'Time': 'time'})
    if cache:
        df.to_pickle(pickle_path)
    return df

config_selection/merge_benchmarks.py

+44
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
import argparse
2+
import glob
3+
import os.path
4+
import os
5+
from collections import Counter
6+
7+
# Command-line interface: two positional directories (input benchmark
# results and output location) plus an optional basename filter.
parser = argparse.ArgumentParser(
    description='Concatenate all benchmark results into a single file')
parser.add_argument('bmdir', type=str,
                    help='Directory containing benchmark results')
parser.add_argument('outdir', type=str,
                    help='Directory to output results')
parser.add_argument('--basename', type=str,
                    help='Basename of benchmark results to merge')
18+
19+
if __name__ == '__main__':
20+
args = parser.parse_args()
21+
if not os.path.exists(args.bmdir):
22+
raise ValueError(f'Benchmark directory {args.bmdir} not found')
23+
if not os.path.exists(args.outdir):
24+
os.makedirs(args.outdir)
25+
26+
if args.basename:
27+
pattern = args.basename + '-bm*.csv'
28+
else:
29+
pattern = '*-bm*.csv'
30+
bm_files = glob.glob(os.path.join(args.bmdir, pattern))
31+
benchmark_results = Counter()
32+
for f in bm_files:
33+
name = os.path.splitext(os.path.basename(f))[0]
34+
name = name.split('-')
35+
name = '-'.join(name[:-1])
36+
benchmark_results[name] += 1
37+
38+
for bm, num in benchmark_results.items():
39+
print(f'Merging {num} results from {bm}')
40+
with open(os.path.join(args.outdir, bm + '-combined.csv'), 'w') as out_f:
41+
for i in range(num):
42+
with open(os.path.join(args.bmdir, f'{bm}-bm{i}.csv'), 'r') as in_f:
43+
out_f.write(in_f.read())
44+

0 commit comments

Comments
 (0)