aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFlorian Fischer <florian.fl.fischer@fau.de>2020-03-09 19:14:54 +0100
committerFlorian Fischer <florian.fl.fischer@fau.de>2020-03-09 19:14:54 +0100
commit0e467466b1171f162948e9aaa369c6117d915e05 (patch)
tree9037afdb2813a104c0d60cc49f572418215786c0
parent19362370c09dd4ac945ed7cfff07d91f0db95353 (diff)
downloadallocbench-0e467466b1171f162948e9aaa369c6117d915e05.tar.gz
allocbench-0e467466b1171f162948e9aaa369c6117d915e05.zip
improve matplotlib code
add new helper functions to get all y_values for a plot use separate figures for each plot s/filepostfix/file_postfix/ to be consistent
-rw-r--r--src/benchmarks/blowup.py2
-rw-r--r--src/benchmarks/cfrac.py8
-rw-r--r--src/benchmarks/dj_trace.py8
-rw-r--r--src/benchmarks/espresso.py8
-rw-r--r--src/benchmarks/falsesharing.py8
-rw-r--r--src/benchmarks/fd.py4
-rw-r--r--src/benchmarks/httpd.py6
-rw-r--r--src/benchmarks/keydb.py4
-rw-r--r--src/benchmarks/larson.py4
-rw-r--r--src/benchmarks/lld.py2
-rw-r--r--src/benchmarks/loop.py8
-rw-r--r--src/benchmarks/mysql.py12
-rw-r--r--src/benchmarks/raxmlng.py4
-rw-r--r--src/benchmarks/realloc.py2
-rw-r--r--src/benchmarks/redis.py4
-rw-r--r--src/benchmarks/t_test1.py8
-rw-r--r--src/plots.py119
17 files changed, 97 insertions, 114 deletions
diff --git a/src/benchmarks/blowup.py b/src/benchmarks/blowup.py
index cc85643..28692d6 100644
--- a/src/benchmarks/blowup.py
+++ b/src/benchmarks/blowup.py
@@ -58,7 +58,7 @@ class BenchmarkBlowup(Benchmark):
"{VmHWM}/1000",
ylabel="VmHWM in MB",
title="blowup test",
- filepostfix="vmhwm")
+ file_postfix="vmhwm")
plt.pgfplot(self,
self.iterate_args(self.results["args"]),
diff --git a/src/benchmarks/cfrac.py b/src/benchmarks/cfrac.py
index 07385eb..9b4bc64 100644
--- a/src/benchmarks/cfrac.py
+++ b/src/benchmarks/cfrac.py
@@ -80,7 +80,7 @@ class BenchmarkCfrac(Benchmark):
"{task-clock}/1000",
ylabel='"cpu-second"',
title='"Cfrac: runtime"',
- filepostfix="time")
+ file_postfix="time")
# L1 cache misses
plt.barplot_single_arg(
@@ -88,7 +88,7 @@ class BenchmarkCfrac(Benchmark):
"({L1-dcache-load-misses}/{L1-dcache-loads})*100",
ylabel="L1 misses in %",
title="Cfrac l1 cache misses",
- filepostfix="l1misses",
+ file_postfix="l1misses",
yerr=False)
# Memusage
@@ -96,7 +96,7 @@ class BenchmarkCfrac(Benchmark):
"{VmHWM}",
ylabel="VmHWM in KB",
title="Cfrac VmHWM",
- filepostfix="vmhwm")
+ file_postfix="vmhwm")
plt.write_tex_table(self, [{
"label": "Runtime [ms]",
@@ -107,7 +107,7 @@ class BenchmarkCfrac(Benchmark):
"expression": "{VmHWM}",
"sort": "<"
}],
- filepostfix="table")
+ file_postfix="table")
plt.export_stats_to_dataref(self, "task-clock")
diff --git a/src/benchmarks/dj_trace.py b/src/benchmarks/dj_trace.py
index 4730db3..b6ae83d 100644
--- a/src/benchmarks/dj_trace.py
+++ b/src/benchmarks/dj_trace.py
@@ -200,7 +200,7 @@ class BenchmarkDJTrace(Benchmark):
"{cputime}/1000",
ylabel="time in ms",
title="total runtime",
- filepostfix="runtime")
+ file_postfix="runtime")
# Function Times
func_times_means = {allocator: {} for allocator in allocators}
@@ -262,12 +262,12 @@ class BenchmarkDJTrace(Benchmark):
"{Max_RSS}/1000",
ylabel="Max RSS in MB",
title="Max RSS (VmHWM)",
- filepostfix="newrss")
+ file_postfix="newrss")
# self.barplot_fixed_arg("{Max_RSS}/1000",
# ylabel='"Max RSS in MB"',
# title='"Highwatermark of Vm (VmHWM)"',
- # filepostfix="newrss")
+ # file_postfix="newrss")
del allocators["Ideal_RSS"]
del self.results["stats"]["Ideal_RSS"]
@@ -312,7 +312,7 @@ class BenchmarkDJTrace(Benchmark):
"expression": "{Max_RSS}/1000",
"sort": "<"
}],
- filepostfix="table")
+ file_postfix="table")
# Tables
for perm in self.iterate_args(args=args):
diff --git a/src/benchmarks/espresso.py b/src/benchmarks/espresso.py
index 5149dcb..6012d02 100644
--- a/src/benchmarks/espresso.py
+++ b/src/benchmarks/espresso.py
@@ -84,7 +84,7 @@ class BenchmarkEspresso(Benchmark):
"{task-clock}/1000",
ylabel="cpu-second",
title="Espresso: runtime",
- filepostfix="time")
+ file_postfix="time")
# L1 cache misses
plt.barplot_single_arg(
@@ -92,7 +92,7 @@ class BenchmarkEspresso(Benchmark):
"({L1-dcache-load-misses}/{L1-dcache-loads})*100",
ylabel="L1 misses in %",
title="Espresso l1 cache misses",
- filepostfix="l1misses",
+ file_postfix="l1misses",
yerr=False)
# Memusage
@@ -100,7 +100,7 @@ class BenchmarkEspresso(Benchmark):
"{VmHWM}",
ylabel="VmHWM in KB",
title="Espresso VmHWM",
- filepostfix="vmhwm")
+ file_postfix="vmhwm")
plt.write_tex_table(self, [{
"label": "Runtime [ms]",
@@ -111,7 +111,7 @@ class BenchmarkEspresso(Benchmark):
"expression": "{VmHWM}",
"sort": "<"
}],
- filepostfix="table")
+ file_postfix="table")
plt.export_stats_to_dataref(self, "task-clock")
diff --git a/src/benchmarks/falsesharing.py b/src/benchmarks/falsesharing.py
index b9d0006..5c18367 100644
--- a/src/benchmarks/falsesharing.py
+++ b/src/benchmarks/falsesharing.py
@@ -83,7 +83,7 @@ class BenchmarkFalsesharing(Benchmark):
"{speedup}",
ylabel="Speedup",
title="Speedup: {arg} {arg_value}",
- filepostfix="speedup",
+ file_postfix="speedup",
autoticks=False,
fixed=["bench"])
@@ -92,7 +92,7 @@ class BenchmarkFalsesharing(Benchmark):
"{l1chache_misses}",
ylabel="l1 cache misses in %",
title="cache misses: {arg} {arg_value}",
- filepostfix="l1-misses",
+ file_postfix="l1-misses",
autoticks=False,
fixed=["bench"])
@@ -100,7 +100,7 @@ class BenchmarkFalsesharing(Benchmark):
# "({LLC-load-misses}/{LLC-loads})*100",
# ylabel="llc cache misses in %",
# title="LLC misses: {arg} {arg_value}",
- # filepostfix="llc-misses",
+ # file_postfix="llc-misses",
# autoticks=False,
# fixed=["bench"])
@@ -109,7 +109,7 @@ class BenchmarkFalsesharing(Benchmark):
"expression": "{speedup}",
"sort": ">"
}],
- filepostfix="speedup.table")
+ file_postfix="speedup.table")
# plt.export_stats_to_csv(self, "speedup", "time")
# plt.export_stats_to_csv(self, "l1chache_misses", "l1-misses")
diff --git a/src/benchmarks/fd.py b/src/benchmarks/fd.py
index 0d5657d..6f88878 100644
--- a/src/benchmarks/fd.py
+++ b/src/benchmarks/fd.py
@@ -71,7 +71,7 @@ class BenchmarkFd(Benchmark):
"{task-clock}",
ylabel="runtime in ms",
title="fd runtime",
- filepostfix="runtime")
+ file_postfix="runtime")
plt.export_stats_to_dataref(self, "task-clock")
@@ -79,7 +79,7 @@ class BenchmarkFd(Benchmark):
"{VmHWM}",
ylabel="VmHWM in KB",
title="fd memusage",
- filepostfix="memusage")
+ file_postfix="memusage")
plt.export_stats_to_dataref(self, "VmHWM")
diff --git a/src/benchmarks/httpd.py b/src/benchmarks/httpd.py
index 2138834..8332f27 100644
--- a/src/benchmarks/httpd.py
+++ b/src/benchmarks/httpd.py
@@ -65,7 +65,7 @@ class BenchmarkHTTPD(Benchmark):
xlabel="threads",
ylabel="requests/s",
autoticks=False,
- filepostfix="requests",
+ file_postfix="requests",
title="{perm.site}: requests/s")
plt.plot_fixed_arg(self,
@@ -73,7 +73,7 @@ class BenchmarkHTTPD(Benchmark):
xlabel="threads",
ylabel="VmHWM in KB",
title="{perm.site}: nginx memory usage",
- filepostfix="httpd_vmhwm",
+ file_postfix="httpd_vmhwm",
autoticks=False)
plt.plot_fixed_arg(self,
@@ -81,7 +81,7 @@ class BenchmarkHTTPD(Benchmark):
xlabel="threads",
ylabel="VmHWM in KB",
title="{perm.site}: php-fpm memory usage",
- filepostfix="php-fpm_vmhwm",
+ file_postfix="php-fpm_vmhwm",
autoticks=False)
diff --git a/src/benchmarks/keydb.py b/src/benchmarks/keydb.py
index 3833a87..b8da614 100644
--- a/src/benchmarks/keydb.py
+++ b/src/benchmarks/keydb.py
@@ -113,11 +113,11 @@ class BenchmarkKeyDB(Benchmark):
plt.plot_fixed_arg(self, "{totals_ops}",
ylabel="'OPS/second'",
title="KeyDB Operations: {perm}",
- filepostfix="total_ops")
+ file_postfix="total_ops")
plt.plot_fixed_arg(self, "{keydb_vmhwm}",
ylabel="'VmHWM [KB]'",
title="KeyDB Memusage: {perm}",
- filepostfix="vmhwm")
+ file_postfix="vmhwm")
keydb = BenchmarkKeyDB()
diff --git a/src/benchmarks/larson.py b/src/benchmarks/larson.py
index 5f153ea..d2e9286 100644
--- a/src/benchmarks/larson.py
+++ b/src/benchmarks/larson.py
@@ -85,14 +85,14 @@ class BenchmarkLarson(Benchmark):
"{throughput}/1000000",
ylabel="MOPS/s",
title="Larson: {arg} {arg_value}",
- filepostfix="throughput")
+ file_postfix="throughput")
plt.plot_fixed_arg(
self,
"({L1-dcache-load-misses}/{L1-dcache-loads})*100",
ylabel="l1 cache misses in %",
title="Larson cache misses: {arg} {arg_value}",
- filepostfix="cachemisses")
+ file_postfix="cachemisses")
larson = BenchmarkLarson()
diff --git a/src/benchmarks/lld.py b/src/benchmarks/lld.py
index a9fe26c..55b9063 100644
--- a/src/benchmarks/lld.py
+++ b/src/benchmarks/lld.py
@@ -294,7 +294,7 @@ class BenchmarkLld(Benchmark):
"expression": "{task-clock}",
"sort": "<"
}],
- filepostfix="table")
+ file_postfix="table")
lld = BenchmarkLld()
diff --git a/src/benchmarks/loop.py b/src/benchmarks/loop.py
index 6d590c9..81d55cb 100644
--- a/src/benchmarks/loop.py
+++ b/src/benchmarks/loop.py
@@ -65,7 +65,7 @@ class BenchmarkLoop(Benchmark):
"{mops}",
ylabel="MOPS/cpu-second",
title="Loop: {arg} {arg_value}",
- filepostfix="time",
+ file_postfix="time",
autoticks=False)
# L1 cache misses
@@ -74,21 +74,21 @@ class BenchmarkLoop(Benchmark):
"({L1-dcache-load-misses}/{L1-dcache-loads})*100",
ylabel="L1 misses in %",
title="Loop l1 cache misses: {arg} {arg_value}",
- filepostfix="l1misses",
+ file_postfix="l1misses",
autoticks=False)
# Speed Matrix
plt.write_best_doublearg_tex_table(
self,
"{mops}",
- filepostfix="time.matrix")
+ file_postfix="time.matrix")
plt.write_tex_table(self, [{
"label": "MOPS/s",
"expression": "{mops}",
"sort": ">"
}],
- filepostfix="mops.table")
+ file_postfix="mops.table")
# plt.export_stats_to_csv(self, "task-clock")
# plt.export_stats_to_dataref(self, "task-clock")
diff --git a/src/benchmarks/mysql.py b/src/benchmarks/mysql.py
index 345a675..a5b215c 100644
--- a/src/benchmarks/mysql.py
+++ b/src/benchmarks/mysql.py
@@ -205,7 +205,7 @@ class BenchmarkMYSQL(Benchmark):
xlabel='"threads"',
ylabel='"transactions"',
title='"sysbench oltp read only"',
- filepostfix="l")
+ file_postfix="l")
# normalized linear plot
ref_alloc = list(allocators)[0]
@@ -213,7 +213,7 @@ class BenchmarkMYSQL(Benchmark):
xlabel='"threads"',
ylabel='"transactions scaled at " + scale',
title='"sysbench oltp read only"',
- filepostfix="norm.l",
+ file_postfix="norm.l",
scale=ref_alloc)
# bar plot
@@ -221,14 +221,14 @@ class BenchmarkMYSQL(Benchmark):
xlabel='"threads"',
ylabel='"transactions"',
title='"sysbench oltp read only"',
- filepostfix="b")
+ file_postfix="b")
# normalized bar plot
plt.barplot_single_arg(self, "{transactions}",
xlabel='"threads"',
ylabel='"transactions scaled at " + scale',
title='"sysbench oltp read only"',
- filepostfix="norm.b",
+ file_postfix="norm.b",
scale=ref_alloc)
# Memusage
@@ -236,7 +236,7 @@ class BenchmarkMYSQL(Benchmark):
xlabel='"threads"',
ylabel='"VmHWM in kB"',
title='"Memusage sysbench oltp read only"',
- filepostfix="mem")
+ file_postfix="mem")
plt.write_tex_table(self, [{
"label": "Transactions",
@@ -247,7 +247,7 @@ class BenchmarkMYSQL(Benchmark):
"expression": "{mysqld_vmhwm}",
"sort": "<"
}],
- filepostfix="table")
+ file_postfix="table")
# Colored latex table showing transactions count
d = {allocator: {} for allocator in allocators}
diff --git a/src/benchmarks/raxmlng.py b/src/benchmarks/raxmlng.py
index 228c220..8c8b878 100644
--- a/src/benchmarks/raxmlng.py
+++ b/src/benchmarks/raxmlng.py
@@ -86,7 +86,7 @@ class BenchmarkRaxmlng(Benchmark):
"{runtime}",
ylabel='"runtime in s"',
title='"raxml-ng tree inference benchmark"',
- filepostfix="runtime")
+ file_postfix="runtime")
plt.export_stats_to_dataref(self, "runtime")
@@ -94,7 +94,7 @@ class BenchmarkRaxmlng(Benchmark):
"{VmHWM}",
ylabel='"VmHWM in KB"',
title='"raxml-ng memusage"',
- filepostfix="memusage")
+ file_postfix="memusage")
plt.export_stats_to_dataref(self, "VmHWM")
diff --git a/src/benchmarks/realloc.py b/src/benchmarks/realloc.py
index 251185e..bd8f801 100644
--- a/src/benchmarks/realloc.py
+++ b/src/benchmarks/realloc.py
@@ -37,7 +37,7 @@ class BenchmarkRealloc(Benchmark):
plt.barplot_single_arg(self, "{task-clock}",
ylabel='"task-clock in ms"',
title='"realloc micro benchmark"',
- filepostfix="time")
+ file_postfix="time")
plt.export_stats_to_csv(self, "task-clock")
plt.export_stats_to_dataref(self, "task-clock")
diff --git a/src/benchmarks/redis.py b/src/benchmarks/redis.py
index 71d9227..cfad489 100644
--- a/src/benchmarks/redis.py
+++ b/src/benchmarks/redis.py
@@ -84,12 +84,12 @@ class BenchmarkRedis(Benchmark):
plt.barplot_single_arg(self, "{requests}",
ylabel='"requests per s"',
title='"redis throughput"',
- filepostfix="requests")
+ file_postfix="requests")
plt.barplot_single_arg(self, "{redis_vmhwm}",
ylabel='"VmHWM in KB"',
title='"redis memusage"',
- filepostfix="vmhwm")
+ file_postfix="vmhwm")
plt.export_stats_to_dataref(self, "requests")
diff --git a/src/benchmarks/t_test1.py b/src/benchmarks/t_test1.py
index 119f2a5..f0856f6 100644
--- a/src/benchmarks/t_test1.py
+++ b/src/benchmarks/t_test1.py
@@ -45,7 +45,7 @@ class BenchmarkTTest1(Benchmark):
plt.plot_fixed_arg(self, yval,
ylabel='"Mops / CPU second"',
title='"T-Ttest1: " + arg + " " + str(arg_value)',
- filepostfix="time",
+ file_postfix="time",
autoticks=False)
# L1 cache misses
@@ -53,18 +53,18 @@ class BenchmarkTTest1(Benchmark):
"({L1-dcache-load-misses}/{L1-dcache-loads})*100",
ylabel='"L1 misses in %"',
title='"T-Test1 l1 cache misses: " + arg + " " + str(arg_value)',
- filepostfix="l1misses",
+ file_postfix="l1misses",
autoticks=False)
# Speed Matrix
- plt.write_best_doublearg_tex_table(self, yval, filepostfix="mops.matrix")
+ plt.write_best_doublearg_tex_table(self, yval, file_postfix="mops.matrix")
plt.write_tex_table(self, [{
"label": "MOPS/s",
"expression": yval,
"sort": ">"
}],
- filepostfix="mops.table")
+ file_postfix="mops.table")
plt.export_stats_to_csv(self, "task-clock")
diff --git a/src/plots.py b/src/plots.py
index c254741..94dcc4b 100644
--- a/src/plots.py
+++ b/src/plots.py
@@ -56,8 +56,32 @@ def _eval_with_stat(bench, evaluation, alloc, perm, stat):
return nan
return eval(res)
+def _get_y_data(bench, expression, allocator, perms, stat="mean", scale=None):
+ """Helper to get the y data of an allocator for given permutations"""
+ y_data = []
+ for perm in perms:
+ if scale:
+ if scale == allocator:
+ y_data.append(1)
+ else:
+ val = _eval_with_stat(bench, expression, allocator, perm, stat)
+ norm_val = _eval_with_stat(bench, expression, scale, perm, stat)
+ y_data.append(val / norm_val)
+ else:
+ y_data.append(_eval_with_stat(bench, expression, allocator, perm, stat))
+
+ return y_data
+
+def _save_figure(bench, fig, sumdir='', file_postfix='', file_ext=src.globalvars.summary_file_ext):
+ figname = os.path.join(sumdir, f"{bench.name}.{file_postfix}.{file_ext}")
+ if figname.endswith(".tex"):
+ import tikzplotlib
+ tikzplotlib.save(figname)
+ else:
+ fig.savefig(figname)
+
def plot_single_arg(bench, yval, ylabel="y-label", xlabel="x-label",
- autoticks=True, title="default title", filepostfix="",
+ autoticks=True, title="default title", file_postfix="",
sumdir="", arg="", scale=None, file_ext=src.globalvars.summary_file_ext):
"""plot line graphs for each permutation of the benchmark's command arguments"""
@@ -66,24 +90,15 @@ def plot_single_arg(bench, yval, ylabel="y-label", xlabel="x-label",
arg = arg or list(args.keys())[0]
+ fig = plt.figure()
+
if not autoticks:
x_vals = list(range(1, len(args[arg]) + 1))
else:
x_vals = args[arg]
for allocator in allocators:
- y_vals = []
- for perm in bench.iterate_args(args=args):
- if scale:
- if scale == allocator:
- y_vals = [1] * len(x_vals)
- else:
- mean = _eval_with_stat(bench, yval, allocator, perm, "mean")
- norm_mean = _eval_with_stat(bench, yval, scale, perm, "mean")
- y_vals.append(mean / norm_mean)
- else:
- y_vals.append(_eval_with_stat(bench, yval, allocator, perm, "mean"))
-
+ y_vals = _get_y_data(bench, yval, allocator, bench.iterate_args(args=args), stat='mean', scale=scale)
plt.plot(x_vals, y_vals, marker='.', linestyle='-',
label=allocator, color=_get_alloc_color(bench, allocator))
@@ -95,16 +110,14 @@ def plot_single_arg(bench, yval, ylabel="y-label", xlabel="x-label",
plt.xlabel(xlabel.format(**label_substitutions))
plt.ylabel(ylabel.format(**label_substitutions))
plt.title(title.format(**label_substitutions))
- figname = os.path.join(sumdir, f"{bench.name}.{filepostfix}.{file_ext}")
- if figname.endswith(".tex"):
- import tikzplotlib
- tikzplotlib.save(figname)
- else:
- plt.savefig(figname)
- plt.clf()
+
+ _save_figure(bench, fig, sumdir, file_postfix, file_ext)
+ fig.close()
+
+ return fig
def barplot_single_arg(bench, yval, ylabel="y-label", xlabel="x-label",
- title="default title", filepostfix="", sumdir="",
+ title="default title", file_postfix="", sumdir="",
arg="", scale=None, file_ext=src.globalvars.summary_file_ext, yerr=True):
"""plot bar plots for each permutation of the benchmark's command arguments"""
@@ -121,26 +134,14 @@ def barplot_single_arg(bench, yval, ylabel="y-label", xlabel="x-label",
narg = len(arg)
+ fig = plt.figure()
+
for i, allocator in enumerate(allocators):
x_vals = list(range(i, narg * (nallocators+1), nallocators+1))
- y_vals = []
+ y_vals = _get_y_data(bench, yval, allocator, bench.iterate_args(args=args), stat='mean', scale=scale)
y_errs = None
if yerr:
- y_errs = []
-
- for perm in bench.iterate_args(args=args):
- if scale:
- if scale == allocator:
- y_vals = [1] * len(x_vals)
- else:
- mean = _eval_with_stat(bench, yval, allocator, perm, "mean")
- norm_mean = _eval_with_stat(bench, yval, scale, perm, "mean")
- y_vals.append(mean / norm_mean)
- else:
- y_vals.append(_eval_with_stat(bench, yval, allocator, perm, "mean"))
-
- if yerr:
- y_errs.append(_eval_with_stat(bench, yval, allocator, perm, "std"))
+ y_vals = _get_y_data(bench, yval, allocator, bench.iterate_args(args=args), stat='std')
plt.bar(x_vals, y_vals, width=1, label=allocator, yerr=y_errs,
color=_get_alloc_color(bench, allocator))
@@ -153,16 +154,12 @@ def barplot_single_arg(bench, yval, ylabel="y-label", xlabel="x-label",
plt.xlabel(xlabel.format(**label_substitutions))
plt.ylabel(ylabel.format(**label_substitutions))
plt.title(title.format(**label_substitutions))
- figname = os.path.join(sumdir, f"{bench.name}.{filepostfix}.{file_ext}")
- if figname.endswith(".tex"):
- import tikzplotlib
- tikzplotlib.save(figname)
- else:
- plt.savefig(figname)
- plt.clf()
+
+ _save_figure(bench, fig, sumdir, file_postfix, file_ext)
+ fig.close()
def plot_fixed_arg(bench, yval, ylabel="y-label", xlabel="{loose_arg}",
- autoticks=True, title="default title", filepostfix="",
+ autoticks=True, title="default title", file_postfix="",
sumdir="", fixed=None, file_ext=src.globalvars.summary_file_ext, scale=None):
args = bench.results["args"]
@@ -177,18 +174,10 @@ def plot_fixed_arg(bench, yval, ylabel="y-label", xlabel="{loose_arg}",
x_vals = args[loose_arg]
for arg_value in args[arg]:
+ fig = plt.figure()
+
for allocator in allocators:
- y_vals = []
- for perm in bench.iterate_args_fixed({arg: arg_value}, args=args):
- if scale:
- if scale == allocator:
- y_vals = [1] * len(x_vals)
- else:
- mean = _eval_with_stat(bench, yval, allocator, perm, "mean")
- norm_mean = _eval_with_stat(bench, yval, scale, perm, "mean")
- y_vals.append(mean / norm_mean)
- else:
- y_vals.append(_eval_with_stat(bench, yval, allocator, perm, "mean"))
+ y_vals = _get_y_data(bench, yval, allocator, bench.iterate_args_fixed({arg: arg_value}, args=args), stat='mean', scale=scale)
plt.plot(x_vals, y_vals, marker='.', linestyle='-',
label=allocator, color=_get_alloc_color(bench, allocator))
@@ -202,14 +191,9 @@ def plot_fixed_arg(bench, yval, ylabel="y-label", xlabel="{loose_arg}",
plt.xlabel(xlabel.format(**label_substitutions))
plt.ylabel(ylabel.format(**label_substitutions))
plt.title(title.format(**label_substitutions))
- figname = os.path.join(sumdir,
- f"{bench.name}.{arg}.{arg_value}.{filepostfix}.{file_ext}")
- if figname.endswith(".tex"):
- import tikzplotlib
- tikzplotlib.save(figname)
- else:
- plt.savefig(figname)
- plt.clf()
+
+ _save_figure(bench, fig, sumdir, file_postfix, file_ext)
+ fig.close()
def export_facts_to_file(bench, comment_symbol, output_file):
"""Write collected facts about used system and benchmark to file"""
@@ -304,8 +288,7 @@ def export_stats_to_dataref(bench, datapoint, path=None):
cur_line.replace("_", "-")
print(cur_line, file=dataref_file)
-def write_best_doublearg_tex_table(bench, expr, sort=">",
- filepostfix="", sumdir=""):
+def write_best_doublearg_tex_table(bench, expr, sort=">", file_postfix="", sumdir=""):
args = bench.results["args"]
keys = list(args.keys())
allocators = bench.results["allocators"]
@@ -358,7 +341,7 @@ def write_best_doublearg_tex_table(bench, expr, sort=">",
print("\\end{tabular}", file=tex_file)
print("\\end{document}", file=tex_file)
-def write_tex_table(bench, entries, filepostfix="", sumdir=""):
+def write_tex_table(bench, entries, file_postfix="", sumdir=""):
"""generate a latex standalone table from an list of entries dictionaries
Entries must have at least the two keys: "label" and "expression".
@@ -396,7 +379,7 @@ def write_tex_table(bench, entries, filepostfix="", sumdir=""):
entry_header_line = perm_fields_header + entry_header_line * nallocators
entry_header_line = entry_header_line[:-1] + "\\\\"
- fname = os.path.join(sumdir, ".".join([bench.name, filepostfix, "tex"]))
+ fname = os.path.join(sumdir, ".".join([bench.name, file_postfix, "tex"]))
with open(fname, "w") as tex_file:
print("\\documentclass{standalone}", file=tex_file)
print("\\usepackage{booktabs}", file=tex_file)