aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/benchmark.py45
-rw-r--r--src/benchmarks/blowup.py13
-rw-r--r--src/benchmarks/cfrac.py39
-rw-r--r--src/benchmarks/dj_trace.py51
-rw-r--r--src/benchmarks/espresso.py40
-rw-r--r--src/benchmarks/falsesharing.py48
-rw-r--r--src/benchmarks/fd.py26
-rw-r--r--src/benchmarks/httpd.py48
-rw-r--r--src/benchmarks/keydb.py24
-rw-r--r--src/benchmarks/larson.py27
-rw-r--r--src/benchmarks/loop.py38
-rw-r--r--src/benchmarks/mysql.py72
-rw-r--r--src/benchmarks/raxmlng.py26
-rw-r--r--src/benchmarks/realloc.py12
-rw-r--r--src/benchmarks/redis.py26
-rw-r--r--src/benchmarks/t_test1.py27
-rw-r--r--src/plots.py414
17 files changed, 567 insertions, 409 deletions
diff --git a/src/benchmark.py b/src/benchmark.py
index daa20be..d0489ac 100644
--- a/src/benchmark.py
+++ b/src/benchmark.py
@@ -248,28 +248,45 @@ class Benchmark:
else:
raise Exception("Requirement: {} not found".format(r))
- def iterate_args(self, args=None):
- """Iterator over each possible combination of args"""
+ def iterate_args(self, args=None, fixed=None):
+ """Iterator over each possible combination of args
+
+ Parameters
+ ----------
+ args : dict, optional, default=None
+ Dictionary of arguments and iterables with their possible values.
+ If not provided defaults to :rc:`self.args`
+
+ fixed : dict, optional, default=None
+ Mapping of arguments to one of their values. The yielded result
+ contains this value. If not provided defaults to :rc:`{}`.
+
+ Returns
+ -------
+ perm : :rc:`self.Perm`
+ A namedtuple containing one permutation of the benchmark's arguments.
+
+ Examples
+ --------
+ args = {"a1": [1,2], "a2": ["foo", "bar"]}
+
+        self.iterate_args(args=args) yields [(1, "foo"), (1, "bar"), (2, "foo"), (2, "bar")]
+ self.iterate_args(args, {"a2":"bar"}) yields [(1, "bar"), (2, "bar")]
+ self.iterate_args(args, {"a1":2, "a2":"foo"}) yields [(2, "foo")]"""
if not args:
args = self.args
- for p in itertools.product(*[args[k] for k in args.keys()]):
- yield self.Perm(*p)
+ if not fixed:
+ fixed = {}
- def iterate_args_fixed(self, fixed, args=None):
- """Iterator over each possible combination of args containing all fixed values
-
- self.args = {"a1": [1,2], "a2": ["foo", "bar"]}
- self.iterate_args_fixed({"a1":1}) yields [(1, "foo"), (1, "bar")
- self.iterate_args_fixed({"a2":"bar"}) yields [(1, "bar"), (2, "bar")
- self.iterate_args_fixed({"a1":2, "a2":"foo"}) yields only [(2, "foo")]"""
-
- for perm in self.iterate_args(args=args):
+ for perm in itertools.product(*[args[k] for k in args]):
+ perm = self.Perm(*perm)
p_dict = perm._asdict()
is_fixed = True
for arg in fixed:
if p_dict[arg] != fixed[arg]:
is_fixed = False
break
+
if is_fixed:
yield perm
@@ -581,7 +598,7 @@ class Benchmark:
self.results["stats"][alloc] = {}
- for perm in self.iterate_args(self.results["args"]):
+ for perm in self.iterate_args(args=self.results["args"]):
stats = {s: {} for s in ["min", "max", "mean", "median", "std",
"std_perc",
"lower_quartile", "upper_quartile",
diff --git a/src/benchmarks/blowup.py b/src/benchmarks/blowup.py
index 28692d6..a4e65ea 100644
--- a/src/benchmarks/blowup.py
+++ b/src/benchmarks/blowup.py
@@ -54,11 +54,14 @@ class BenchmarkBlowup(Benchmark):
}
}
- plt.barplot_single_arg(self,
- "{VmHWM}/1000",
- ylabel="VmHWM in MB",
- title="blowup test",
- file_postfix="vmhwm")
+ plt.plot(self,
+ "{VmHWM}/1000",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': "VmHWM in MB",
+ 'title': "blowup test"
+ },
+ file_postfix="vmhwm")
plt.pgfplot(self,
self.iterate_args(self.results["args"]),
diff --git a/src/benchmarks/cfrac.py b/src/benchmarks/cfrac.py
index 9b4bc64..1f495e6 100644
--- a/src/benchmarks/cfrac.py
+++ b/src/benchmarks/cfrac.py
@@ -76,27 +76,36 @@ class BenchmarkCfrac(Benchmark):
def summary(self):
# Speed
- plt.barplot_single_arg(self,
- "{task-clock}/1000",
- ylabel='"cpu-second"',
- title='"Cfrac: runtime"',
- file_postfix="time")
+ plt.plot(self,
+ "{task-clock}/1000",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': 'cpu-second',
+ 'title': 'Cfrac: runtime',
+ },
+ file_postfix="time")
# L1 cache misses
- plt.barplot_single_arg(
+ plt.plot(
self,
"({L1-dcache-load-misses}/{L1-dcache-loads})*100",
- ylabel="L1 misses in %",
- title="Cfrac l1 cache misses",
- file_postfix="l1misses",
- yerr=False)
+        plot_type='bar',
+        plot_options={'yerr': False},
+        fig_options={
+            'ylabel': "L1 misses in %",
+            'title': "Cfrac l1 cache misses",
+        },
+        file_postfix="l1misses")
# Memusage
- plt.barplot_single_arg(self,
- "{VmHWM}",
- ylabel="VmHWM in KB",
- title="Cfrac VmHWM",
- file_postfix="vmhwm")
+ plt.plot(self,
+ "{VmHWM}",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': "VmHWM in KB",
+ 'title': "Cfrac VmHWM",
+ },
+ file_postfix="vmhwm")
plt.write_tex_table(self, [{
"label": "Runtime [ms]",
diff --git a/src/benchmarks/dj_trace.py b/src/benchmarks/dj_trace.py
index b6ae83d..1b1c65e 100644
--- a/src/benchmarks/dj_trace.py
+++ b/src/benchmarks/dj_trace.py
@@ -173,34 +173,14 @@ class BenchmarkDJTrace(Benchmark):
args = self.results["args"]
allocators = self.results["allocators"]
- cpu_time_means = {allocator: {} for allocator in allocators}
- cycles_means = {allocator: {} for allocator in allocators}
- for perm in self.iterate_args(args=args):
- for i, allocator in enumerate(allocators):
- data = [x["cputime"] for x in self.results[allocator][perm]]
- # data is in milliseconds
- cpu_time_means[allocator][perm] = np.mean(data) / 1000
-
- data = [x["cycles"] for x in self.results[allocator][perm]]
- cycles_means[allocator][perm] = np.mean(data)
-
- plt.bar([i],
- cpu_time_means[allocator][perm],
- label=allocator,
- color=allocators[allocator]["color"])
-
- plt.legend(loc="best")
- plt.ylabel("time in ms")
- plt.title(f"Runtime {perm.workload}")
- plt.savefig(".".join(
- [self.name, perm.workload, "runtime", summary_file_ext]))
- plt.clf()
-
- abplt.barplot_single_arg(self,
- "{cputime}/1000",
- ylabel="time in ms",
- title="total runtime",
- file_postfix="runtime")
+ abplt.plot(self,
+ "{cputime}/1000",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': "time in ms",
+ 'title': "total runtime",
+ },
+ file_postfix="runtime")
# Function Times
func_times_means = {allocator: {} for allocator in allocators}
@@ -258,11 +238,14 @@ class BenchmarkDJTrace(Benchmark):
}
}
- abplt.barplot_single_arg(self,
- "{Max_RSS}/1000",
- ylabel="Max RSS in MB",
- title="Max RSS (VmHWM)",
- file_postfix="newrss")
+ abplt.plot(self,
+ "{Max_RSS}/1000",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': "Max RSS in MB",
+ 'title': "Max RSS (VmHWM)",
+                   },
+                   file_postfix="newrss")
# self.barplot_fixed_arg("{Max_RSS}/1000",
# ylabel='"Max RSS in MB"',
@@ -394,7 +377,7 @@ class BenchmarkDJTrace(Benchmark):
file=f)
for perm in self.iterate_args(args=args):
- cycles = cycles_means[allocator][perm]
+            cycles = abplt._get_y_data(self, "{cycles}", allocator, [perm])[0]
times = [t for t in func_times_means[allocator][perm]]
rss = rss_means[allocator][perm]
print(fmt.format(perm.workload, cycles, times[0], times[1],
diff --git a/src/benchmarks/espresso.py b/src/benchmarks/espresso.py
index 6012d02..b0b9c09 100644
--- a/src/benchmarks/espresso.py
+++ b/src/benchmarks/espresso.py
@@ -80,27 +80,33 @@ class BenchmarkEspresso(Benchmark):
def summary(self):
# Speed
- plt.barplot_single_arg(self,
- "{task-clock}/1000",
- ylabel="cpu-second",
- title="Espresso: runtime",
- file_postfix="time")
+ plt.plot(self,
+ "{task-clock}/1000",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': "cpu-second",
+ 'title': "Espresso: runtime",
+ },
+ file_postfix="time")
# L1 cache misses
- plt.barplot_single_arg(
- self,
- "({L1-dcache-load-misses}/{L1-dcache-loads})*100",
- ylabel="L1 misses in %",
- title="Espresso l1 cache misses",
- file_postfix="l1misses",
- yerr=False)
+        plt.plot(self,
+                 "({L1-dcache-load-misses}/{L1-dcache-loads})*100",
+                 plot_type='bar',
+                 plot_options={'yerr': False},
+                 fig_options={
+                     'ylabel': "L1 misses in %",
+                     'title': "Espresso l1 cache misses",
+                 },
+                 file_postfix="l1misses")
# Memusage
- plt.barplot_single_arg(self,
- "{VmHWM}",
- ylabel="VmHWM in KB",
- title="Espresso VmHWM",
- file_postfix="vmhwm")
+        plt.plot(self,
+                 "{VmHWM}",
+                 plot_type='bar',
+                 fig_options={
+                     'ylabel': "VmHWM in KB",
+                     'title': "Espresso VmHWM",
+                 },
+                 file_postfix="vmhwm")
plt.write_tex_table(self, [{
"label": "Runtime [ms]",
diff --git a/src/benchmarks/falsesharing.py b/src/benchmarks/falsesharing.py
index 5c18367..6735aa4 100644
--- a/src/benchmarks/falsesharing.py
+++ b/src/benchmarks/falsesharing.py
@@ -62,8 +62,7 @@ class BenchmarkFalsesharing(Benchmark):
for allocator in allocators:
sequential_perm = self.Perm(bench=bench, threads=1)
- for perm in self.iterate_args_fixed({"bench": bench},
- args=args):
+            for perm in self.iterate_args(args=args, fixed={"bench": bench}):
speedup = []
l1chache_misses = []
for i, measure in enumerate(self.results[allocator][perm]):
@@ -79,30 +78,25 @@ class BenchmarkFalsesharing(Benchmark):
del self.results["stats"]
self.calc_desc_statistics()
- plt.plot_fixed_arg(self,
- "{speedup}",
- ylabel="Speedup",
- title="Speedup: {arg} {arg_value}",
- file_postfix="speedup",
- autoticks=False,
- fixed=["bench"])
-
- plt.plot_fixed_arg(
- self,
- "{l1chache_misses}",
- ylabel="l1 cache misses in %",
- title="cache misses: {arg} {arg_value}",
- file_postfix="l1-misses",
- autoticks=False,
- fixed=["bench"])
-
- # plt.plot_fixed_arg(self,
- # "({LLC-load-misses}/{LLC-loads})*100",
- # ylabel="llc cache misses in %",
- # title="LLC misses: {arg} {arg_value}",
- # file_postfix="llc-misses",
- # autoticks=False,
- # fixed=["bench"])
+ plt.plot(self,
+ "{speedup}",
+             x_args=["threads"],
+ fig_options={
+ 'ylabel': "Speedup",
+                 'title': "Speedup: {fixed_part_str}",
+ 'autoticks': False,
+ },
+ file_postfix="speedup")
+
+ plt.plot(self,
+ "{l1chache_misses}",
+             x_args=["threads"],
+ fig_options={
+ 'ylabel': "l1 cache misses in %",
+                 'title': "cache misses: {fixed_part_str}",
+ 'autoticks': False,
+ },
+ file_postfix="l1-misses")
plt.write_tex_table(self, [{
"label": "Speedup",
@@ -117,7 +111,7 @@ class BenchmarkFalsesharing(Benchmark):
# pgfplots
for bench in args["bench"]:
plt.pgfplot(self,
- self.iterate_args_fixed({"bench": bench}, args=args),
+                    self.iterate_args(args=args, fixed={"bench": bench}),
"int(perm.threads)",
"{speedup}",
xlabel="Threads",
diff --git a/src/benchmarks/fd.py b/src/benchmarks/fd.py
index 6f88878..1fdd96a 100644
--- a/src/benchmarks/fd.py
+++ b/src/benchmarks/fd.py
@@ -67,19 +67,25 @@ class BenchmarkFd(Benchmark):
os.link(src, dest)
def summary(self):
- plt.barplot_single_arg(self,
- "{task-clock}",
- ylabel="runtime in ms",
- title="fd runtime",
- file_postfix="runtime")
+ plt.plot(self,
+ "{task-clock}",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': "runtime in ms",
+ 'title': "fd runtime",
+ },
+ file_postfix="runtime")
plt.export_stats_to_dataref(self, "task-clock")
- plt.barplot_single_arg(self,
- "{VmHWM}",
- ylabel="VmHWM in KB",
- title="fd memusage",
- file_postfix="memusage")
+ plt.plot(self,
+ "{VmHWM}",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': "VmHWM in KB",
+ 'title': "fd memusage"
+ },
+ file_postfix="memusage")
plt.export_stats_to_dataref(self, "VmHWM")
diff --git a/src/benchmarks/httpd.py b/src/benchmarks/httpd.py
index 8332f27..c46461f 100644
--- a/src/benchmarks/httpd.py
+++ b/src/benchmarks/httpd.py
@@ -60,29 +60,35 @@ class BenchmarkHTTPD(Benchmark):
"Requests per second:\\s*(\\d*\\.\\d*) .*", stdout).group(1)
def summary(self):
- plt.plot_fixed_arg(self,
- "{requests}",
- xlabel="threads",
- ylabel="requests/s",
- autoticks=False,
- file_postfix="requests",
- title="{perm.site}: requests/s")
+ plt.plot(self,
+ "{requests}",
+ fig_options={
+ 'xlabel': "threads",
+ 'ylabel': "requests/s",
+                 'title': "{fixed_part_str}: requests/s",
+ 'autoticks': False,
+ },
+ file_postfix="requests")
- plt.plot_fixed_arg(self,
- "{nginx_vmhwm}",
- xlabel="threads",
- ylabel="VmHWM in KB",
- title="{perm.site}: nginx memory usage",
- file_postfix="httpd_vmhwm",
- autoticks=False)
+ plt.plot(self,
+ "{nginx_vmhwm}",
+ fig_options={
+ 'xlabel': "threads",
+ 'ylabel': "VmHWM in KB",
+                 'title': "{fixed_part_str}: nginx memory usage",
+ 'autoticks': False,
+ },
+ file_postfix="httpd_vmhwm")
- plt.plot_fixed_arg(self,
- "{php-fpm_vmhwm}",
- xlabel="threads",
- ylabel="VmHWM in KB",
- title="{perm.site}: php-fpm memory usage",
- file_postfix="php-fpm_vmhwm",
- autoticks=False)
+ plt.plot(self,
+ "{php-fpm_vmhwm}",
+ fig_options={
+ 'xlabel': "threads",
+ 'ylabel': "VmHWM in KB",
+                 'title': "{fixed_part_str}: php-fpm memory usage",
+ 'autoticks': False,
+ },
+ file_postfix="php-fpm_vmhwm")
httpd = BenchmarkHTTPD()
diff --git a/src/benchmarks/keydb.py b/src/benchmarks/keydb.py
index 343d356..8502e21 100644
--- a/src/benchmarks/keydb.py
+++ b/src/benchmarks/keydb.py
@@ -110,14 +110,20 @@ class BenchmarkKeyDB(Benchmark):
os.remove("dump.rdb")
def summary(self):
- plt.plot_fixed_arg(self, "{totals_ops}",
- ylabel="'OPS/second'",
- title="KeyDB Operations - {arg}: {arg_value}",
- file_postfix="total_ops")
-
- plt.plot_fixed_arg(self, "{keydb_vmhwm}",
- ylabel="'VmHWM [KB]'",
- title="KeyDB Memusage - {arg}: {arg_value}",
- file_postfix="vmhwm")
+ plt.plot(self,
+ "{totals_ops}",
+ fig_options={
+ 'ylabel': "'OPS/second'",
+                     'title': "KeyDB Operations - {fixed_part_str}",
+ },
+ file_postfix="total_ops")
+
+ plt.plot(self,
+ "{keydb_vmhwm}",
+ fig_options={
+ 'ylabel': "'VmHWM [KB]'",
+                     'title': "KeyDB Memusage - {fixed_part_str}",
+ },
+ file_postfix="vmhwm")
keydb = BenchmarkKeyDB()
diff --git a/src/benchmarks/larson.py b/src/benchmarks/larson.py
index d2e9286..db38789 100644
--- a/src/benchmarks/larson.py
+++ b/src/benchmarks/larson.py
@@ -81,18 +81,21 @@ class BenchmarkLarson(Benchmark):
def summary(self):
# Plot threads->throughput and maxsize->throughput
- plt.plot_fixed_arg(self,
- "{throughput}/1000000",
- ylabel="MOPS/s",
- title="Larson: {arg} {arg_value}",
- file_postfix="throughput")
-
- plt.plot_fixed_arg(
- self,
- "({L1-dcache-load-misses}/{L1-dcache-loads})*100",
- ylabel="l1 cache misses in %",
- title="Larson cache misses: {arg} {arg_value}",
- file_postfix="cachemisses")
+ plt.plot(self,
+ "{throughput}/1000000",
+ fig_options={
+ 'ylabel': "MOPS/s",
+                 'title': "Larson: {fixed_part_str}",
+ },
+ file_postfix="throughput")
+
+ plt.plot(self,
+ "({L1-dcache-load-misses}/{L1-dcache-loads})*100",
+ fig_options={
+ 'ylabel': "l1 cache misses in %",
+                 'title': "Larson cache misses: {fixed_part_str}",
+ },
+ file_postfix="cachemisses")
larson = BenchmarkLarson()
diff --git a/src/benchmarks/loop.py b/src/benchmarks/loop.py
index 81d55cb..3c9bbac 100644
--- a/src/benchmarks/loop.py
+++ b/src/benchmarks/loop.py
@@ -61,19 +61,20 @@ class BenchmarkLoop(Benchmark):
def summary(self):
# Speed
- plt.plot_fixed_arg(self,
- "{mops}",
- ylabel="MOPS/cpu-second",
- title="Loop: {arg} {arg_value}",
- file_postfix="time",
- autoticks=False)
+        plt.plot(
+            self,
+            "{mops}",
+            fig_options={
+                'ylabel': "MOPS/cpu-second",
+                'title': "Loop: {fixed_part_str}",
+                'autoticks': False,
+            },
+            file_postfix="time")
# L1 cache misses
- plt.plot_fixed_arg(
+ plt.plot(
self,
"({L1-dcache-load-misses}/{L1-dcache-loads})*100",
-        ylabel="L1 misses in %",
-        title="Loop l1 cache misses: {arg} {arg_value}",
-        file_postfix="l1misses",
-        autoticks=False)
+        fig_options={
+            'ylabel': "L1 misses in %",
+            'title': "Loop l1 cache misses: {fixed_part_str}",
+            'autoticks': False,
+        },
+        file_postfix="l1misses")
@@ -83,20 +84,21 @@ class BenchmarkLoop(Benchmark):
"{mops}",
file_postfix="time.matrix")
- plt.write_tex_table(self, [{
- "label": "MOPS/s",
- "expression": "{mops}",
- "sort": ">"
- }],
- file_postfix="mops.table")
+ plt.write_tex_table(
+ self,
+ [{
+ "label": "MOPS/s",
+ "expression": "{mops}",
+ "sort": ">"
+ }],
+ file_postfix="mops.table")
- # plt.export_stats_to_csv(self, "task-clock")
- # plt.export_stats_to_dataref(self, "task-clock")
+ plt.export_stats_to_csv(self, "task-clock")
+ plt.export_stats_to_dataref(self, "task-clock")
# pgfplot test
plt.pgfplot(self,
- self.iterate_args_fixed({"maxsize": 1024},
- args=self.results["args"]),
+                self.iterate_args(self.results["args"], {"maxsize": 1024}),
"int(perm.threads)",
"{mops}",
xlabel="Threads",
diff --git a/src/benchmarks/mysql.py b/src/benchmarks/mysql.py
index a5b215c..0df85d2 100644
--- a/src/benchmarks/mysql.py
+++ b/src/benchmarks/mysql.py
@@ -201,42 +201,60 @@ class BenchmarkMYSQL(Benchmark):
args = self.results["args"]
# linear plot
- plt.plot_single_arg(self, "{transactions}",
- xlabel='"threads"',
- ylabel='"transactions"',
- title='"sysbench oltp read only"',
- file_postfix="l")
+ plt.plot(self,
+ "{transactions}",
+ fig_options={
+ 'xlabel': 'threads',
+ 'ylabel': 'transactions',
+ 'title': 'sysbench oltp read only',
+ },
+ file_postfix="l")
# normalized linear plot
ref_alloc = list(allocators)[0]
- plt.plot_single_arg(self, "{transactions}",
- xlabel='"threads"',
- ylabel='"transactions scaled at " + scale',
- title='"sysbench oltp read only"',
- file_postfix="norm.l",
- scale=ref_alloc)
+ plt.plot(self,
+ "{transactions}",
+ fig_options={
+ 'xlabel': 'threads',
+ 'ylabel': 'transactions scaled at {scale}',
+ 'title': 'sysbench oltp read only',
+ },
+ file_postfix="norm.l",
+ scale=ref_alloc)
# bar plot
- plt.barplot_single_arg(self, "{transactions}",
- xlabel='"threads"',
- ylabel='"transactions"',
- title='"sysbench oltp read only"',
- file_postfix="b")
+ plt.plot(self,
+ "{transactions}",
+ plot_type='bar',
+ fig_options={
+ 'xlabel': 'threads',
+ 'ylabel': 'transactions',
+ 'title': 'sysbench oltp read only',
+ },
+ file_postfix="b")
# normalized bar plot
- plt.barplot_single_arg(self, "{transactions}",
- xlabel='"threads"',
- ylabel='"transactions scaled at " + scale',
- title='"sysbench oltp read only"',
- file_postfix="norm.b",
- scale=ref_alloc)
+ plt.plot(self,
+ "{transactions}",
+ plot_type='bar',
+ fig_options={
+ 'xlabel': 'threads',
+ 'ylabel': 'transactions scaled at {scale}',
+ 'title': 'sysbench oltp read only',
+ },
+ file_postfix="norm.b",
+ scale=ref_alloc)
# Memusage
- plt.barplot_single_arg(self, "{mysqld_vmhwm}",
- xlabel='"threads"',
- ylabel='"VmHWM in kB"',
- title='"Memusage sysbench oltp read only"',
- file_postfix="mem")
+ plt.plot(self,
+ "{mysqld_vmhwm}",
+ plot_type='bar',
+ fig_options={
+ 'xlabel': 'threads',
+ 'ylabel': 'VmHWM in kB',
+ 'title': 'Memusage sysbench oltp read only',
+ },
+ file_postfix="mem")
plt.write_tex_table(self, [{
"label": "Transactions",
diff --git a/src/benchmarks/raxmlng.py b/src/benchmarks/raxmlng.py
index 8c8b878..ef77d2d 100644
--- a/src/benchmarks/raxmlng.py
+++ b/src/benchmarks/raxmlng.py
@@ -82,19 +82,25 @@ class BenchmarkRaxmlng(Benchmark):
result["runtime"] = RUNTIME_RE.search(stdout).group("runtime")
def summary(self):
- plt.barplot_single_arg(self,
- "{runtime}",
- ylabel='"runtime in s"',
- title='"raxml-ng tree inference benchmark"',
- file_postfix="runtime")
+ plt.plot(self,
+ "{runtime}",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': 'runtime in s',
+ 'title': 'raxml-ng tree inference benchmark',
+ },
+ file_postfix="runtime")
plt.export_stats_to_dataref(self, "runtime")
- plt.barplot_single_arg(self,
- "{VmHWM}",
- ylabel='"VmHWM in KB"',
- title='"raxml-ng memusage"',
- file_postfix="memusage")
+ plt.plot(self,
+ "{VmHWM}",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': 'VmHWM in KB',
+ 'title': 'raxml-ng memusage',
+ },
+ file_postfix="memusage")
plt.export_stats_to_dataref(self, "VmHWM")
diff --git a/src/benchmarks/realloc.py b/src/benchmarks/realloc.py
index bd8f801..d3375ff 100644
--- a/src/benchmarks/realloc.py
+++ b/src/benchmarks/realloc.py
@@ -34,10 +34,14 @@ class BenchmarkRealloc(Benchmark):
super().__init__(name)
def summary(self):
- plt.barplot_single_arg(self, "{task-clock}",
- ylabel='"task-clock in ms"',
- title='"realloc micro benchmark"',
- file_postfix="time")
+ plt.plot(self,
+ "{task-clock}",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': 'task-clock in ms',
+ 'title': 'realloc micro benchmark',
+ },
+ file_postfix="time")
plt.export_stats_to_csv(self, "task-clock")
plt.export_stats_to_dataref(self, "task-clock")
diff --git a/src/benchmarks/redis.py b/src/benchmarks/redis.py
index cfad489..265d825 100644
--- a/src/benchmarks/redis.py
+++ b/src/benchmarks/redis.py
@@ -81,15 +81,23 @@ class BenchmarkRedis(Benchmark):
os.remove("dump.rdb")
def summary(self):
- plt.barplot_single_arg(self, "{requests}",
- ylabel='"requests per s"',
- title='"redis throughput"',
- file_postfix="requests")
-
- plt.barplot_single_arg(self, "{redis_vmhwm}",
- ylabel='"VmHWM in KB"',
- title='"redis memusage"',
- file_postfix="vmhwm")
+ plt.plot(self,
+ "{requests}",
+ plot_type='bar',
+ fig_options={
+ 'ylabel': 'requests per s',
+ 'title': 'redis throughput',
+ },
+ file_postfix="requests")
+
+ plt.plot(self,
+ "{redis_vmhwm}",
+ plot_type='bar',
+ fig_options={
+                     'ylabel': 'VmHWM in KB',
+                     'title': 'redis memusage',
+ },
+ file_postfix="vmhwm")
plt.export_stats_to_dataref(self, "requests")
diff --git a/src/benchmarks/t_test1.py b/src/benchmarks/t_test1.py
index f0856f6..2d86bdc 100644
--- a/src/benchmarks/t_test1.py
+++ b/src/benchmarks/t_test1.py
@@ -42,19 +42,24 @@ class BenchmarkTTest1(Benchmark):
# mops / per second
yval = "perm.nthreads / ({task-clock}/1000)"
# Speed
- plt.plot_fixed_arg(self, yval,
- ylabel='"Mops / CPU second"',
- title='"T-Ttest1: " + arg + " " + str(arg_value)',
- file_postfix="time",
- autoticks=False)
+ plt.plot(self,
+ yval,
+ fig_options={
+ 'ylabel': 'Mops / CPU second',
+ 'title': 't-test1: {fixed_part_str}',
+ 'autoticks': False,
+ },
+ file_postfix="time")
# L1 cache misses
- plt.plot_fixed_arg(self,
+ plt.plot(self,
"({L1-dcache-load-misses}/{L1-dcache-loads})*100",
- ylabel='"L1 misses in %"',
- title='"T-Test1 l1 cache misses: " + arg + " " + str(arg_value)',
- file_postfix="l1misses",
- autoticks=False)
+ fig_options={
+ 'ylabel': 'L1 misses in %',
+ 'title': 't-test1 l1 cache misses: {fixed_part_str}',
+ 'autoticks': False,
+ },
+ file_postfix="l1misses")
# Speed Matrix
plt.write_best_doublearg_tex_table(self, yval, file_postfix="mops.matrix")
@@ -64,7 +69,7 @@ class BenchmarkTTest1(Benchmark):
"expression": yval,
"sort": ">"
}],
- file_postfix="mops.table")
+ file_postfix="mops.table")
plt.export_stats_to_csv(self, "task-clock")
diff --git a/src/plots.py b/src/plots.py
index a22a201..3228e5b 100644
--- a/src/plots.py
+++ b/src/plots.py
@@ -16,12 +16,13 @@
# along with allocbench. If not, see <http://www.gnu.org/licenses/>.
"""Plot different graphs from allocbench results"""
-import os
-import traceback
-
+import copy
+import itertools
+import os
+import traceback
+
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import src.globalvars
from src.util import print_debug, print_warn
@@ -29,6 +30,38 @@ from src.util import print_debug, print_warn
# This is useful when evaluating strings in the plot functions. str(np.NaN) == "nan"
nan = np.NaN
+DEFAULT_PLOT_OPTIONS = {
+ 'plot': {
+ 'marker': '.',
+ 'linestyle': '-',
+ },
+ 'errorbar': {
+ 'marker': '.',
+ 'linestyle': '-',
+ 'yerr': True,
+ },
+ 'bar': {
+ 'yerr': True,
+ }
+}
+
+DEFAULT_FIG_OPTIONS = {
+ 'plot': {
+ 'legend': True,
+ 'legend_pos': 'best',
+ 'autoticks': True,
+ },
+ 'errorbar': {
+ 'legend': True,
+ 'legend_pos': 'best',
+ 'autoticks': True,
+ },
+ 'bar': {
+ 'legend': True,
+ 'legend_pos': 'lower center',
+ 'autoticks': False,
+ }
+}
def _get_alloc_color(bench, alloc):
"""Populate all not set allocator colors with matplotlib 'C' colors"""
@@ -81,197 +114,246 @@ def _get_y_data(bench, expression, allocator, perms, stat="mean", scale=None):
return y_data
+def _create_plot_options(plot_type, **kwargs):
+ """
+ Create a plot options dictionary.
+
+ Parameters
+ ----------
+ plot_type : str
+ The plot type for which the options should be created.
+ Possible values: {'bar', 'errorbar', 'plot'}
+
+ **kwargs : plot properties, optional
+ *kwargs* are used to specify properties like a line label (for
+ auto legends), linewidth, antialiasing, marker face color.
+
+ Returns
+ -------
+ options : dict
+ Dict holding the specified options and all default values for plot type
+ """
-def _save_figure(fig,
- fig_name,
- sumdir='',
- file_ext=src.globalvars.summary_file_ext):
- fig_path = os.path.join(sumdir, f"{fig_name}.{file_ext}")
- if file_ext == "tex":
- import tikzplotlib
- tikzplotlib.save(fig_path)
- else:
- fig.savefig(fig_path)
+ options = copy.deepcopy(DEFAULT_PLOT_OPTIONS[plot_type])
+ for key, value in kwargs.items():
+ options[key] = value
+ return options
-def plot_single_arg(bench,
- yval,
- ylabel="y-label",
- xlabel="x-label",
- autoticks=True,
- title="default title",
- file_postfix="",
- sumdir="",
- arg="",
- scale=None,
- file_ext=src.globalvars.summary_file_ext):
- """plot line graphs for each permutation of the benchmark's command arguments"""
+def _create_figure_options(plot_type, fig_label, **kwargs):
+ """
+ Create a figure options dictionary
+
+ Parameters
+ ----------
+ plot_type : str
+ The plot type for which the options should be created.
+ Possible values: {'bar', 'errorbar', 'plot'}
+
+ **kwargs : figure properties, optional
+ *kwargs* are used to specify properties like legends, legend position,
+ x-/ and ylabel, and title.
+
+ Returns
+ -------
+ options : dict
+ Dict holding the specified options and all default values for plot type
+ """
- args = bench.results["args"]
- allocators = bench.results["allocators"]
+ options = copy.deepcopy(DEFAULT_FIG_OPTIONS[plot_type])
- arg = arg or list(args.keys())[0]
+ options['fig_label'] = fig_label
- fig_name = f'{bench.name}.{file_postfix}'
- fig = plt.figure(fig_name)
+ for key, value in kwargs.items():
+ options[key] = value
- if not autoticks:
- x_vals = list(range(1, len(args[arg]) + 1))
- else:
- x_vals = args[arg]
+ return options
+
+def _plot(bench,
+ allocators,
+ y_expression,
+ x_data,
+ perms,
+ plot_type,
+ plot_options,
+ fig_options,
+ scale=None,
+ file_postfix="",
+ sumdir="",
+ file_ext=src.globalvars.summary_file_ext):
+ """
+ Create a plot for a given expression
+ Parameters
+ ----------
+
+ Returns
+ -------
+ figure : :rc:`~matplotlib.figure.Figure`
+ The new :rc:`.Figure` instance wrapping our plot.
+
+ Notes
+ -----
+ If you are creating many figures, make sure you explicitly call
+ :rc:`.pyplot.close` on the figures you are not using, because this will
+ enable pyplot to properly clean up the memory.
+ """
+
+ fig = plt.figure(fig_options['fig_label'])
for allocator in allocators:
- y_vals = _get_y_data(bench,
- yval,
+ y_data = _get_y_data(bench,
+ y_expression,
allocator,
- bench.iterate_args(args=args),
+ perms,
stat='mean',
scale=scale)
- plt.plot(x_vals,
- y_vals,
- marker='.',
- linestyle='-',
- label=allocator,
- color=_get_alloc_color(bench, allocator))
-
- plt.legend(loc="best")
- if not autoticks:
- plt.xticks(x_vals, args[arg])
- label_substitutions = vars()
- label_substitutions.update(vars(bench))
- plt.xlabel(xlabel.format(**label_substitutions))
- plt.ylabel(ylabel.format(**label_substitutions))
- plt.title(title.format(**label_substitutions))
- _save_figure(fig, fig_name, sumdir, file_ext)
- plt.close(fig)
+ if plot_options.get('yerr', False):
+            plot_options['yerr'] = _get_y_data(bench,
+                                               y_expression,
+                                               allocator,
+                                               perms,
+                                               stat='std')
+ try:
+ plot_func = getattr(plt, plot_type)
+ except AttributeError:
+ print_debug(f'Unknown plot type: {plot_type}')
+ raise
+
+ _x_data = x_data
+ if not fig_options['autoticks']:
+ _x_data = list(range(1, len(x_data) + 1))
+
+ plot_func(_x_data,
+ y_data,
+ label=allocator,
+ color=_get_alloc_color(bench, allocator),
+ **plot_options)
+
+ if fig_options['legend']:
+ plt.legend(loc=fig_options['legend_pos'])
+
+ if not fig_options['autoticks']:
+ plt.xticks(_x_data, x_data)
+
+ plt.xlabel(fig_options['xlabel'])
+ plt.ylabel(fig_options['ylabel'])
+ plt.title(fig_options['title'])
+
+ fig_path = os.path.join(sumdir, f'{fig_options["fig_label"]}.{file_ext}')
+ if file_ext == 'tex':
+ import tikzplotlib
+ tikzplotlib.save(fig_path)
+ else:
+ fig.savefig(fig_path)
return fig
+def plot(bench,
+ y_expression,
+ plot_type='errorbar',
+ x_args=None,
+ scale=None,
+ plot_options=None,
+ fig_options=None,
+ file_postfix="",
+ sumdir="",
+ file_ext=src.globalvars.summary_file_ext):
+ """
+ Create plots for a given expression for the y axis.
+
+ Parameters
+ ----------
+
+ y_expression : str
+
+ plot_type : str, optional, default='errorbar'
+ The plot type for which the options should be created.
+ Possible values: {'bar', 'errorbar', 'plot'}
+
+ x_args : [str], optional, default=None
+ The benchmark arguments for which a plot should be created.
+ If not provided, defaults to :rc:`bench.arguments.keys()`
+
+ scale : str, optional, default=None
+ Name of the allocator which should be used to normalize the results.
+
+ plot_options : dict, optional, default None
+ Dictionary containing plot options which should be passed to the plot
+ type function. If not provided the default plot type options are used.
+ Possible options:
+ * yerr: bool - Plot the standard deviation as errorbars
+ * marker: str - Style of the used markers
+ * line: str - Style of the drawn lines
+
+ fig_options : dict, optional, default None
+ Dictionary containing figure options.
+ If not provided the default plot type options are used.
+ Possible options:
+ * ylabel : str - The label of the y axis.
+ * xlabel : str - The label of the x axis.
+ * title : str - The title of the plot.
+ * legend : bool - Should the plot have a legend.
+ * legend_pos : str - Location of the legend.
+ For possible values see :rc:`help(matplotlib.pyploy.legend)`.
+ * autoticks : bool - Let matplotlib set the xticks automatically.
+
+ file_postfix: str, optional, default=""
+ Postfix which is appended to the plot's file name.
+
+ sumdir : path or str, optional, default=""
+ Directory where the plot should be saved. If not provided defaults
+ to the current working directory.
+
+ file_ext : str, optional, default=:rc:`src.globalvars.summary_file_ext`
+ File extension of the saved plot. If not provided defaults to the
+ value of :rc:`src.globalvars.summary_file_ext`
-def barplot_single_arg(bench,
- yval,
- ylabel="y-label",
- xlabel="x-label",
- title="default title",
- file_postfix="",
- sumdir="",
- arg="",
- scale=None,
- file_ext=src.globalvars.summary_file_ext,
- yerr=True):
- """plot bar plots for each permutation of the benchmark's command arguments"""
+ """
args = bench.results["args"]
allocators = bench.results["allocators"]
- nallocators = len(allocators)
- if arg:
- arg = args[arg]
- elif args.keys():
- arg = args[list(args.keys())[0]]
- else:
- arg = [""]
+ if not x_args:
+ x_args = args
- narg = len(arg)
+ for loose_arg in x_args:
+ x_data = args[loose_arg]
- fig_name = f'{bench.name}.{file_postfix}'
- fig = plt.figure(fig_name)
+ fixed_args = [[(k, v) for v in args[k]] for k in args if k != loose_arg]
+ for fixed_part in itertools.product(*fixed_args):
+ fixed_part = {k:v for k, v in fixed_part}
- for i, allocator in enumerate(allocators):
- x_vals = list(range(i, narg * (nallocators + 1), nallocators + 1))
- y_vals = _get_y_data(bench,
- yval,
- allocator,
- bench.iterate_args(args=args),
- stat='mean',
- scale=scale)
- y_errs = None
- if yerr:
- y_vals = _get_y_data(bench,
- yval,
- allocator,
- bench.iterate_args(args=args),
- stat='std')
-
- plt.bar(x_vals,
- y_vals,
- width=1,
- label=allocator,
- yerr=y_errs,
- color=_get_alloc_color(bench, allocator))
-
- plt.legend(loc="best")
- plt.xticks(
- list(
- range(int(np.floor(nallocators / 2)), narg * (nallocators + 1),
- nallocators + 1)), arg)
+ fixed_part_str = ".".join([f'{k}={v}' for k, v in fixed_part.items()])
+ fig_label = f'{bench.name}.{fixed_part_str}.{file_postfix}'
- label_substitutions = vars()
- label_substitutions.update(vars(bench))
- plt.xlabel(xlabel.format(**label_substitutions))
- plt.ylabel(ylabel.format(**label_substitutions))
- plt.title(title.format(**label_substitutions))
-
- _save_figure(fig, fig_name, sumdir, file_ext)
- plt.close(fig)
+            plot_options = _create_plot_options(plot_type, **plot_options or {})
+            fig_options = dict(fig_options or {})
+            substitutions = vars()
+            substitutions.update(vars(bench))
+            for option, value in fig_options.items():
+                if isinstance(value, str):
+                    fig_options[option] = value.format(**substitutions)
-def plot_fixed_arg(bench,
- yval,
- ylabel="y-label",
- xlabel="{loose_arg}",
- autoticks=True,
- title="default title",
- file_postfix="",
- sumdir="",
- fixed=None,
- file_ext=src.globalvars.summary_file_ext,
- scale=None):
+ # plot specific defaults
+ fig_options.setdefault("ylabel", y_expression)
+ fig_options.setdefault("xlabel", loose_arg)
+            fig_options.setdefault("title", fig_label)
- args = bench.results["args"]
- allocators = bench.results["allocators"]
+ fig_options = _create_figure_options(
+ plot_type,
+ fig_label,
+ **fig_options or {},
+ )
- for arg in fixed or args:
- loose_arg = [a for a in args if a != arg][0]
-
- if not autoticks:
- x_vals = list(range(1, len(args[loose_arg]) + 1))
- else:
- x_vals = args[loose_arg]
-
- for arg_value in args[arg]:
- fig_name = f'{bench.name}.{arg}.{arg_value}.{file_postfix}'
- fig = plt.figure(fig_name)
-
- for allocator in allocators:
- y_vals = _get_y_data(bench,
- yval,
- allocator,
- bench.iterate_args_fixed({arg: arg_value},
- args=args),
- stat='mean',
- scale=scale)
-
- plt.plot(x_vals,
- y_vals,
- marker='.',
- linestyle='-',
- label=allocator,
- color=_get_alloc_color(bench, allocator))
-
- plt.legend(loc="best")
- if not autoticks:
- plt.xticks(x_vals, args[loose_arg])
-
- label_substitutions = vars()
- label_substitutions.update(vars(bench))
- plt.xlabel(xlabel.format(**label_substitutions))
- plt.ylabel(ylabel.format(**label_substitutions))
- plt.title(title.format(**label_substitutions))
-
- _save_figure(fig, fig_name, sumdir, file_ext)
- plt.close(fig)
+ _plot(bench,
+ allocators,
+ y_expression,
+ x_data,
+ list(bench.iterate_args(args=args, fixed=fixed_part)),
+ plot_type,
+ plot_options,
+ fig_options)
def export_facts_to_file(bench, comment_symbol, output_file):
@@ -392,7 +474,7 @@ def write_best_doublearg_tex_table(bench,
cell_text = []
for arg_value in rows:
row = []
- for perm in bench.iterate_args_fixed({row_arg: arg_value}, args=args):
+ for perm in bench.iterate_args(args=args, fixed={row_arg: arg_value}):
best = []
best_val = None
for allocator in allocators: