aboutsummaryrefslogtreecommitdiff
path: root/src/benchmarks/falsesharing.py
diff options
context:
space:
mode:
authorFlorian Fischer <florian.fl.fischer@fau.de>2019-03-25 17:49:39 +0100
committerFlorian Fischer <florian.fl.fischer@fau.de>2019-03-25 17:49:39 +0100
commit25c4d81069f576354d0279bf38417c236e924540 (patch)
treee5b953bc96220f07c86bf01ac0f900751b08543e /src/benchmarks/falsesharing.py
parent5c4ee34ec788ab0a59fe10c125452323d4b67d98 (diff)
downloadallocbench-25c4d81069f576354d0279bf38417c236e924540.tar.gz
allocbench-25c4d81069f576354d0279bf38417c236e924540.zip
move benchmark definitions into src/benchmarks
bench now loads all *.py files from src/benchmarks as benchmarks
Diffstat (limited to 'src/benchmarks/falsesharing.py')
-rw-r--r--src/benchmarks/falsesharing.py74
1 files changed, 74 insertions, 0 deletions
diff --git a/src/benchmarks/falsesharing.py b/src/benchmarks/falsesharing.py
new file mode 100644
index 0000000..f6375a6
--- /dev/null
+++ b/src/benchmarks/falsesharing.py
@@ -0,0 +1,74 @@
+import matplotlib.pyplot as plt
+import numpy as np
+import re
+
+from src.benchmark import Benchmark
+
# Matches the timing line printed by cache-thrash/cache-scratch, e.g.
# "Time elapsed = 1.23 seconds." — raw string so \d is a regex class, not
# a (deprecated) string escape; the final dot is escaped to match literally.
time_re = re.compile(r"^Time elapsed = (?P<time>\d*\.\d*) seconds\.$")
+
+
class Benchmark_Falsesharing(Benchmark):
    """False-sharing benchmark based on Hoard's cache-thrash/cache-scratch.

    Runs both programs with a growing number of threads and summarizes,
    per allocator, the speedup over the single-threaded run as well as
    L1 and LLC cache-miss rates.
    """

    def __init__(self):
        self.name = "falsesharing"
        self.description = """This benchmark makes small allocations and writes
                           to them multiple times. If the allocated objects
                           are on the same cache line the writes will be
                           expensive because of cache thrashing."""
        # Backward-compatible alias: earlier revisions misspelled the
        # attribute name, so keep the old spelling readable too.
        self.descrition = self.description

        # <benchmark> <threads> <objects/thread> <size> <iterations>
        self.cmd = "cache-{bench}{binary_suffix} {threads} 100 8 1000000"

        self.args = {
            "bench": ["thrash", "scratch"],
            "threads": Benchmark.scale_threads_for_cpus(2)
        }

        self.requirements = ["cache-thrash", "cache-scratch"]
        super().__init__()

    def process_output(self, result, stdout, stderr, allocator, perm, verbose):
        """Extract the elapsed wall time (seconds, as a string) from stdout."""
        match = time_re.match(stdout)
        if match is None:
            # Fail loudly with context instead of an opaque AttributeError.
            raise ValueError("unexpected output of " + self.name + ": "
                             + repr(stdout))
        result["time"] = match.group("time")

    def summary(self):
        """Plot per-allocator speedups and cache-miss percentages."""
        args = self.results["args"]
        nthreads = args["threads"]
        allocators = self.results["allocators"]

        # One speedup figure per benchmark variant (thrash/scratch).
        for bench in args["bench"]:
            for allocator in allocators:
                y_vals = []

                # Baseline: mean runtime of the single-threaded permutation.
                single_threaded_perm = self.Perm(bench=bench, threads=1)
                single_threaded = np.mean(
                    [float(m["time"])
                     for m in self.results[allocator][single_threaded_perm]])

                for perm in self.iterate_args_fixed({"bench": bench}, args=args):
                    d = [float(m["time"]) for m in self.results[allocator][perm]]
                    # Speedup relative to the single-threaded baseline.
                    y_vals.append(single_threaded / np.mean(d))

                plt.plot(nthreads, y_vals, marker='.', linestyle='-',
                         label=allocator, color=allocators[allocator]["color"])

            plt.legend()
            plt.xlabel("threads")
            plt.ylabel("speedup")
            plt.title(bench + " speedup")
            plt.savefig(self.name + "." + bench + ".png")
            plt.clf()

        # L1 data-cache miss rate in percent, one figure per fixed bench.
        self.plot_fixed_arg("({L1-dcache-load-misses}/{L1-dcache-loads})*100",
                            ylabel="'l1 cache misses in %'",
                            title="'cache misses: ' + arg + ' ' + str(arg_value)",
                            filepostfix="l1-misses",
                            fixed=["bench"])

        # Last-level-cache miss rate in percent (label fixed: previously
        # copy-pasted as "l1 cache misses").
        self.plot_fixed_arg("({LLC-load-misses}/{LLC-loads})*100",
                            ylabel="'LLC misses in %'",
                            title="'LLC misses: ' + arg + ' ' + str(arg_value)",
                            filepostfix="llc-misses",
                            fixed=["bench"])
+
+
# Module-level instance: the bench driver loads every *.py file in
# src/benchmarks and picks up this object as the benchmark definition.
falsesharing = Benchmark_Falsesharing()