aboutsummaryrefslogtreecommitdiff
path: root/src/benchmarks/falsesharing.py
diff options
context:
space:
mode:
Diffstat (limited to 'src/benchmarks/falsesharing.py')
-rw-r--r--src/benchmarks/falsesharing.py60
1 file changed, 34 insertions, 26 deletions
diff --git a/src/benchmarks/falsesharing.py b/src/benchmarks/falsesharing.py
index 671dbc8..530ca99 100644
--- a/src/benchmarks/falsesharing.py
+++ b/src/benchmarks/falsesharing.py
@@ -14,7 +14,6 @@
#
# You should have received a copy of the GNU General Public License
# along with allocbench. If not, see <http://www.gnu.org/licenses/>.
-
"""Definition of the falsesharing benchmark"""
import re
@@ -25,7 +24,6 @@ import numpy as np
from src.benchmark import Benchmark
from src.globalvars import summary_file_ext
-
TIME_RE = re.compile("^Time elapsed = (?P<time>\\d*\\.\\d*) seconds.$")
@@ -36,14 +34,15 @@ class BenchmarkFalsesharing(Benchmark):
times. If the allocated objects are on the same cache line the writes
will be expensive because of cache thrashing.
"""
-
def __init__(self):
name = "falsesharing"
self.cmd = "cache-{bench}{binary_suffix} {threads} 100 8 10000000"
- self.args = {"bench": ["thrash", "scratch"],
- "threads": Benchmark.scale_threads_for_cpus(1)}
+ self.args = {
+ "bench": ["thrash", "scratch"],
+ "threads": Benchmark.scale_threads_for_cpus(1)
+ }
self.requirements = ["cache-thrash", "cache-scratch"]
super().__init__(name)
@@ -62,13 +61,18 @@ class BenchmarkFalsesharing(Benchmark):
for allocator in allocators:
sequential_perm = self.Perm(bench=bench, threads=1)
- for perm in self.iterate_args_fixed({"bench": bench}, args=args):
+ for perm in self.iterate_args_fixed({"bench": bench},
+ args=args):
speedup = []
l1chache_misses = []
for i, measure in enumerate(self.results[allocator][perm]):
- sequential_time = float(self.results[allocator][sequential_perm][i]["time"])
- measure["speedup"] = sequential_time / float(measure["time"])
- measure["l1chache_misses"] = eval("({L1-dcache-load-misses}/{L1-dcache-loads})*100".format(**measure))
+ sequential_time = float(self.results[allocator]
+ [sequential_perm][i]["time"])
+ measure["speedup"] = sequential_time / float(
+ measure["time"])
+ measure["l1chache_misses"] = eval(
+ "({L1-dcache-load-misses}/{L1-dcache-loads})*100".
+ format(**measure))
# delete and recalculate stats
del self.results["stats"]
@@ -81,23 +85,27 @@ class BenchmarkFalsesharing(Benchmark):
autoticks=False,
fixed=["bench"])
- self.plot_fixed_arg("{l1chache_misses}",
- ylabel="'l1 cache misses in %'",
- title="'cache misses: ' + arg + ' ' + str(arg_value)",
- filepostfix="l1-misses",
- autoticks=False,
- fixed=["bench"])
-
- self.plot_fixed_arg("({LLC-load-misses}/{LLC-loads})*100",
- ylabel="'llc cache misses in %'",
- title="'LLC misses: ' + arg + ' ' + str(arg_value)",
- filepostfix="llc-misses",
- autoticks=False,
- fixed=["bench"])
-
- self.write_tex_table([{"label": "Speedup",
- "expression": "{speedup}",
- "sort":">"}],
+ self.plot_fixed_arg(
+ "{l1chache_misses}",
+ ylabel="'l1 cache misses in %'",
+ title="'cache misses: ' + arg + ' ' + str(arg_value)",
+ filepostfix="l1-misses",
+ autoticks=False,
+ fixed=["bench"])
+
+ self.plot_fixed_arg(
+ "({LLC-load-misses}/{LLC-loads})*100",
+ ylabel="'llc cache misses in %'",
+ title="'LLC misses: ' + arg + ' ' + str(arg_value)",
+ filepostfix="llc-misses",
+ autoticks=False,
+ fixed=["bench"])
+
+ self.write_tex_table([{
+ "label": "Speedup",
+ "expression": "{speedup}",
+ "sort": ">"
+ }],
filepostfix="speedup.table")
self.export_stats_to_csv("speedup", "time")