aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rwxr-xr-xbench.py22
-rw-r--r--bench_conprod.py46
-rw-r--r--bench_loop.py45
-rw-r--r--bench_mysql.py17
-rw-r--r--benchmark.py15
5 files changed, 97 insertions, 48 deletions
diff --git a/bench.py b/bench.py
index b7b6592..1ecec73 100755
--- a/bench.py
+++ b/bench.py
@@ -1,21 +1,41 @@
#!/usr/bin/env python3
+import argparse
+
from bench_loop import loop
from bench_conprod import conprod
from bench_mysql import mysql
+parser = argparse.ArgumentParser(description="benchmark memory allocators")
+parser.add_argument("-s", "--save", help="save benchmark results to disk", action='store_true')
+parser.add_argument("-l", "--load", help="load benchmark results from disk", action='store_true')
+parser.add_argument("-r", "--runs", help="how often the benchmarks run", type=int, default=3)
+
+
benchmarks = [loop, conprod, mysql]
def main():
+ args = parser.parse_args()
+ print(args)
+
for bench in benchmarks:
+ if args.load:
+ bench.load()
+
print("Preparing", bench.name)
if not bench.prepare():
continue
+
print("Running", bench.name)
- if not bench.run(runs=1):
+ if not bench.run(runs=args.runs):
continue
+
+ if args.save:
+ bench.save()
+
print("Summarizing", bench.name)
bench.summary()
+
if hasattr(bench, "cleanup"):
print("Cleaning after", bench.name)
bench.cleanup()
diff --git a/bench_conprod.py b/bench_conprod.py
index 21e4b13..60a9889 100644
--- a/bench_conprod.py
+++ b/bench_conprod.py
@@ -6,14 +6,16 @@ import numpy as np
import os
import subprocess
+from benchmark import Benchmark
from common_targets import common_targets
cmd = ("perf stat -x\; -e cpu-clock:k,cache-references,cache-misses,cycles,"
"instructions,branches,faults,migrations "
"build/bench_conprod{0} {1} {1} {1} 1000000 {2}")
-class Benchmark_ConProd():
+class Benchmark_ConProd( Benchmark ):
def __init__(self):
+ self.file_name = "bench_conprod"
self.name = "Consumer Producer Stress Benchmark"
self.descrition = """This benchmark makes 1000000 allocations in each of
n producer threads. The allocations are shared through n
@@ -22,8 +24,9 @@ class Benchmark_ConProd():
self.maxsize = [2 ** x for x in range(6, 16)]
self.nthreads = range(1, multiprocessing.cpu_count() + 1)
- self.results = {}
-
+ self.results = {"args" : {"nthreads" : self.nthreads, "maxsize" : self.maxsize},
+ "targets" : self.targets}
+
def prepare(self, verbose=False):
req = ["build/bench_conprod"]
for r in req:
@@ -36,9 +39,8 @@ class Benchmark_ConProd():
if verbose:
print(r, "found and executable.")
return True
-
- def run(self, verbose=False, save=False, runs=3):
+ def run(self, verbose=False, runs=3):
args_permutations = [(x,y) for x in self.nthreads for y in self.maxsize]
n = len(args_permutations)
for run in range(1, runs + 1):
@@ -95,38 +97,40 @@ class Benchmark_ConProd():
self.results[key].append(result)
print()
- if save:
- with open(self.name + ".save", "wb") as f:
- pickle.dump(self.results, f)
return True
def summary(self):
# MAXSIZE fixed
- for size in self.maxsize:
- for target in self.targets:
- y_vals = [0] * len(self.nthreads)
+ nthreads = self.results["args"]["nthreads"]
+ maxsize = self.results["args"]["maxsize"]
+ targets = self.results["targets"]
+
+ y_mapping = {v : i for i, v in enumerate(nthreads)}
+ for size in maxsize:
+ for target in targets:
+ y_vals = [0] * len(nthreads)
for mid, measures in self.results.items():
if mid[0] == target and mid[2] == size:
d = []
for m in measures:
# nthreads/time = MOPS/S
d.append(mid[1]/float(m["cpu-clock:ku"]))
- y_vals[mid[1]-1] = np.mean(d)
- plt.plot(self.nthreads, y_vals, label=target, linestyle='-', marker='.')
+ y_vals[y_mapping[mid[1]]] = np.mean(d)
+ plt.plot(nthreads, y_vals, label=target, linestyle='-', marker='.')
plt.legend()
plt.xlabel("consumers/producers")
plt.ylabel("MOPS/s")
plt.title("Consumer Producer: " + str(size) + "B")
- plt.savefig("Conprod." + str(size) + "B.png")
+ plt.savefig(self.file_name + "." + str(size) + "B.png")
plt.clf()
# NTHREADS fixed
- y_mapping = {v : i for i, v in enumerate(self.maxsize)}
- x_vals = [i + 1 for i in range(0, len(self.maxsize))]
- for n in self.nthreads:
- for target in self.targets:
- y_vals = [0] * len(self.maxsize)
+ y_mapping = {v : i for i, v in enumerate(maxsize)}
+ x_vals = [i + 1 for i in range(0, len(maxsize))]
+ for n in nthreads:
+ for target in targets:
+ y_vals = [0] * len(maxsize)
for mid, measures in self.results.items():
if mid[0] == target and mid[1] == n:
d = []
@@ -137,11 +141,11 @@ class Benchmark_ConProd():
plt.plot(x_vals, y_vals, label=target, linestyle='-', marker='.')
plt.legend()
- plt.xticks(x_vals, self.maxsize)
+ plt.xticks(x_vals, maxsize)
plt.xlabel("size in B")
plt.ylabel("MOPS/s")
plt.title("Consumer Producer: " + str(n) + "thread(s)")
- plt.savefig("Conprod." + str(n) + "thread.png")
+ plt.savefig(self.file_name + "." + str(n) + "thread.png")
plt.clf()
conprod = Benchmark_ConProd()
diff --git a/bench_loop.py b/bench_loop.py
index c48e682..f5606c6 100644
--- a/bench_loop.py
+++ b/bench_loop.py
@@ -6,14 +6,16 @@ import numpy as np
import os
import subprocess
+from benchmark import Benchmark
from common_targets import common_targets
cmd = ("perf stat -x\; -e cpu-clock:k,cache-references,cache-misses,cycles,"
"instructions,branches,faults,migrations "
"build/bench_loop{} 1.2 {} 1000000 {} 10")
-class Benchmark_Loop():
+class Benchmark_Loop( Benchmark ):
def __init__(self):
+ self.file_name = "bench_loop"
self.name = "Loop Stress Benchmark"
self.descrition = """This benchmark makes n allocations in t concurrent threads.
How allocations are freed can be changed with the benchmark
@@ -22,8 +24,9 @@ class Benchmark_Loop():
self.maxsize = [2 ** x for x in range(6, 16)]
self.nthreads = range(1, multiprocessing.cpu_count() * 2 + 1)
- self.results = {}
-
+ self.results = {"args" : {"nthreads" : self.nthreads, "maxsize": self.maxsize},
+ "targets" : self.targets}
+
def prepare(self, verbose=False):
req = ["build/bench_loop"]
for r in req:
@@ -38,7 +41,7 @@ class Benchmark_Loop():
return True
- def run(self, verbose=False, save=False, runs=3):
+ def run(self, verbose=False, runs=3):
args_permutations = [(x,y) for x in self.nthreads for y in self.maxsize]
n = len(args_permutations)
for run in range(1, runs + 1):
@@ -95,38 +98,40 @@ class Benchmark_Loop():
self.results[key].append(result)
print()
- if save:
- with open(self.name + ".save", "wb") as f:
- pickle.dump(self.results, f)
return True
def summary(self):
# MAXSIZE fixed
- for size in self.maxsize:
- for target in self.targets:
- y_vals = [0] * len(self.nthreads)
+ nthreads = self.results["args"]["nthreads"]
+ maxsize = self.results["args"]["maxsize"]
+ targets = self.results["targets"]
+
+ y_mapping = {v : i for i, v in enumerate(nthreads)}
+ for size in maxsize:
+ for target in targets:
+ y_vals = [0] * len(nthreads)
for mid, measures in self.results.items():
if mid[0] == target and mid[2] == size:
d = []
for m in measures:
# nthreads/time = MOPS/S
d.append(mid[1]/float(m["cpu-clock:ku"]))
- y_vals[mid[1]-1] = np.mean(d)
- plt.plot(self.nthreads, y_vals, marker='.',linestyle='-', label=target)
+ y_vals[y_mapping[mid[1]]] = np.mean(d)
+ plt.plot(nthreads, y_vals, marker='.', linestyle='-', label=target)
plt.legend()
plt.xlabel("threads")
plt.ylabel("MOPS/s")
plt.title("Loop: " + str(size) + "B")
- plt.savefig("Loop." + str(size) + "B.png")
+ plt.savefig(self.file_name + "." + str(size) + "B.png")
plt.clf()
# NTHREADS fixed
- y_mapping = {v : i for i, v in enumerate(self.maxsize)}
- x_vals = [i + 1 for i in range(0, len(self.maxsize))]
- for n in self.nthreads:
- for target in self.targets:
- y_vals = [0] * len(self.maxsize)
+ y_mapping = {v : i for i, v in enumerate(maxsize)}
+ x_vals = [i + 1 for i in range(0, len(maxsize))]
+ for n in nthreads:
+ for target in targets:
+ y_vals = [0] * len(maxsize)
for mid, measures in self.results.items():
if mid[0] == target and mid[1] == n:
d = []
@@ -137,11 +142,11 @@ class Benchmark_Loop():
plt.plot(x_vals, y_vals, marker='.', linestyle='-', label=target)
plt.legend()
- plt.xticks(x_vals, self.maxsize)
+ plt.xticks(x_vals, maxsize)
plt.xlabel("size in B")
plt.ylabel("MOPS/s")
plt.title("Loop: " + str(n) + "thread(s)")
- plt.savefig("Loop." + str(n) + "thread.png")
+ plt.savefig(self.file_name + "." + str(n) + "thread.png")
plt.clf()
loop = Benchmark_Loop()
diff --git a/bench_mysql.py b/bench_mysql.py
index f3ce9cf..0627537 100644
--- a/bench_mysql.py
+++ b/bench_mysql.py
@@ -10,6 +10,7 @@ import shutil
import subprocess
from time import sleep
+from benchmark import Benchmark
from common_targets import common_targets
cwd = os.getcwd()
@@ -23,8 +24,9 @@ cmd = ("sysbench oltp_read_only --threads={} --time=10 --max-requests=0 "
server_cmd = "mysqld -h {0}/mysql_test --socket={0}/mysql_test/socket".format(cwd).split(" ")
-class Benchmark_MYSQL():
+class Benchmark_MYSQL( Benchmark ):
def __init__(self):
+ self.file_name = "bench_mysql"
self.name = "MYSQL Stress Benchmark"
self.descrition = """See sysbench documentation."""
self.targets = copy.copy(common_targets)
@@ -32,7 +34,7 @@ class Benchmark_MYSQL():
self.nthreads = range(1, multiprocessing.cpu_count() * 2 + 1)
self.results = {}
-
+
def start_and_wait_for_server(self, env, verbose, log=None):
if not log:
log = os.devnull
@@ -148,15 +150,18 @@ class Benchmark_MYSQL():
return True
def summary(self):
- for target in self.targets:
- y_vals = [0] * len(self.nthreads)
+ nthreads = self.results["args"]["nthreads"]
+ targets = self.results["targets"]
+
+ for target in targets:
+ y_vals = [0] * len(nthreads)
for mid, measures in self.results.items():
if mid[0] == target:
d = []
for m in measures:
d.append(int(m["transactions"]))
- y_vals[mid[1]-1] = np.mean(d)
- plt.plot(self.nthreads, y_vals, label=target, linestyle='-', marker='.')
+ y_vals[mid[1]-nthreads[0]] = np.mean(d)
+ plt.plot(nthreads, y_vals, label=target, linestyle='-', marker='.')
plt.legend()
plt.xlabel("threads")
diff --git a/benchmark.py b/benchmark.py
new file mode 100644
index 0000000..9b7316a
--- /dev/null
+++ b/benchmark.py
@@ -0,0 +1,15 @@
+import pickle
+
+class Benchmark (object):
+ def save(self, verbose=False):
+ if verbose:
+ print("Saving results to:", self.file_name + ".save")
+ with open(self.file_name + ".save", "wb") as f:
+ pickle.dump(self.results, f)
+
+ def load(self, verbose=False):
+ if verbose:
+ print("Loading results from:", self.file_name + ".save")
+ with open(self.file_name + ".save", "rb") as f:
+ self.results = pickle.load(f)
+