aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFlorian Fischer <florian.fl.fischer@fau.de>2018-07-15 16:19:26 +0200
committerFlorian Fischer <florian.fl.fischer@fau.de>2018-07-15 16:19:26 +0200
commit0d6091831f64ebcb28fe4f495ca21815f0ddb0a6 (patch)
tree933b11524ced55ac7e85f14c7157423dc3e29bdc
parentb6cd4823e102dece17928c9b998a7b2287cb4b95 (diff)
downloadallocbench-0d6091831f64ebcb28fe4f495ca21815f0ddb0a6.tar.gz
allocbench-0d6091831f64ebcb28fe4f495ca21815f0ddb0a6.zip
add initial mysql, loop and consumer/producer benchmarks
-rwxr-xr-xbench.py24
-rw-r--r--bench_conprod.py144
-rw-r--r--bench_loop.py144
-rw-r--r--bench_mysql.py210
4 files changed, 522 insertions, 0 deletions
diff --git a/bench.py b/bench.py
new file mode 100755
index 0000000..b7b6592
--- /dev/null
+++ b/bench.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+
+from bench_loop import loop
+from bench_conprod import conprod
+from bench_mysql import mysql
+
benchmarks = [loop, conprod, mysql]


def main():
    """Prepare, run, summarize and (optionally) clean up each benchmark.

    A benchmark whose prepare() or run() reports failure is skipped;
    the remaining benchmarks still get their turn.
    """
    for bench in benchmarks:
        print("Preparing", bench.name)
        if not bench.prepare():
            continue
        print("Running", bench.name)
        if not bench.run(runs=1):
            continue
        print("Summarizing", bench.name)
        bench.summary()
        # cleanup() is optional — only some benchmarks define it.
        cleanup = getattr(bench, "cleanup", None)
        if cleanup is not None:
            print("Cleaning after", bench.name)
            cleanup()


if __name__ == "__main__":
    main()
diff --git a/bench_conprod.py b/bench_conprod.py
new file mode 100644
index 0000000..a47a8e6
--- /dev/null
+++ b/bench_conprod.py
@@ -0,0 +1,144 @@
+import csv
+import pickle
+import matplotlib.pyplot as plt
+import multiprocessing
+import numpy as np
+import os
+import subprocess
+
+from common_targets import common_targets
+
# Benchmark command template: perf stat (';'-separated CSV output via -x\;)
# wrapping memusage wrapping the conprod binary.
# Placeholders: {0} target build suffix, {1} thread count (used for
# producers, consumers and sync objects), {2} max allocation size.
# The first fragment is a raw string: "\;" is not a valid Python escape
# sequence and warns on modern interpreters; r"..." keeps the same bytes.
cmd = (r"perf stat -x\; -e cpu-clock:k,cache-references,cache-misses,cycles,"
       "instructions,branches,faults,migrations "
       "build/memusage build/bench_conprod{0} {1} {1} {1} 1000000 {2}")
+
class Benchmark_ConProd():
    """Driver for the producer/consumer allocator stress benchmark.

    Runs build/bench_conprod under perf(1) and memusage for every
    (nthreads, maxsize) combination and every target allocator, collects
    the perf counters per run, and plots MOPS/s summaries.
    """

    def __init__(self):
        self.name = "Consumer Producer Stress Benchmark"
        # NOTE(review): attribute name keeps the original misspelling
        # ("descrition") so external consumers are not broken.
        self.descrition = """This benchmark makes 1000000 allocations in each of
                          n producer threads. The allocations are shared through n
                          synchronisation objects and freed/consumed by n threads."""
        self.targets = common_targets
        # Allocation sizes: powers of two, 64 B .. 32 KiB.
        self.maxsize = [2 ** x for x in range(6, 16)]
        self.nthreads = range(1, multiprocessing.cpu_count() + 1)

        # {(target, nthreads, maxsize): [result dict per run]}
        self.results = {}

    def prepare(self, verbose=False):
        """Check that the required binaries exist and are executable.

        Returns True when all requirements are met, False otherwise.
        """
        req = ["build/bench_conprod", "build/memusage"]
        for r in req:
            if not os.path.isfile(r):
                print(r, "not found")
                return False
            if not os.access(r, os.X_OK):
                # BUGFIX: previously reported "not found" even though the
                # file exists but lacks the execute bit.
                print(r, "not executable")
                return False
            if verbose:
                print(r, "found and executable.")
        return True

    def run(self, verbose=False, save=False, runs=3):
        """Execute the benchmark `runs` times for every argument permutation.

        Results are accumulated in self.results (and pickled when `save`).
        Returns False and aborts on the first failing invocation.
        """
        args_permutations = [(x, y) for x in self.nthreads for y in self.maxsize]
        n = len(args_permutations)
        for run in range(1, runs + 1):
            print(str(run) + ". run")

            for i, args in enumerate(args_permutations):
                print(i + 1, "of", n, "\r", end='')

                # run cmd for each target
                for tname, t in self.targets.items():

                    # t == (build suffix, LD_PRELOAD library path)
                    env = {"LD_PRELOAD": t[1]} if t[1] != "" else None

                    target_cmd = cmd.format(t[0], *args).split(" ")
                    if verbose:
                        print("\n" + tname, t, "\n", " ".join(target_cmd), "\n")

                    p = subprocess.run(target_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
                                       env=env, universal_newlines=True)
                    if p.returncode != 0:
                        print("\n" + " ".join(target_cmd), "exited with", p.returncode, ".\n Aborting Benchmark.")
                        print(tname, t)
                        print(p.stderr)
                        print(p.stdout)
                        return False

                    if "ERROR: ld.so" in p.stderr:
                        print("\nPreloading of", t[1], "failed for", tname, ".\n Aborting Benchmark.")
                        return False

                    # memusage writes before this marker, perf after it.
                    output = p.stderr.split("# End memusage\n")
                    if len(output) != 2:
                        print()
                        print(output)
                        print(tname, t)
                        print("Aborting output is not correct")
                        # BUGFIX: previously fell through and indexed
                        # output[1] below, raising IndexError instead of
                        # aborting cleanly.
                        return False

                    result = {}
                    # Strip all whitespace from memusage output
                    result["memusage"] = [x.replace(" ", "").replace("\t", "")
                                          for x in output[0].splitlines()]

                    # Handle perf output: value;unit;event-name;... rows.
                    csvreader = csv.reader(output[1].splitlines(), delimiter=';')
                    for row in csvreader:
                        result[row[2].replace("\\", "")] = row[0].replace("\\", "")
                    key = (tname, *args)
                    if key not in self.results:
                        self.results[key] = [result]
                    else:
                        self.results[key].append(result)

        print()
        if save:
            with open(self.name + ".save", "wb") as f:
                pickle.dump(self.results, f)
        return True

    def summary(self):
        """Plot mean MOPS/s per target: one figure per size, one per thread count."""
        # MAXSIZE fixed
        for size in self.maxsize:
            for target in self.targets:
                y_vals = [0] * len(self.nthreads)
                for mid, measures in self.results.items():
                    # mid == (target, nthreads, maxsize)
                    if mid[0] == target and mid[2] == size:
                        d = []
                        for m in measures:
                            # nthreads/time = MOPS/S
                            # NOTE(review): cmd requests "cpu-clock:k" but this
                            # reads "cpu-clock:ku" — verify the event name in
                            # the actual perf output matches.
                            d.append(mid[1]/float(m["cpu-clock:ku"]))
                        y_vals[mid[1]-1] = np.mean(d)
                plt.plot(self.nthreads, y_vals, label=target)

            plt.legend()
            plt.xlabel("consumers/producers")
            plt.ylabel("MOPS/s")
            plt.title("Consumer Producer: " + str(size) + "B")
            plt.savefig("Conprod." + str(size) + "B.png")
            plt.clf()

        # NTHREADS fixed
        y_mapping = {v: i for i, v in enumerate(self.maxsize)}
        x_vals = [i + 1 for i in range(0, len(self.maxsize))]
        for n in self.nthreads:
            for target in self.targets:
                y_vals = [0] * len(self.maxsize)
                for mid, measures in self.results.items():
                    if mid[0] == target and mid[1] == n:
                        d = []
                        for m in measures:
                            # nthreads/time = MOPS/S
                            d.append(n/float(m["cpu-clock:ku"]))
                        y_vals[y_mapping[mid[2]]] = np.mean(d)
                plt.plot(x_vals, y_vals, label=target)

            plt.legend()
            plt.xticks(x_vals, self.maxsize)
            plt.xlabel("size in B")
            plt.ylabel("MOPS/s")
            plt.title("Consumer Producer: " + str(n) + "thread(s)")
            plt.savefig("Conprod." + str(n) + "thread.png")
            plt.clf()

conprod = Benchmark_ConProd()
diff --git a/bench_loop.py b/bench_loop.py
new file mode 100644
index 0000000..8f11b61
--- /dev/null
+++ b/bench_loop.py
@@ -0,0 +1,144 @@
+import csv
+import pickle
+import matplotlib.pyplot as plt
+import multiprocessing
+import numpy as np
+import os
+import subprocess
+
+from common_targets import common_targets
+
# Benchmark command template: perf stat (';'-separated CSV output via -x\;)
# wrapping memusage wrapping the loop binary.
# Placeholders: {} target build suffix, {} thread count, {} max allocation size.
# The first fragment is a raw string: "\;" is not a valid Python escape
# sequence and warns on modern interpreters; r"..." keeps the same bytes.
cmd = (r"perf stat -x\; -e cpu-clock:k,cache-references,cache-misses,cycles,"
       "instructions,branches,faults,migrations "
       "build/memusage build/bench_loop{} 1.2 {} 1000000 {} 10")
+
class Benchmark_Loop():
    """Driver for the allocation-loop stress benchmark.

    Runs build/bench_loop under perf(1) and memusage for every
    (nthreads, maxsize) combination and every target allocator, collects
    the perf counters per run, and plots MOPS/s summaries.
    """

    def __init__(self):
        self.name = "Loop Stress Benchmark"
        # NOTE(review): attribute name keeps the original misspelling
        # ("descrition") so external consumers are not broken.
        # BUGFIX: the original line ended with a stray comma, turning this
        # attribute into a 1-tuple instead of a string.
        self.descrition = """This benchmark makes n allocations in t concurrent threads.
                          How allocations are freed can be changed with the benchmark
                          version"""
        self.targets = common_targets
        # Allocation sizes: powers of two, 64 B .. 32 KiB.
        self.maxsize = [2 ** x for x in range(6, 16)]
        self.nthreads = range(1, multiprocessing.cpu_count() * 2 + 1)

        # {(target, nthreads, maxsize): [result dict per run]}
        self.results = {}

    def prepare(self, verbose=False):
        """Check that the required binaries exist and are executable.

        Returns True when all requirements are met, False otherwise.
        """
        req = ["build/bench_loop", "build/memusage"]
        for r in req:
            if not os.path.isfile(r):
                print(r, "not found")
                return False
            if not os.access(r, os.X_OK):
                # BUGFIX: previously reported "not found" even though the
                # file exists but lacks the execute bit.
                print(r, "not executable")
                return False
            if verbose:
                print(r, "found and executable.")
        return True

    def run(self, verbose=False, save=False, runs=3):
        """Execute the benchmark `runs` times for every argument permutation.

        Results are accumulated in self.results (and pickled when `save`).
        Returns False and aborts on the first failing invocation.
        """
        args_permutations = [(x, y) for x in self.nthreads for y in self.maxsize]
        n = len(args_permutations)
        for run in range(1, runs + 1):
            print(str(run) + ". run")

            for i, args in enumerate(args_permutations):
                print(i + 1, "of", n, "\r", end='')

                # run cmd for each target
                for tname, t in self.targets.items():

                    # t == (build suffix, LD_PRELOAD library path)
                    env = {"LD_PRELOAD": t[1]} if t[1] != "" else None

                    target_cmd = cmd.format(t[0], *args).split(" ")
                    if verbose:
                        print("\n" + tname, t, "\n", " ".join(target_cmd), "\n")

                    p = subprocess.run(target_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
                                       env=env, universal_newlines=True)
                    if p.returncode != 0:
                        print("\n" + " ".join(target_cmd), "exited with", p.returncode, ".\n Aborting Benchmark.")
                        print(tname, t)
                        print(p.stderr)
                        print(p.stdout)
                        return False

                    if "ERROR: ld.so" in p.stderr:
                        print("\nPreloading of", t[1], "failed for", tname, ".\n Aborting Benchmark.")
                        return False

                    # memusage writes before this marker, perf after it.
                    output = p.stderr.split("# End memusage\n")
                    if len(output) != 2:
                        print()
                        print(output)
                        print(tname, t)
                        print("Aborting output is not correct")
                        # BUGFIX: previously fell through and indexed
                        # output[1] below, raising IndexError instead of
                        # aborting cleanly.
                        return False

                    result = {}
                    # Strip all whitespace from memusage output
                    result["memusage"] = [x.replace(" ", "").replace("\t", "")
                                          for x in output[0].splitlines()]

                    # Handle perf output: value;unit;event-name;... rows.
                    csvreader = csv.reader(output[1].splitlines(), delimiter=';')
                    for row in csvreader:
                        result[row[2].replace("\\", "")] = row[0].replace("\\", "")
                    key = (tname, *args)
                    if key not in self.results:
                        self.results[key] = [result]
                    else:
                        self.results[key].append(result)

        print()
        if save:
            with open(self.name + ".save", "wb") as f:
                pickle.dump(self.results, f)
        return True

    def summary(self):
        """Plot mean MOPS/s per target: one figure per size, one per thread count."""
        # MAXSIZE fixed
        for size in self.maxsize:
            for target in self.targets:
                y_vals = [0] * len(self.nthreads)
                for mid, measures in self.results.items():
                    # mid == (target, nthreads, maxsize)
                    if mid[0] == target and mid[2] == size:
                        d = []
                        for m in measures:
                            # nthreads/time = MOPS/S
                            # NOTE(review): cmd requests "cpu-clock:k" but this
                            # reads "cpu-clock:ku" — verify the event name in
                            # the actual perf output matches.
                            d.append(mid[1]/float(m["cpu-clock:ku"]))
                        y_vals[mid[1]-1] = np.mean(d)
                plt.plot(self.nthreads, y_vals, label=target)

            plt.legend()
            plt.xlabel("threads")
            plt.ylabel("MOPS/s")
            plt.title("Loop: " + str(size) + "B")
            plt.savefig("Loop." + str(size) + "B.png")
            plt.clf()

        # NTHREADS fixed
        y_mapping = {v: i for i, v in enumerate(self.maxsize)}
        x_vals = [i + 1 for i in range(0, len(self.maxsize))]
        for n in self.nthreads:
            for target in self.targets:
                y_vals = [0] * len(self.maxsize)
                for mid, measures in self.results.items():
                    if mid[0] == target and mid[1] == n:
                        d = []
                        for m in measures:
                            # nthreads/time = MOPS/S
                            d.append(n/float(m["cpu-clock:ku"]))
                        y_vals[y_mapping[mid[2]]] = np.mean(d)
                plt.plot(x_vals, y_vals, label=target)

            plt.legend()
            plt.xticks(x_vals, self.maxsize)
            plt.xlabel("size in B")
            plt.ylabel("MOPS/s")
            plt.title("Loop: " + str(n) + "thread(s)")
            plt.savefig("Loop." + str(n) + "thread.png")
            plt.clf()

loop = Benchmark_Loop()
diff --git a/bench_mysql.py b/bench_mysql.py
new file mode 100644
index 0000000..5312c89
--- /dev/null
+++ b/bench_mysql.py
@@ -0,0 +1,210 @@
+import csv
+import io
+import matplotlib.pyplot as plt
+import multiprocessing
+import numpy as np
+import os
+import pickle
+import re
+import shutil
+import subprocess
+from time import sleep
+
+from common_targets import common_targets
+
# Working directory at import time; mysqld's datadir and socket live under
# <cwd>/mysql_test.
cwd = os.getcwd()

# sysbench "prepare" step: creates and fills the test table (1M rows) in the
# sbtest database via the local socket.
prepare_cmd = ("sysbench oltp_read_only --db-driver=mysql --mysql-user=root "
               "--mysql-socket="+cwd+"/mysql_test/socket --table-size=1000000 prepare").split(" ")

# sysbench "run" step template; placeholders: {} thread count, {} cwd.
cmd = ("sysbench oltp_read_only --threads={} --time=10 --max-requests=0 "
       "--db-driver=mysql --mysql-user=root --mysql-socket={}/mysql_test/socket run")

# Command to start the mysqld server with datadir/socket under cwd.
server_cmd = "mysqld -h {0}/mysql_test --socket={0}/mysql_test/socket".format(cwd).split(" ")
+
+
class Benchmark_MYSQL():
    """Driver for the sysbench OLTP read-only benchmark against mysqld.

    Starts a local mysqld per target allocator (via LD_PRELOAD), runs
    sysbench for every thread count, and plots mean transactions.
    """

    def __init__(self):
        self.name = "MYSQL Stress Benchmark"
        # NOTE(review): attribute name keeps the original misspelling
        # ("descrition") so external consumers are not broken.
        self.descrition = """See sysbench documentation."""
        # BUGFIX: copy before deleting. Deleting from common_targets in
        # place would remove "klmalloc" from the shared module-level dict
        # used by every other benchmark.
        self.targets = dict(common_targets)
        del self.targets["klmalloc"]
        self.nthreads = range(1, multiprocessing.cpu_count() * 2 + 1)

        # {(target, nthreads): [result dict per run]}
        self.results = {}

    def start_and_wait_for_server(self, env, verbose, log=None):
        """Start mysqld with the given environment and wait for it.

        Server output is appended to `log` (os.devnull when None).
        Returns True once the server is assumed to be up.
        """
        if not log:
            log = os.devnull

        with open(log, "ab") as f:
            self.server = subprocess.Popen(server_cmd, env=env,
                                           stdout=f,
                                           stderr=f,
                                           universal_newlines=True)
        # TODO make sure server comes up instead of sleeping a fixed delay.
        sleep(5)
        return True

    def prepare(self, verbose=False):
        """Create the mysqld datadir (first run), then the sbtest DB and table.

        Returns True when every setup step succeeded.
        NOTE(review): database/table creation runs on every call; on a
        pre-existing setup CREATE DATABASE fails and prepare() returns
        False — confirm whether repeated prepare() is intended.
        """
        ret = True
        # Setup mysqld
        if not os.path.exists("mysql_test"):
            print("Prepare mysqld directory and database")
            os.makedirs("mysql_test")
            with open(os.devnull, "w") as devnull:
                p = subprocess.run(["mysql_install_db", "--basedir=/usr",
                                    "--datadir={}/mysql_test".format(os.getcwd())],
                                   stdout=devnull, stderr=devnull)
                ret = ret and p.returncode == 0
        if not ret:
            return ret

        if not self.start_and_wait_for_server(None, verbose, "mysqld.log"):
            print("Starting mysqld failed")
            return False

        p = subprocess.run("mysql -u root -S {}/mysql_test/socket".format(cwd).split(" "),
                           input=b"CREATE DATABASE sbtest;\n")
        ret = ret and p.returncode == 0
        if not ret:
            return ret

        print("Prepare test table")
        subprocess.run(prepare_cmd)
        self.server.kill()
        # kill() delivers SIGKILL, so a clean shutdown reports -9.
        ret = ret and self.server.wait() == -9

        return ret

    def cleanup(self):
        """Remove the mysqld data directory created by prepare()."""
        if os.path.exists("mysql_test"):
            print("Delete mysqld directory")
            shutil.rmtree("mysql_test")

    def run(self, verbose=False, save=False, runs=3):
        """Run sysbench `runs` times against each target's mysqld.

        Results are accumulated in self.results (and pickled when `save`).
        Returns False and aborts on the first failure.
        """
        cwd = os.getcwd()
        for run in range(1, runs + 1):
            print(str(run) + ". run")

            # run cmd for each target
            n = len(self.nthreads)
            for tname, t in self.targets.items():
                # No custom build mysqld server supported yet.
                env = {"LD_PRELOAD": t[1]} if t[1] != "" else None

                if not self.start_and_wait_for_server(env, verbose, "mysqld.log"):
                    print("Can't start server for", tname + ".")
                    print("Aborting Benchmark.")
                    return False

                for i in self.nthreads:
                    print(tname + ":", i, "of", n, "\r", end='')

                    target_cmd = cmd.format(i, cwd).split(" ")
                    p = subprocess.run(target_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
                                       universal_newlines=True)

                    if p.returncode != 0:
                        print("\n" + " ".join(target_cmd), "exited with", p.returncode, ".\n Aborting Benchmark.")
                        print(tname, t)
                        print(p.stderr)
                        print(p.stdout)
                        self.server.kill()
                        self.server.wait()
                        return False

                    # Parse the sysbench summary. Raw strings avoid the
                    # invalid-escape warnings of "\s"/"\d" in plain strings;
                    # the '.' in the latency patterns is escaped so it only
                    # matches a decimal point.
                    result = {}
                    result["transactions"] = re.search(r"transactions:\s*(\d*)", p.stdout).group(1)
                    result["queries"] = re.search(r"queries:\s*(\d*)", p.stdout).group(1)
                    # Latency
                    result["min"] = re.search(r"min:\s*(\d*\.\d*)", p.stdout).group(1)
                    result["avg"] = re.search(r"avg:\s*(\d*\.\d*)", p.stdout).group(1)
                    result["max"] = re.search(r"max:\s*(\d*\.\d*)", p.stdout).group(1)

                    key = (tname, i)
                    if key not in self.results:
                        self.results[key] = [result]
                    else:
                        self.results[key].append(result)

                print()
                self.server.kill()
                self.server.wait()

        if save:
            with open(self.name + ".save", "wb") as f:
                pickle.dump(self.results, f)
        return True

    def summary(self):
        """Plot mean transactions per thread count for every target."""
        for target in self.targets:
            y_vals = [0] * len(self.nthreads)
            for mid, measures in self.results.items():
                # mid == (target, nthreads)
                if mid[0] == target:
                    d = []
                    for m in measures:
                        d.append(int(m["transactions"]))
                    y_vals[mid[1]-1] = np.mean(d)
            plt.plot(self.nthreads, y_vals, label=target)

        plt.legend()
        plt.xlabel("threads")
        plt.ylabel("transactions")
        plt.title("sysbench oltp read only")
        plt.savefig("mysql.ro.png")
        plt.clf()

mysql = Benchmark_MYSQL()