aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFlorian Fischer <florian.fl.fischer@fau.de>2019-08-27 16:54:17 +0200
committerFlorian Fischer <florian.fl.fischer@fau.de>2019-08-27 16:54:17 +0200
commit93e3aac6315c17a74e63f02053a3e8e2e6b21928 (patch)
tree6f1c22dff617e05807a88c107389654cbaf7d090
parentce532bbe9c03e52f80d263ea02038e9b81696e89 (diff)
downloadallocbench-93e3aac6315c17a74e63f02053a3e8e2e6b21928.tar.gz
allocbench-93e3aac6315c17a74e63f02053a3e8e2e6b21928.zip
improve benchmark code quality using pylint
The description Benchmark member is replaced with class docstrings
-rw-r--r--src/benchmarks/__init__.py1
-rw-r--r--src/benchmarks/cfrac.py8
-rw-r--r--src/benchmarks/dj_trace.py139
-rw-r--r--src/benchmarks/espresso.py11
-rw-r--r--src/benchmarks/falsesharing.py36
-rw-r--r--src/benchmarks/httpd.py9
-rw-r--r--src/benchmarks/larson.py30
-rw-r--r--src/benchmarks/lld.py34
-rw-r--r--src/benchmarks/loop.py19
-rw-r--r--src/benchmarks/mysql.py36
-rw-r--r--src/benchmarks/realloc.py11
-rw-r--r--src/benchmarks/t_test1.py16
12 files changed, 190 insertions, 160 deletions
diff --git a/src/benchmarks/__init__.py b/src/benchmarks/__init__.py
index e69de29..7117bb2 100644
--- a/src/benchmarks/__init__.py
+++ b/src/benchmarks/__init__.py
@@ -0,0 +1 @@
+"""allocbench benchmark definitions"""
diff --git a/src/benchmarks/cfrac.py b/src/benchmarks/cfrac.py
index 44bd213..85537a9 100644
--- a/src/benchmarks/cfrac.py
+++ b/src/benchmarks/cfrac.py
@@ -1,9 +1,11 @@
+""" Definition of the cfrac benchmark"""
+
from src.benchmark import Benchmark
-class Benchmark_Cfrac(Benchmark):
+class BenchmarkCfrac(Benchmark):
+ """TODO"""
def __init__(self):
self.name = "cfrac"
- self.descrition = """TODO."""
self.cmd = "cfrac{binary_suffix} {num}"
@@ -37,4 +39,4 @@ class Benchmark_Cfrac(Benchmark):
self.export_stats_to_dataref("VmHWM")
-cfrac = Benchmark_Cfrac()
+cfrac = BenchmarkCfrac()
diff --git a/src/benchmarks/dj_trace.py b/src/benchmarks/dj_trace.py
index b262255..b837a9e 100644
--- a/src/benchmarks/dj_trace.py
+++ b/src/benchmarks/dj_trace.py
@@ -1,57 +1,57 @@
-import matplotlib.pyplot as plt
-import numpy as np
+"""Benchmark definition using the traces collected by DJ Delorie"""
+
import os
-from urllib.request import urlretrieve
import sys
import re
+from urllib.request import urlretrieve
+import matplotlib.pyplot as plt
+import numpy as np
from src.benchmark import Benchmark
from src.util import print_status
-comma_sep_number_re = "(?:\\d*(?:,\\d*)?)*"
-rss_re = "(?P<rss>" + comma_sep_number_re + ")"
-time_re = "(?P<time>" + comma_sep_number_re + ")"
-cycles_re = re.compile("^{} cycles$".format(time_re))
-cpu_time_re = re.compile("^{} usec across.*threads$".format(time_re))
+COMMA_SEP_NUMBER_RE = "(?:\\d*(?:,\\d*)?)*"
+RSS_RE = f"(?P<rss>{COMMA_SEP_NUMBER_RE})"
+TIME_RE = f"(?P<time>{COMMA_SEP_NUMBER_RE})"
+
+CYCLES_RE = re.compile(f"^{TIME_RE} cycles$")
+CPU_TIME_RE = re.compile(f"^{TIME_RE} usec across.*threads$")
+
+MAX_RSS_RE = re.compile(f"^{RSS_RE} Kb Max RSS")
+IDEAL_RSS_RE = re.compile(f"^{RSS_RE} Kb Max Ideal RSS")
-max_rss_re = re.compile("^{} Kb Max RSS".format(rss_re))
-ideal_rss_re = re.compile("^{} Kb Max Ideal RSS".format(rss_re))
+MALLOC_RE = re.compile(f"^Avg malloc time:\\s*{TIME_RE} in.*calls$")
+CALLOC_RE = re.compile(f"^Avg calloc time:\\s*{TIME_RE} in.*calls$")
+REALLOC_RE = re.compile(f"^Avg realloc time:\\s*{TIME_RE} in.*calls$")
+FREE_RE = re.compile(f"^Avg free time:\\s*{TIME_RE} in.*calls$")
-malloc_re = re.compile("^Avg malloc time:\\s*{} in.*calls$".format(time_re))
-calloc_re = re.compile("^Avg calloc time:\\s*{} in.*calls$".format(time_re))
-realloc_re = re.compile("^Avg realloc time:\\s*{} in.*calls$".format(time_re))
-free_re = re.compile("^Avg free time:\\s*{} in.*calls$".format(time_re))
+class BenchmarkDJTrace(Benchmark):
+ """DJ Trace Benchmark
+
+ This benchmark uses the workload simulator written by DJ Delorie to
+ simulate workloads provided by him under https://delorie.com/malloc. Those
+ workloads are generated from traces of real applications and are also used
+ by Delorie to measure improvements in the glibc allocator.
+ """
-class Benchmark_DJ_Trace(Benchmark):
def __init__(self):
self.name = "dj_trace"
- self.descrition = """This benchmark uses the workload simulator written
- by DJ Delorie to simulate workloads provided by
- him under https://delorie.com/malloc. Those
- workloads are generated from traces of real
- aplications and are also used by delorie to
- measure improvements in the glibc allocator."""
self.cmd = "trace_run{binary_suffix} dj_workloads/{workload}.wl"
self.measure_cmd = ""
- self.args = {
- "workload": [
- "389-ds-2",
- "dj",
- "dj2",
- "mt_test_one_alloc",
- "oocalc",
- "qemu-virtio",
- "qemu-win7",
- "proprietary-1",
- "proprietary-2",
- ]
- }
- self.results = {
- "389-ds-2": {
+ self.args = {"workload": ["389-ds-2",
+ "dj",
+ "dj2",
+ "mt_test_one_alloc",
+ "oocalc",
+ "qemu-virtio",
+ "qemu-win7",
+ "proprietary-1",
+ "proprietary-2"]}
+ self.results = {"389-ds-2": {
"malloc": 170500018, "calloc": 161787184,
"realloc": 404134, "free": 314856324,
"threads": 41},
@@ -79,8 +79,7 @@ class Benchmark_DJ_Trace(Benchmark):
"free": 319919727, "threads": 20},
"proprietary-2": {
"malloc": 9753948, "calloc": 4693,
- "realloc": 117, "free": 10099261, "threads": 19},
- }
+ "realloc": 117, "free": 10099261, "threads": 19}}
self.requirements = ["trace_run"]
super().__init__()
@@ -92,11 +91,11 @@ class Benchmark_DJ_Trace(Benchmark):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 1e2 / totalsize
- s = "\r%5.1f%% %*d / %d" % (
+ status = "\r%5.1f%% %*d / %d" % (
percent, len(str(totalsize)), readsofar, totalsize)
- sys.stderr.write(s)
+ sys.stderr.write(status)
else: # total size is unknown
- sys.stderr.write("\rdownloaded %d" % (readsofar,))
+ sys.stderr.write(f"\rdownloaded {readsofar}")
if not os.path.isdir("dj_workloads"):
os.mkdir("dj_workloads")
@@ -107,8 +106,8 @@ class Benchmark_DJ_Trace(Benchmark):
"proprietary-2": "92M", "qemu-win7": "23M",
"389-ds-2": "3.4G", "dj2": "294M"}
- for wl in self.args["workload"]:
- file_name = wl + ".wl"
+ for workload in self.args["workload"]:
+ file_name = workload + ".wl"
file_path = os.path.join("dj_workloads", file_name)
if not os.path.isfile(file_path):
if download_all is None:
@@ -119,47 +118,49 @@ class Benchmark_DJ_Trace(Benchmark):
else:
download_all = choice in ['', 'Y', 'y']
- if (not download_all and
- input("want to download {} ({}) [Y/n] ".format(wl, wl_sizes[wl])) not in ['', 'Y', 'y']):
- continue
+ if not download_all:
+ choice = input(f"want to download {workload} ({wl_sizes[workload]}) [Y/n] ")
+ if choice not in ['', 'Y', 'y']:
+ continue
- if download_all:
- print_status("downloading {} ({}) ...".format(wl, wl_sizes[wl]))
+ else:
+ print_status(f"downloading {workload} ({wl_sizes[workload]}) ...")
url = "http://www.delorie.com/malloc/" + file_name
urlretrieve(url, file_path, reporthook)
sys.stderr.write("\n")
available_workloads = []
- for wl in self.args["workload"]:
- file_name = wl + ".wl"
+ for workload in self.args["workload"]:
+ file_name = workload + ".wl"
file_path = os.path.join("dj_workloads", file_name)
if os.path.isfile(file_path):
- available_workloads.append(wl)
+ available_workloads.append(workload)
- if len(available_workloads) > 0:
+ if available_workloads:
self.args["workload"] = available_workloads
return True
return False
def process_output(self, result, stdout, stderr, allocator, perm, verbose):
- def to_int(s):
- return int(s.replace(',', ""))
+ def to_int(string):
+ return int(string.replace(',', ""))
+
- regexs = {7: malloc_re, 8: calloc_re, 9: realloc_re, 10: free_re}
+ regexs = {7: MALLOC_RE, 8: CALLOC_RE, 9: REALLOC_RE, 10: FREE_RE}
functions = {7: "malloc", 8: "calloc", 9: "realloc", 10: "free"}
- for i, l in enumerate(stdout.splitlines()):
+ for i, line in enumerate(stdout.splitlines()):
if i == 0:
- result["cycles"] = to_int(cycles_re.match(l).group("time"))
+ result["cycles"] = to_int(CYCLES_RE.match(line).group("time"))
elif i == 2:
- result["cputime"] = to_int(cpu_time_re.match(l).group("time"))
+ result["cputime"] = to_int(CPU_TIME_RE.match(line).group("time"))
elif i == 3:
- result["Max_RSS"] = to_int(max_rss_re.match(l).group("rss"))
+ result["Max_RSS"] = to_int(MAX_RSS_RE.match(line).group("rss"))
elif i == 4:
- result["Ideal_RSS"] = to_int(ideal_rss_re.match(l).group("rss"))
+ result["Ideal_RSS"] = to_int(IDEAL_RSS_RE.match(line).group("rss"))
elif i in [7, 8, 9, 10]:
- res = regexs[i].match(l)
+ res = regexs[i].match(line)
fname = functions[i]
result["avg_" + fname] = to_int(res.group("time"))
@@ -171,12 +172,12 @@ class Benchmark_DJ_Trace(Benchmark):
cycles_means = {allocator: {} for allocator in allocators}
for perm in self.iterate_args(args=args):
for i, allocator in enumerate(allocators):
- d = [x["cputime"] for x in self.results[allocator][perm]]
+ data = [x["cputime"] for x in self.results[allocator][perm]]
# data is in milliseconds
- cpu_time_means[allocator][perm] = np.mean(d)/1000
+ cpu_time_means[allocator][perm] = np.mean(data)/1000
- d = [x["cycles"] for x in self.results[allocator][perm]]
- cycles_means[allocator][perm] = np.mean(d)
+ data = [x["cycles"] for x in self.results[allocator][perm]]
+ cycles_means[allocator][perm] = np.mean(data)
plt.bar([i], cpu_time_means[allocator][perm], label=allocator,
color=allocators[allocator]["color"])
@@ -234,8 +235,8 @@ class Benchmark_DJ_Trace(Benchmark):
title='"Highwatermark of Vm (VmHWM)"',
filepostfix="newrss")
- del(allocators["Ideal_RSS"])
- del(self.results["stats"]["Ideal_RSS"])
+ del allocators["Ideal_RSS"]
+ del self.results["stats"]["Ideal_RSS"]
rss_means = {allocator: {} for allocator in allocators}
for perm in self.iterate_args(args=args):
@@ -337,7 +338,7 @@ class Benchmark_DJ_Trace(Benchmark):
# Changes. First allocator in allocators is the reference
fmt_changes = "{:<20} {:>14.0f}% {:>6.0f}% {:>6.0f}% {:>6.0f}% {:>6.0f}% {:>6.0f}%"
- for i, allocator in enumerate(list(allocators)[1:]):
+ for allocator in list(allocators)[1:]:
print("{0} Changes {1} {0}".format("-" * 10, allocator), file=f)
print(fmt.format("Workload", "Total", "malloc", "calloc",
"realloc", "free", "RSS"), file=f)
@@ -384,4 +385,4 @@ class Benchmark_DJ_Trace(Benchmark):
'\n', file=f)
-dj_trace = Benchmark_DJ_Trace()
+dj_trace = BenchmarkDJTrace()
diff --git a/src/benchmarks/espresso.py b/src/benchmarks/espresso.py
index b06d823..0916c6c 100644
--- a/src/benchmarks/espresso.py
+++ b/src/benchmarks/espresso.py
@@ -1,15 +1,18 @@
+"""Definition of the espresso benchmark"""
+
import os
from src.benchmark import Benchmark
import src.globalvars
-class Benchmark_Espresso(Benchmark):
+class BenchmarkEspresso(Benchmark):
+ """TODO"""
def __init__(self):
self.name = "espresso"
- self.descrition = """TODO."""
self.cmd = "espresso{binary_suffix} {file}"
- self.args = {"file": [os.path.join(src.globalvars.benchsrcdir, self.name, "largest.espresso")]}
+ self.args = {"file": [os.path.join(src.globalvars.benchsrcdir, self.name,
+ "largest.espresso")]}
super().__init__()
@@ -40,4 +43,4 @@ class Benchmark_Espresso(Benchmark):
self.export_stats_to_dataref("VmHWM")
-espresso = Benchmark_Espresso() \ No newline at end of file
+espresso = BenchmarkEspresso()
diff --git a/src/benchmarks/falsesharing.py b/src/benchmarks/falsesharing.py
index 85d0a92..203aeb8 100644
--- a/src/benchmarks/falsesharing.py
+++ b/src/benchmarks/falsesharing.py
@@ -1,32 +1,36 @@
+"""Definition of the falsesharing benchmark"""
+
+import re
+
import matplotlib.pyplot as plt
import numpy as np
-import re
from src.benchmark import Benchmark
-time_re = re.compile("^Time elapsed = (?P<time>\\d*\\.\\d*) seconds.$")
+TIME_RE = re.compile("^Time elapsed = (?P<time>\\d*\\.\\d*) seconds.$")
+
+
+class BenchmarkFalsesharing(Benchmark):
+ """Falsesharing benchmark.
+ This benchmark makes small allocations and writes to them multiple
+ times. If the allocated objects are on the same cache line the writes
+ will be expensive because of cache thrashing.
+ """
-class Benchmark_Falsesharing(Benchmark):
def __init__(self):
self.name = "falsesharing"
- self.descrition = """This benchmarks makes small allocations and writes
- to them multiple times. If the allocated objects
- are on the same cache line the writes will be
- expensive because of cache thrashing."""
self.cmd = "cache-{bench}{binary_suffix} {threads} 100 8 1000000"
- self.args = {
- "bench": ["thrash", "scratch"],
- "threads": Benchmark.scale_threads_for_cpus(2)
- }
+ self.args = {"bench": ["thrash", "scratch"],
+ "threads": Benchmark.scale_threads_for_cpus(2)}
self.requirements = ["cache-thrash", "cache-scratch"]
super().__init__()
def process_output(self, result, stdout, stderr, allocator, perm, verbose):
- result["time"] = time_re.match(stdout).group("time")
+ result["time"] = TIME_RE.match(stdout).group("time")
def summary(self):
# Speedup thrash
@@ -40,13 +44,13 @@ class Benchmark_Falsesharing(Benchmark):
single_threaded_perm = self.Perm(bench=bench, threads=1)
single_threaded = np.mean([float(m["time"])
- for m in self.results[allocator][single_threaded_perm]])
+ for m in self.results[allocator][single_threaded_perm]])
for perm in self.iterate_args_fixed({"bench": bench}, args=args):
- d = [float(m["time"]) for m in self.results[allocator][perm]]
+ data = [float(m["time"]) for m in self.results[allocator][perm]]
- y_vals.append(single_threaded / np.mean(d))
+ y_vals.append(single_threaded / np.mean(data))
plt.plot(nthreads, y_vals, marker='.', linestyle='-',
label=allocator, color=allocators[allocator]["color"])
@@ -71,4 +75,4 @@ class Benchmark_Falsesharing(Benchmark):
fixed=["bench"])
-falsesharing = Benchmark_Falsesharing()
+falsesharing = BenchmarkFalsesharing()
diff --git a/src/benchmarks/httpd.py b/src/benchmarks/httpd.py
index 2ac7fbc..b63f675 100644
--- a/src/benchmarks/httpd.py
+++ b/src/benchmarks/httpd.py
@@ -1,12 +1,15 @@
+"""Definition of the httpd benchmark"""
+
import re
from src.benchmark import Benchmark
-class Benchmark_HTTPD(Benchmark):
+class BenchmarkHTTPD(Benchmark):
+ """TODO"""
+
def __init__(self):
self.name = "httpd"
- self.descrition = """TODO"""
self.args = {"nthreads": Benchmark.scale_threads_for_cpus(2),
"site": ["index.html", "index.php"]}
@@ -60,4 +63,4 @@ class Benchmark_HTTPD(Benchmark):
# title='"ab -n 10000 -c threads"')
-httpd = Benchmark_HTTPD()
+httpd = BenchmarkHTTPD()
diff --git a/src/benchmarks/larson.py b/src/benchmarks/larson.py
index a5c4a02..642901b 100644
--- a/src/benchmarks/larson.py
+++ b/src/benchmarks/larson.py
@@ -1,34 +1,36 @@
+"""Definition of the larson benchmark"""
+
import re
from src.benchmark import Benchmark
-throughput_re = re.compile("^Throughput =\\s*(?P<throughput>\\d+) operations per second.$")
+THROUGHPUT_RE = re.compile("^Throughput =\\s*(?P<throughput>\\d+) operations per second.$")
+
+
+class BenchmarkLarson(Benchmark):
+ """Larson server benchmark
+ This benchmark is courtesy of Paul Larson at Microsoft Research. It
+ simulates a server: each thread allocates and deallocates objects, and then
+ transfers some objects (randomly selected) to other threads to be freed.
+ """
-class Benchmark_Larson(Benchmark):
def __init__(self):
self.name = "larson"
- self.descrition = """This benchmark is courtesy of Paul Larson at
- Microsoft Research. It simulates a server: each
- thread allocates and deallocates objects, and then
- transfers some objects (randomly selected) to
- other threads to be freed."""
# Parameters taken from the paper "Memory Allocation for Long-Running Server
# Applications" from Larson and Krishnan
self.cmd = "larson{binary_suffix} 1 8 {maxsize} 1000 50000 1 {threads}"
- self.args = {
- "maxsize": [64, 512, 1024],
- "threads": Benchmark.scale_threads_for_cpus(2)
- }
+ self.args = {"maxsize": [64, 512, 1024],
+ "threads": Benchmark.scale_threads_for_cpus(2)}
self.requirements = ["larson"]
super().__init__()
def process_output(self, result, stdout, stderr, target, perm, verbose):
- for l in stdout.splitlines():
- res = throughput_re.match(l)
+ for line in stdout.splitlines():
+ res = THROUGHPUT_RE.match(line)
if res:
result["throughput"] = int(res.group("throughput"))
return
@@ -46,4 +48,4 @@ class Benchmark_Larson(Benchmark):
filepostfix="cachemisses")
-larson = Benchmark_Larson()
+larson = BenchmarkLarson()
diff --git a/src/benchmarks/lld.py b/src/benchmarks/lld.py
index 1575ddd..fb6d08f 100644
--- a/src/benchmarks/lld.py
+++ b/src/benchmarks/lld.py
@@ -1,19 +1,23 @@
-import matplotlib.pyplot as plt
-import numpy as np
+"""Benchmark definition using the llvm-lld speed benchmark"""
+
import os
from urllib.request import urlretrieve
import subprocess
import sys
+import matplotlib.pyplot as plt
+
from src.benchmark import Benchmark
-from src.util import print_status
class BenchmarkLld(Benchmark):
+ """LLVM-lld speed benchmark
+
+ This benchmark runs the lld speed benchmark provided by the llvm project.
+ """
+
def __init__(self):
self.name = "lld"
- self.descrition = """This benchmark runs the lld benchmarks provided
- by the llvm project."""
self.run_dir = "lld-speed-test/{test}"
# TODO: don't hardcode ld.lld location
@@ -33,11 +37,11 @@ class BenchmarkLld(Benchmark):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 1e2 / totalsize
- s = "\r%5.1f%% %*d / %d" % (
+ status = "\r%5.1f%% %*d / %d" % (
percent, len(str(totalsize)), readsofar, totalsize)
- sys.stderr.write(s)
+ sys.stderr.write(status)
else: # total size is unknown
- sys.stderr.write("\rdownloaded %d" % (readsofar,))
+ sys.stderr.write(f"\rdownloaded {readsofar}")
test_dir = "lld-speed-test"
test_archive = f"{test_dir}.tar.xz"
@@ -52,24 +56,24 @@ class BenchmarkLld(Benchmark):
sys.stderr.write("\n")
# Extract tests
- p = subprocess.run(["tar", "xf", test_archive], stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, universal_newlines=True)
+ proc = subprocess.run(["tar", "xf", test_archive], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, universal_newlines=True)
# delete archive
- if p.returncode == 0:
+ if proc.returncode == 0:
os.remove(test_archive)
-
self.args["test"] = os.listdir(test_dir)
return True
+
def cleanup(self):
for perm in self.iterate_args():
a_out = os.path.join("lld-speed-test", perm.test, "a.out")
if os.path.isfile(a_out):
os.remove(a_out)
-
+
def summary(self):
args = self.results["args"]
@@ -77,12 +81,12 @@ class BenchmarkLld(Benchmark):
for perm in self.iterate_args(args=args):
for i, allocator in enumerate(allocators):
-
+
plt.bar([i],
self.results["stats"][allocator][perm]["mean"]["task-clock"],
yerr=self.results["stats"][allocator][perm]["std"]["task-clock"],
label=allocator, color=allocators[allocator]["color"])
-
+
plt.legend(loc="best")
plt.ylabel("Zeit in ms")
plt.title(f"Gesamte Laufzeit {perm.test}")
diff --git a/src/benchmarks/loop.py b/src/benchmarks/loop.py
index 58d4935..a20924a 100644
--- a/src/benchmarks/loop.py
+++ b/src/benchmarks/loop.py
@@ -1,12 +1,16 @@
+"""Definition of the loop micro benchmark"""
+
from src.benchmark import Benchmark
-from src.allocators.bumpptr import bumpptr
-class Benchmark_Loop(Benchmark):
+class BenchmarkLoop(Benchmark):
+ """Loop micro benchmark
+
+ This benchmark allocates and frees n blocks in t concurrent threads.
+ """
+
def __init__(self):
self.name = "loop"
- self.descrition = """This benchmark allocates and frees n blocks in t concurrent
- threads."""
self.cmd = "loop{binary_suffix} {nthreads} 1000000 {maxsize}"
@@ -16,9 +20,6 @@ class Benchmark_Loop(Benchmark):
self.requirements = ["loop"]
super().__init__()
- # add bumpptr alloc
- self.allocators["bumpptr"] = bumpptr.build()
-
def summary(self):
# Speed
self.plot_fixed_arg("perm.nthreads / ({task-clock}/1000)",
@@ -30,7 +31,7 @@ class Benchmark_Loop(Benchmark):
scale = list(self.results["allocators"].keys())[0]
self.plot_fixed_arg("perm.nthreads / ({task-clock}/1000)",
ylabel='"MOPS/cpu-second normalized {}"'.format(scale),
- title='"Loop: " + arg + " " + str(arg_value) + " normalized {}"'.format(scale),
+ title=f'"Loop: " + arg + " " + str(arg_value) + " normalized {scale}"',
filepostfix="time.norm",
scale=scale,
autoticks=False)
@@ -50,4 +51,4 @@ class Benchmark_Loop(Benchmark):
self.export_stats_to_dataref("task-clock")
-loop = Benchmark_Loop()
+loop = BenchmarkLoop()
diff --git a/src/benchmarks/mysql.py b/src/benchmarks/mysql.py
index ce24292..8beb7e6 100644
--- a/src/benchmarks/mysql.py
+++ b/src/benchmarks/mysql.py
@@ -1,5 +1,4 @@
import multiprocessing
-import numpy as np
import os
import re
import shutil
@@ -7,12 +6,11 @@ import subprocess
from subprocess import PIPE
import sys
-from src.globalvars import allocators
from src.benchmark import Benchmark
from src.util import print_status, print_debug, print_info2
TESTDIR = os.path.join(os.getcwd(), "mysql_test")
-MYSQL_USER = "root"
+MYSQL_USER = "fischerling"
RUN_TIME = 10
TABLES = 5
@@ -26,10 +24,14 @@ SERVER_CMD = (f"mysqld --no-defaults -h {TESTDIR} --socket={TESTDIR}/socket --po
f"--max-connections={multiprocessing.cpu_count()} --secure-file-priv=")
-class Benchmark_MYSQL(Benchmark):
+class BenchmarkMYSQL(Benchmark):
+ """MySQL benchmark definition
+
+ See sysbench documentation for more details about the oltp_read_only benchmark
+ """
+
def __init__(self):
self.name = "mysql"
- self.descrition = """See sysbench documentation."""
self.args = {"nthreads": Benchmark.scale_threads_for_cpus(1)}
self.cmd = CMD
@@ -55,8 +57,8 @@ class Benchmark_MYSQL(Benchmark):
# Init database
self.results["facts"]["mysqld"] = subprocess.run(["mysqld", "--version"],
- stdout=PIPE,
- universal_newlines=True).stdout[:-1]
+ stdout=PIPE,
+ universal_newlines=True).stdout[:-1]
if "MariaDB" in self.results["facts"]["mysqld"]:
init_db_cmd = ["mysql_install_db", "--basedir=/usr",
f"--datadir={TESTDIR}"]
@@ -77,16 +79,15 @@ class Benchmark_MYSQL(Benchmark):
self.start_servers()
# Create sbtest TABLE
- p = subprocess.run((f"mysql -u {MYSQL_USER} -S {TESTDIR}/socket").split(),
- input=b"CREATE DATABASE sbtest;\n",
- stdout=PIPE, stderr=PIPE)
+ p = subprocess.run(f"mysql -u {MYSQL_USER} -S {TESTDIR}/socket".split(),
+ input=b"CREATE DATABASE sbtest;\n",
+ stdout=PIPE, stderr=PIPE)
if p.returncode != 0:
print_debug("Stderr:", p.stderr, file=sys.stderr)
raise Exception("Creating test tables failed with:", p.returncode)
print_status("Prepare test tables ...")
- ret = True
p = subprocess.run(PREPARE_CMD.split(), stdout=PIPE, stderr=PIPE)
if p.returncode != 0:
print_debug("Stdout:", p.stdout, file=sys.stderr)
@@ -159,11 +160,12 @@ class Benchmark_MYSQL(Benchmark):
# Colored latex table showing transactions count
d = {allocator: {} for allocator in allocators}
for perm in self.iterate_args(args=args):
- for i, allocator in enumerate(allocators):
- t = [float(x["transactions"]) for x in self.results[allocator][perm]]
- m = np.mean(t)
- s = np.std(t)/m
- d[allocator][perm] = {"mean": m, "std": s}
+ for allocator in allocators:
+ transactions = [float(measure["transactions"])
+ for measure in self.results[allocator][perm]]
+ mean = np.mean(transactions)
+ std = np.std(transactions)/mean
+ d[allocator][perm] = {"mean": mean, "std": std}
mins = {}
maxs = {}
@@ -208,4 +210,4 @@ class Benchmark_MYSQL(Benchmark):
self.export_stats_to_dataref("transactions")
-mysql = Benchmark_MYSQL()
+mysql = BenchmarkMYSQL()
diff --git a/src/benchmarks/realloc.py b/src/benchmarks/realloc.py
index f3f3c2e..c8eb955 100644
--- a/src/benchmarks/realloc.py
+++ b/src/benchmarks/realloc.py
@@ -1,10 +1,15 @@
+"""Definition of the realloc micro benchmark"""
+
from src.benchmark import Benchmark
-class Benchmark_Realloc(Benchmark):
+class BenchmarkRealloc(Benchmark):
+ """Realloc micro benchmark
+
+ realloc a pointer 100 times
+ """
def __init__(self):
self.name = "realloc"
- self.descrition = """Realloc 100 times"""
self.cmd = "realloc"
@@ -22,4 +27,4 @@ class Benchmark_Realloc(Benchmark):
self.export_stats_to_dataref("task-clock")
-realloc = Benchmark_Realloc()
+realloc = BenchmarkRealloc()
diff --git a/src/benchmarks/t_test1.py b/src/benchmarks/t_test1.py
index a2a6e0b..b22b21c 100644
--- a/src/benchmarks/t_test1.py
+++ b/src/benchmarks/t_test1.py
@@ -1,12 +1,16 @@
+"""Definition of the commonly used t-test1 allocator test"""
+
from src.benchmark import Benchmark
-from src.allocators.bumpptr import bumpptr
-class Benchmark_t_test1(Benchmark):
+class BenchmarkTTest1(Benchmark):
+ """t-test1 unit test
+
+ This benchmark from ptmalloc2 allocates and frees n bins in t concurrent threads.
+ """
+
def __init__(self):
self.name = "t_test1"
- self.descrition = """This benchmark from ptmalloc2 allocates and frees
- n bins in t concurrent threads."""
self.cmd = "t-test1 {nthreads} {nthreads} 1000000 {maxsize}"
@@ -16,8 +20,6 @@ class Benchmark_t_test1(Benchmark):
self.requirements = ["t-test1"]
super().__init__()
- self.allocators["bumpptr"] = bumpptr.build()
-
def summary(self):
# mops / per second
yval = "perm.nthreads / ({task-clock}/1000)"
@@ -50,4 +52,4 @@ class Benchmark_t_test1(Benchmark):
self.export_stats_to_csv("task-clock")
-t_test1 = Benchmark_t_test1()
+t_test1 = BenchmarkTTest1()