aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rwxr-xr-xbench.py73
-rw-r--r--src/benchmark.py64
-rw-r--r--src/globalvars.py1
-rw-r--r--src/util.py43
4 files changed, 130 insertions, 51 deletions
diff --git a/bench.py b/bench.py
index 4eeede3..42fa992 100755
--- a/bench.py
+++ b/bench.py
@@ -6,17 +6,23 @@ import importlib
import os
import subprocess
-import src.facter
import src.allocators
+import src.facter
+import src.globalvars
+from src.util import *
+
benchmarks = ["loop", "mysql", "falsesharing", "dj_trace", "larson"]
parser = argparse.ArgumentParser(description="benchmark memory allocators")
-parser.add_argument("-s", "--save", help="save benchmark results in RESULTDIR", action='store_true')
+parser.add_argument("-ds, --dont-save", action='store_true', dest="dont_save",
+ help="don't save benchmark results in RESULTDIR")
parser.add_argument("-l", "--load", help="load benchmark results from directory", type=str)
parser.add_argument("-a", "--allocators", help="load allocator definitions from file", type=str)
parser.add_argument("-r", "--runs", help="how often the benchmarks run", default=3, type=int)
-parser.add_argument("-v", "--verbose", help="more output", action='store_true')
+parser.add_argument("-v", "--verbose", help="more output", action='count')
+parser.add_argument("-vdebug", "--verbose-debug", help="debug output",
+ action='store_true', dest="verbose_debug")
parser.add_argument("-b", "--benchmarks", help="benchmarks to run", nargs='+')
parser.add_argument("-ns", "--nosum", help="don't produce plots", action='store_true')
parser.add_argument("-rd", "--resultdir", help="directory where all results go", type=str)
@@ -30,77 +36,104 @@ def main():
print("License GPLv3: GNU GPL version 3 <http://gnu.org/licenses/gpl.html>")
return
- if args.verbose:
- print(args)
+ # Set global verbosity
+ # quiet | -1: Don't output to stdout
+ # default | 0: Only print status and errors
+ # 1: Print warnings and some infos
+ # 2: Print all infos
+ # debug | 99: Print all available infos
+ if args.verbose_debug:
+ src.globalvars.verbosity = 99
+ elif args.verbose:
+ src.globalvars.verbosity = args.verbose
+
+ verbosity = src.globalvars.verbosity
+
+ print_info2("Arguments:", args)
# Prepare allocbench
- print("Building allocbench")
+ print_status("Building allocbench ...")
make_cmd = ["make"]
- if not args.verbose:
+ if verbosity < 1:
make_cmd.append("-s")
+ else:
+ # Flush stdout so the color reset from print_status works
+ print("", end="", flush=True)
subprocess.run(make_cmd)
+ # Prepare compared allocators
allocators_file = os.path.join("build", "allocators", "allocators.py")
if args.allocators or os.path.isfile(allocators_file):
allocators_files = args.allocators or allocators_file
with open(allocators_files, "r") as f:
- g = {"verbose": args.verbose}
+ g = {"verbosity": verbosity}
exec(f.read(), g)
src.allocators.allocators = g["allocators"]
- if args.verbose:
- print("Allocators:", *src.allocators.allocators.keys())
+ print_info("Allocators:", *src.allocators.allocators.keys())
- if args.save or not args.nosum and not (args.runs < 1 and not args.load):
+ # Create result directory if we save or summarize results
+ need_resultdir = not (args.nosum and args.dont_save)
+ if need_resultdir:
if args.resultdir:
resdir = os.path.join(args.resultdir)
else:
- resdir = os.path.join("results", src.facter.get_hostname(),
+ hostname = src.facter.get_hostname()
+ # TODO use saved hostname
+ if args.load and args.runs < 2:
+ pass
+ resdir = os.path.join("results", hostname,
datetime.datetime.now().strftime("%Y-%m-%dT%H:%M"))
try:
+ print_info2("Creating result dir:", resdir)
os.makedirs(resdir)
except FileExistsError:
pass
+ # TODO load all results at once
+
for bench in benchmarks:
bench = eval("importlib.import_module('src.{0}').{0}".format(bench))
if args.benchmarks and not bench.name in args.benchmarks:
continue
+
if args.load:
bench.load(path=args.load)
if args.runs > 0:
- print("Preparing", bench.name, "...")
+ print_status("Preparing", bench.name, "...")
if not bench.prepare():
- print("Preparing", bench.name, "failed!")
+ print_error("Preparing", bench.name, "failed!")
continue
- if not bench.run(runs=args.runs, verbose=args.verbose):
+ print_status("Running", bench.name, "...")
+ if not bench.run(runs=args.runs):
continue
- if args.save or not args.nosum and not (args.runs < 1 and not args.load):
+ if need_resultdir:
+ print_info2("Changing cwd to:", resdir)
old_cwd = os.getcwd()
os.chdir(resdir)
- if args.save or args.nosum:
+ if not args.dont_save:
bench.save()
- if not args.nosum and not (args.runs < 1 and not args.load):
+ if not args.nosum:
try:
os.mkdir(bench.name)
except FileExistsError:
pass
os.chdir(bench.name)
- print("Summarizing", bench.name, "...")
+ print_status("Summarizing", bench.name, "...")
bench.summary()
os.chdir(old_cwd)
if args.runs > 0 and hasattr(bench, "cleanup"):
- print("Cleaning up", bench.name, "...")
+ print_status("Cleaning up", bench.name, "...")
bench.cleanup()
if __name__ == "__main__":
diff --git a/src/benchmark.py b/src/benchmark.py
index af0fd8e..e5620ae 100644
--- a/src/benchmark.py
+++ b/src/benchmark.py
@@ -9,6 +9,8 @@ import shutil
import subprocess
from src.allocators import allocators
+import src.globalvars
+from src.util import *
class Benchmark (object):
@@ -46,10 +48,9 @@ class Benchmark (object):
if not hasattr(self, "requirements"):
self.requirements = []
- def save(self, path=None, verbose=False):
+ def save(self, path=None):
f = path if path else self.name + ".save"
- if verbose:
- print("Saving results to:", self.name + ".save")
+ print_info("Saving results to:", self.name + ".save")
# Pickle can't handle namedtuples so convert the dicts of namedtuples
# into lists of dicts.
save_data = {}
@@ -63,7 +64,7 @@ class Benchmark (object):
with open(f, "wb") as f:
pickle.dump(save_data, f)
- def load(self, path=None, verbose=False):
+ def load(self, path=None):
if not path:
f = self.name + ".save"
else:
@@ -71,8 +72,8 @@ class Benchmark (object):
f = os.path.join(path, self.name + ".save")
else:
f = path
- if verbose:
- print("Loading results from:", self.name + ".save")
+
+ print_info("Loading results from:", self.name + ".save")
with open(f, "rb") as f:
self.results = pickle.load(f)
# Build new named tuples
@@ -82,7 +83,7 @@ class Benchmark (object):
d[self.Perm(**dic)] = measures
self.results[allocator] = d
- def prepare(self, verbose=False):
+ def prepare(self):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
@@ -95,7 +96,7 @@ class Benchmark (object):
# Search for file
if fpath:
if not is_exe(r):
- print("requirement:", r, "not found")
+ print_error("requirement:", r, "not found")
return False
# Search in PATH
else:
@@ -106,7 +107,7 @@ class Benchmark (object):
found = True
if not found:
- print("requirement:", r, "not found")
+ print_error("requirement:", r, "not found")
return False
return True
@@ -131,34 +132,30 @@ class Benchmark (object):
if is_fixed:
yield p
- def run(self, runs=5, verbose=False):
- if runs > 0:
- print("Running", self.name, "...")
-
+ def run(self, runs=5):
# check if perf is allowed on this system
if self.measure_cmd == self.defaults["measure_cmd"]:
if Benchmark.perf_allowed == None:
- if verbose:
- print("Check if you are allowed to use perf ...")
+ print_info("Check if you are allowed to use perf ...")
res = subprocess.run(["perf", "stat", "ls"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
if res.returncode != 0:
- print("Test perf run failed with:")
- print(res.stderr)
+ print_error("Test perf run failed with:")
+ print(res.stderr, file=sys.stderr)
Benchmark.perf_allowed = False
else:
Benchmark.perf_allowed = True
if not Benchmark.perf_allowed:
- print("Skipping", self.name, "because you don't have the",
+ print_error("Skipping", self.name, "because you don't have the",
"needed permissions to use perf")
return False
n = len(list(self.iterate_args())) * len(self.allocators)
for run in range(1, runs + 1):
- print(str(run) + ". run")
+ print_status(run, ". run", sep='')
i = 0
for tname, t in self.allocators.items():
@@ -169,12 +166,13 @@ class Benchmark (object):
os.environ["LD_PRELOAD"] += t["LD_PRELOAD"]
if hasattr(self, "preallocator_hook"):
- if self.preallocator_hook((tname, t), run, verbose):
+ if self.preallocator_hook((tname, t), run,
+ verbose=src.globalvars.verbosity):
return False
for perm in self.iterate_args():
i += 1
- print(i, "of", n, "\r", end='')
+ print_info0(i, "of", n, "\r", end='')
perm_dict = perm._asdict()
perm_dict.update(t)
@@ -192,24 +190,25 @@ class Benchmark (object):
actual_cmd = self.measure_cmd + " " + actual_cmd
+ print_debug("Cmd:", actual_cmd)
res = subprocess.run(actual_cmd.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
if res.returncode != 0:
- print("\n" + actual_cmd, "exited with", res.returncode,
+ print_error("\n" + actual_cmd, "exited with", res.returncode,
"for", tname)
- print("Aborting Benchmark.")
- print("Stdout:\n" + res.stdout)
- print("Stderr:\n" + res.stderr)
+ print_debug("Stdout:\n" + res.stdout)
+ print_debug("Stderr:\n" + res.stderr)
+ print_error("Aborting Benchmark.")
return False
if "ERROR: ld.so" in res.stderr:
- print("\nPreloading of", t["LD_PRELOAD"],
+ print_error("\nPreloading of", t["LD_PRELOAD"],
"failed for", tname)
- print("Stderr:\n" + res.stderr)
- print("Aborting Benchmark.")
+ print_debug("Stderr:\n" + res.stderr)
+ print_error("Aborting Benchmark.")
return False
result = {}
@@ -225,7 +224,8 @@ class Benchmark (object):
if hasattr(self, "process_output"):
self.process_output(result, res.stdout, res.stderr,
- tname, perm, verbose)
+ tname, perm,
+ verbose=src.globalvars.verbosity)
# Parse perf output if available
if self.measure_cmd == self.defaults["measure_cmd"]:
@@ -236,7 +236,7 @@ class Benchmark (object):
try:
result[row[2].split(":")[0]] = row[0]
except Exception as e:
- print("Exception", e, "occured on", row, "for",
+ print_warn("Exception", e, "occured on", row, "for",
tname, "and", perm)
if run == 1:
@@ -244,9 +244,11 @@ class Benchmark (object):
self.results[tname][perm].append(result)
if hasattr(self, "postallocator_hook"):
- if self.postallocator_hook((tname, t), run, verbose):
+ if self.postallocator_hook((tname, t), run,
+ verbose=src.globalvars.verbosity):
return False
print()
+ # Reset PATH
os.environ["PATH"] = os.environ["PATH"].replace(":build/" + self.name, "")
return True
diff --git a/src/globalvars.py b/src/globalvars.py
new file mode 100644
index 0000000..1949816
--- /dev/null
+++ b/src/globalvars.py
@@ -0,0 +1 @@
+verbosity = 0 \ No newline at end of file
diff --git a/src/util.py b/src/util.py
new file mode 100644
index 0000000..4190f82
--- /dev/null
+++ b/src/util.py
@@ -0,0 +1,43 @@
+import colorama
+import sys
+
+import src.globalvars
+
+def allocbench_msg(color, *objects, sep=' ', end='\n', file=sys.stdout, flush=False):
+ if src.globalvars.verbosity < 0:
+ return
+ color = getattr(colorama.Fore, color)
+ print(color, end="", file=file)
+ print(*objects, sep=sep, end=end, file=file)
+ print(colorama.Fore.RESET, end="", file=file, flush=flush)
+
+def print_debug(*objects, sep=' ', end='\n', file=sys.stdout, flush=False):
+ if src.globalvars.verbosity < 99:
+ return
+ print(*objects, sep=sep, end=end, file=file, flush=flush)
+
+def print_info(*objects, sep=' ', end='\n', file=sys.stdout, flush=False):
+ if src.globalvars.verbosity < 1:
+ return
+ print(*objects, sep=sep, end=end, file=file, flush=flush)
+
+def print_info0(*objects, sep=' ', end='\n', file=sys.stdout, flush=False):
+ if src.globalvars.verbosity < 0:
+ return
+ print(*objects, sep=sep, end=end, file=file, flush=flush)
+
+def print_info2(*objects, sep=' ', end='\n', file=sys.stdout, flush=False):
+ if src.globalvars.verbosity < 2:
+ return
+ print(*objects, sep=sep, end=end, file=file, flush=flush)
+
+def print_status(*objects, sep=' ', end='\n', file=sys.stdout, flush=False):
+ allocbench_msg("GREEN", *objects, sep=sep, end=end, file=file, flush=flush)
+
+def print_warn(*objects, sep=' ', end='\n', file=sys.stdout, flush=False):
+ if src.globalvars.verbosity < 1:
+ return
+ allocbench_msg("YELLOW", *objects, sep=sep, end=end, file=file, flush=flush)
+
+def print_error(*objects, sep=' ', end='\n', file=sys.stderr, flush=False):
+ allocbench_msg("RED", *objects, sep=sep, end=end, file=file, flush=flush)