aboutsummaryrefslogtreecommitdiff
path: root/src/benchmark.py
diff options
context:
space:
mode:
authorFlorian Fischer <florian.fl.fischer@fau.de>2019-08-11 22:28:38 +0200
committerFlorian Fischer <florian.fl.fischer@fau.de>2019-08-11 22:40:06 +0200
commit9d056ff64bf887e2c3479fe515bf7c95cb39e5b6 (patch)
treed9c2d9819e9b0a175721562db546fbc25f3ca54f /src/benchmark.py
parentf7d64e02af941515bc894dea10f55255d06cbcf7 (diff)
downloadallocbench-9d056ff64bf887e2c3479fe515bf7c95cb39e5b6.tar.gz
allocbench-9d056ff64bf887e2c3479fe515bf7c95cb39e5b6.zip
Rework exec chain
Originally the structure of the executed cmd was {measure cmd} {allocator cmd prefix} {cmd}, with the parent environment (except LD_PRELOAD) modified for the whole command chain. Unfortunately perf causes segfaults with some allocators, and measuring the allocators' cmd prefixes doesn't seem fair. So the new cmd chain looks like: {allocator cmd prefix} {measure cmd} run_cmd <LD_PRELOAD> {cmd}, without touching the environment in python. run_cmd sets LD_PRELOAD to the value it received in argv[1] and executes argv[2] with the rest of argv. This does also measure code that is not part of the actual benchmark, but in an equal manner and not only for some allocators.
Diffstat (limited to 'src/benchmark.py')
-rw-r--r--src/benchmark.py35
1 files changed, 17 insertions, 18 deletions
diff --git a/src/benchmark.py b/src/benchmark.py
index 698a40d..fa99c65 100644
--- a/src/benchmark.py
+++ b/src/benchmark.py
@@ -52,8 +52,8 @@ class Benchmark (object):
print_error("Killing subprocess ", popen.args)
popen.kill()
popen.wait()
- print_debug("Server Out:", popen.stdout)
- print_debug("Server Err:", popen.stderr)
+ print_debug("Server Out:", popen.stdout.read())
+ print_debug("Server Err:", popen.stderr.read())
@staticmethod
def scale_threads_for_cpus(factor, steps=None):
@@ -311,24 +311,23 @@ class Benchmark (object):
substitutions["perm"] = ("{}-"*(len(perm)-1) + "{}").format(*perm)
substitutions.update(alloc)
- actual_cmd = self.cmd.format(**substitutions)
- actual_env = None
+ cmd_argv = self.cmd.format(**substitutions).split()
+ argv = []
# Prepend cmd if we are not measuring servers
if self.server_cmds == []:
- actual_cmd = src.util.prefix_cmd_with_abspath(actual_cmd)
- actual_cmd = "{} {} {}".format(self.measure_cmd,
- alloc["cmd_prefix"],
- actual_cmd)
- # substitute again
- actual_cmd = actual_cmd.format(**substitutions)
-
- actual_env = env
-
- print_debug("\nCmd:", actual_cmd)
- res = subprocess.run(actual_cmd.split(),
- env=actual_env,
- stderr=subprocess.PIPE,
+ prefix_argv = alloc["cmd_prefix"].format(**substitutions).split()
+ measure_argv = self.measure_cmd.format(**substitutions)
+ measure_argv = src.util.prefix_cmd_with_abspath(measure_argv).split()
+
+ argv.extend(prefix_argv)
+ argv.extend(measure_argv)
+ argv.extend(["build/run_cmd", env["LD_PRELOAD"]])
+
+ argv.extend(cmd_argv)
+
+ print_debug("\nCmd:", argv)
+ res = subprocess.run(argv, stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
@@ -339,7 +338,7 @@ class Benchmark (object):
print_debug("Stdout:\n" + res.stdout)
print_debug("Stderr:\n" + res.stderr)
if res.returncode != 0:
- print_error("{} failed with exit code {} for {}".format(actual_cmd, res.returncode, alloc_name))
+ print_error("{} failed with exit code {} for {}".format(argv, res.returncode, alloc_name))
else:
print_error("Preloading of {} failed for {}".format(alloc["LD_PRELOAD"], alloc_name))