aboutsummaryrefslogtreecommitdiff
path: root/bench.py
blob: ad8b3795823fdedb1c10dc9b4bc15e7a09107ffd (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
#!/usr/bin/env python3

# Copyright 2018-2020 Florian Fischer <florian.fl.fischer@fau.de>
#
# This file is part of allocbench.
#
# allocbench is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# allocbench is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
"""Start an allocbench run"""

import argparse
import atexit
import datetime
import os
import sys
import traceback

from allocbench.allocator import collect_allocators
from allocbench.analyse import analyze_bench, analyze_allocators
from allocbench.benchmark import get_benchmark_object, AVAIL_BENCHMARKS
from allocbench.directories import get_current_result_dir, set_current_result_dir
import allocbench.facter as facter
from allocbench.util import (run_cmd, print_status, print_license_and_exit,
                             get_logger, set_verbosity)

from summarize import summarize

logger = get_logger(__file__)


def epilog():
    """Run tasks on exit.

    Removes a stale status file, deletes an empty result directory or,
    if results were produced, records the end time and stores the
    collected facts alongside them.
    """
    # remove a left over status file if some is present
    if os.path.exists("status"):
        os.remove("status")

    res_dir = get_current_result_dir()
    # After early errors resdir may not be set
    if not res_dir:
        return

    # BUG FIX: Path.iterdir() returns a generator, which is always truthy,
    # so `if not res_dir.iterdir()` could never detect an empty directory.
    # any() consumes at most one entry and correctly tests for emptiness.
    if not any(res_dir.iterdir()):
        logger.warning("Remove empty resultdir")
        res_dir.rmdir()
    else:
        # Store the end time (truncated to minute precision) with the facts.
        endtime = datetime.datetime.now().isoformat()
        endtime = endtime[:endtime.rfind(':')]
        facter.FACTS["endtime"] = endtime
        facter.store_facts(res_dir)


def check_dependencies():
    """Check if known requirements of allocbench are met.

    Exits with status 1 if the running interpreter is older than
    Python 3.6 (f-strings are used throughout the code base).
    """
    # used python 3.6 features: f-strings
    # BUG FIX: the old check `version_info[0] < 3 or version_info[1] < 6`
    # would wrongly reject future major versions such as Python 4.0;
    # comparing the version tuple handles all versions correctly.
    if sys.version_info < (3, 6):
        logger.critical("At least python version 3.6 is required.")
        sys.exit(1)


def main():
    """Main entry point for an allocbench benchmark run.

    Parses the command line, builds allocbench, collects the allocators
    to test and facts about the environment, runs the selected
    benchmarks and saves their results in the result directory.

    Returns:
        int: 0 on success, 1 if any benchmark was unknown or failed.
    """
    check_dependencies()

    parser = argparse.ArgumentParser(description="benchmark memory allocators")
    parser.add_argument("--analyze",
                        help="analyze benchmark behavior",
                        action="store_true")
    parser.add_argument("--analyze-allocators",
                        help="analyze allocator behavior",
                        action="store_true")
    parser.add_argument("-r",
                        "--runs",
                        help="how often the benchmarks run",
                        default=3,
                        type=int)
    parser.add_argument("-v",
                        "--verbose",
                        help="more output",
                        action='count',
                        default=0)
    parser.add_argument("-b",
                        "--benchmarks",
                        help="benchmarks to run",
                        nargs='+')
    parser.add_argument("-xb",
                        "--exclude-benchmarks",
                        help="explicitly excluded benchmarks",
                        nargs='+')
    parser.add_argument("-a",
                        "--allocators",
                        help="allocators to test",
                        type=str,
                        nargs='+')
    parser.add_argument("-rd",
                        "--resultdir",
                        help="directory where all results go",
                        type=str)
    parser.add_argument("-s",
                        "--summarize",
                        help="create a summary of this run",
                        action='store_true')
    parser.add_argument("--license",
                        help="print license info and exit",
                        action='store_true')
    parser.add_argument("--version",
                        help="print version info and exit",
                        action='version',
                        version=f"allocbench {facter.allocbench_version()}")

    args = parser.parse_args()
    if args.license:
        print_license_and_exit()

    # Registered after argument parsing so --help/--version don't trigger it.
    atexit.register(epilog)

    set_verbosity(args.verbose)

    logger.debug("Arguments: %s", args)

    # Prepare allocbench
    print_status("Building allocbench ...")
    # TODO: sort out recursive makes when running integration tests through our Makefile
    if 'MAKELEVEL' not in os.environ:
        make_cmd = ["make", "-d"]
        if args.verbose < 2:
            make_cmd.append("-s")
        run_cmd(make_cmd, output_verbosity=1)

    # allocators to benchmark
    allocators = collect_allocators(args.allocators)

    # BUG FIX: check for emptiness *before* logging the allocator list.
    # The old code built a format string from `len(allocators) - 1`
    # placeholders, which produced a malformed logging call ("%s" with no
    # arguments) when no allocators were found.
    if not allocators:
        logger.critical("Abort because there are no allocators to benchmark")
        sys.exit(1)

    logger.info("Allocators: %s", ", ".join(allocators.keys()))
    logger.debug("Allocators: %s",
                 ", ".join(str(item) for item in allocators.items()))

    # collect facts about benchmark environment
    facter.collect_facts()

    # Create result directory: explicit --resultdir or
    # results/<hostname>/<starttime> by default.
    if args.resultdir:
        set_current_result_dir(args.resultdir)
    else:
        set_current_result_dir(
            os.path.join("results", facter.FACTS["hostname"],
                         facter.FACTS["starttime"]))

    print_status("Writing results to:", get_current_result_dir())

    # Remember cwd so it can be restored after a benchmark fails mid-run.
    cwd = os.getcwd()

    exit_code = 0
    # warn about unknown benchmarks
    for bench_name in (args.benchmarks or []) + (args.exclude_benchmarks
                                                 or []):
        if bench_name not in AVAIL_BENCHMARKS:
            logger.error('Benchmark "%s" unknown!', bench_name)
            exit_code = 1

    # Run actual benchmarks
    for bench_name in AVAIL_BENCHMARKS:
        # Honor the -b/--benchmarks whitelist and -xb blacklist.
        if args.benchmarks and bench_name not in args.benchmarks:
            continue

        if args.exclude_benchmarks and bench_name in args.exclude_benchmarks:
            continue

        try:
            print_status("Loading", bench_name, "...")
            bench = get_benchmark_object(bench_name)
        except Exception:  #pylint: disable=broad-except
            logger.error(traceback.format_exc())
            logger.error("Skipping %s! Loading failed.", bench_name)
            exit_code = 1
            continue

        try:
            print_status("Preparing", bench, "...")
            bench.prepare()
        except Exception:  #pylint: disable=broad-except
            logger.error(traceback.format_exc())
            logger.error("Skipping %s! Preparing failed.", bench)
            exit_code = 1
            continue

        if args.analyze:
            analyze_bench(bench)

        if args.analyze_allocators:
            analyze_allocators(bench, allocators)

        if args.runs > 0:
            print_status("Running", bench.name, "...")
            start_time = datetime.datetime.now()
            bench.results['facts']['start-time'] = start_time.isoformat()
            try:
                bench.run(allocators, runs=args.runs)
            except Exception:  #pylint: disable=broad-except
                # Reset cwd: a benchmark may chdir before failing.
                os.chdir(cwd)
                logger.error(traceback.format_exc())
                logger.error("Skipping %s!", bench)
                exit_code = 1
                continue

            end_time = datetime.datetime.now()
            bench.results['facts']['end-time'] = end_time.isoformat()
            bench.results['facts']['duration'] = (end_time -
                                                  start_time).total_seconds()

        # Save results in resultdir
        bench.save(get_current_result_dir())

        # cleanup() is optional on benchmark objects.
        if hasattr(bench, "cleanup"):
            print_status("Cleaning up", bench.name, "...")
            bench.cleanup()

    if args.summarize:
        summarize()

    return exit_code


if __name__ == "__main__":
    # Propagate main()'s exit code (0 on success, 1 on failure) to the shell.
    sys.exit(main())