reference, declaration, definition
definition → references, declarations, derived classes, virtual overrides
reference to multiple definitions → definitions
unreferenced
    1
    2
    3
    4
    5
    6
    7
    8
    9
   10
   11
   12
   13
   14
   15
   16
   17
   18
   19
   20
   21
   22
   23
   24
   25
   26
   27
   28
   29
   30
   31
   32
   33
   34
   35
   36
   37
   38
   39
   40
   41
   42
   43
   44
   45
   46
   47
   48
   49
   50
   51
   52
   53
   54
   55
   56
   57
   58
   59
   60
   61
   62
   63
   64
   65
   66
   67
   68
   69
   70
   71
   72
   73
#!/usr/bin/env python

"""
A simple bench runner which delegates to the ./dotest.py test driver to run the
benchmarks defined in the list named 'benches'.

You need to hand edit 'benches' to modify/change the command lines passed to the
test driver.

Use the following to get only the benchmark results in your terminal output:

    ./bench.py -e /Volumes/data/lldb/svn/regression/build/Debug/lldb -x '-F Driver::MainLoop()' 2>&1 | grep -P '^lldb.*benchmark:'
"""

from __future__ import print_function
from __future__ import absolute_import

import os
from optparse import OptionParser

# dotest.py invocation with no '-e exe-path' uses lldb as the inferior program,
# unless there is a mentioning of custom executable program.
# Each entry is a shell command line handed to os.system() by main().
# The tokens '%E' and '%X' are placeholders: main() substitutes
# '-e "<exe>"' and '-x "<break-spec>"' respectively when the matching
# command-line option was supplied, or the empty string otherwise.
benches = [
    # Measure startup delays creating a target, setting a breakpoint, and run
    # to breakpoint stop.
    './dotest.py -v +b %E %X -n -p TestStartupDelays.py',

    # Measure 'frame variable' response after stopping at a breakpoint.
    './dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py',

    # Measure stepping speed after stopping at a breakpoint.
    './dotest.py -v +b %E %X -n -p TestSteppingSpeed.py',

    # Measure expression cmd response with a simple custom executable program.
    # (No %E/%X: this bench always uses its own custom executable.)
    './dotest.py +b -n -p TestExpressionCmd.py',

    # Attach to a spawned process then run disassembly benchmarks.
    './dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py'
]


def main():
    """Read the items from 'benches' and run the command line one by one."""
    parser = OptionParser(usage="""\
%prog [options]
Run the standard benchmarks defined in the list named 'benches'.\
""")
    parser.add_option('-e', '--executable',
                      type='string', action='store',
                      dest='exe',
                      help='The target program launched by lldb.')
    parser.add_option('-x', '--breakpoint-spec',
                      type='string', action='store',
                      dest='break_spec',
                      help='The lldb breakpoint spec for the target program.')

    # Parses the options, if any.
    opts, args = parser.parse_args()

    print("Starting bench runner....")

    for item in benches:
        command = item.replace('%E',
                               '-e "%s"' % opts.exe if opts.exe else '')
        command = command.replace('%X', '-x "%s"' %
                                  opts.break_spec if opts.break_spec else '')
        print("Running %s" % (command))
        os.system(command)

    print("Bench runner done.")

# Run the bench suite only when executed as a script, not on import.
if __name__ == '__main__':
    main()