
Commit

Merge pull request GoogleCloudPlatform#614 from GoogleCloudPlatform/schedule-benchmarks-after-failure

Change behavior after a benchmark fails.
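
(Editorial note: in practice the new behavior is opted into on the command line like any other PKB flag, e.g. ./pkb.py --benchmarks=iperf,ping --stop_after_benchmark_failure. The invocation and benchmark names here are illustrative, not from this commit. The flag defaults to False, so a failing benchmark logs an error and the remaining benchmarks still run; a keyboard interrupt always stops the queue regardless of the flag.)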
skschneider committed Nov 5, 2015
2 parents 48fb15c + 684540a commit d21d3d3
Showing 1 changed file with 44 additions and 3 deletions: perfkitbenchmarker/pkb.py
@@ -157,6 +157,13 @@
     'on any VMs. This option should probably only ever be used '
     'if you have already created an image with all relevant '
     'packages installed.')
+flags.DEFINE_bool(
+    'stop_after_benchmark_failure', False,
+    'Determines response when running multiple benchmarks serially and a '
+    'benchmark run fails. When True, no further benchmarks are scheduled, and '
+    'execution ends. When False, benchmarks continue to be scheduled. Does not '
+    'apply to keyboard interrupts, which will always prevent further '
+    'benchmarks from being scheduled.')
 
 # Support for using a proxy in the cloud environment.
 flags.DEFINE_string('http_proxy', '',
@@ -410,12 +417,13 @@ def RunBenchmarks(publish=True):
       static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile(
           fp)
 
+  benchmark_arg_succeeded_tuples = []
   try:
     benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
     total_benchmarks = len(benchmark_tuple_list)
 
     benchmark_counts = collections.Counter()
-    args = []
+    args = collections.deque()
     for i, benchmark_tuple in enumerate(benchmark_tuple_list):
       benchmark_module, user_config = benchmark_tuple
       benchmark_uid = (benchmark_module.BENCHMARK_NAME +
@@ -429,13 +437,46 @@ def RunBenchmarks(publish=True):
       vm_util.RunThreaded(
           RunBenchmark, args, max_concurrent_threads=FLAGS.parallelism)
     else:
-      for run_args, kwargs in args:
-        RunBenchmark(*run_args, **kwargs)
+      while args:
+        run_args, _ = args.popleft()
+        benchmark_module, _, sequence_number, _, _, benchmark_uid = run_args
+        benchmark_name = benchmark_module.BENCHMARK_NAME
+        try:
+          RunBenchmark(*run_args)
+          benchmark_arg_succeeded_tuples.append((run_args, True))
+        except BaseException as e:
+          benchmark_arg_succeeded_tuples.append((run_args, False))
+          msg = 'Benchmark {0}/{1} {2} (UID: {3}) failed.'.format(
+              sequence_number, total_benchmarks, benchmark_name, benchmark_uid)
+          if (isinstance(e, KeyboardInterrupt) or
+              FLAGS.stop_after_benchmark_failure):
+            logging.error('%s Execution will not continue.', msg)
+            break
+          logging.error('%s Execution will continue.', msg)
+      benchmark_arg_succeeded_tuples.extend((a, False) for a, _ in args)
 
   finally:
     if collector.samples:
       collector.PublishSamples()
 
+    if benchmark_arg_succeeded_tuples:
+      successful_benchmarks = tuple(
+          run_args[0].BENCHMARK_NAME
+          for run_args, succeeded in benchmark_arg_succeeded_tuples
+          if succeeded)
+      failed_benchmarks = tuple(
+          run_args[0].BENCHMARK_NAME
+          for run_args, succeeded in benchmark_arg_succeeded_tuples
+          if not succeeded)
+      logging.info('The following benchmarks succeeded: %s',
+                   ', '.join(successful_benchmarks))
+      if failed_benchmarks:
+        logging.warning('The following benchmarks failed or were not executed: '
+                        '%s', ', '.join(failed_benchmarks))
+      if total_benchmarks:
+        logging.info('Benchmark success rate: %.2f%% (%d/%d)',
+                     len(successful_benchmarks) / total_benchmarks * 100.,
+                     len(successful_benchmarks), total_benchmarks)
     logging.info('Complete logs can be found at: %s',
                  vm_util.PrependTempDir(LOG_FILE_NAME))
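
(Editorial note: for readers skimming the serial path in the last hunk, the following is a minimal, self-contained sketch of the new scheduling behavior. It is not PKB code: run_fn stands in for RunBenchmark, the queue holds bare argument tuples rather than PKB's (run_args, kwargs) pairs, and logging is trimmed to one message per failure.)

import collections
import logging

def run_serially(queue, run_fn, stop_after_failure=False):
  # queue: a deque of argument tuples; run_fn: a stand-in for RunBenchmark.
  # Returns (args, succeeded) tuples covering every scheduled entry,
  # including entries that were skipped after an early stop.
  results = []
  while queue:
    run_args = queue.popleft()
    try:
      run_fn(*run_args)
      results.append((run_args, True))
    except BaseException as e:
      results.append((run_args, False))
      if isinstance(e, KeyboardInterrupt) or stop_after_failure:
        logging.error('Benchmark failed; execution will not continue.')
        break
      logging.error('Benchmark failed; execution will continue.')
  # Whatever is still queued was never run; count it as not succeeded,
  # which is what feeds the "failed or were not executed" summary message.
  results.extend((a, False) for a in queue)
  return results

def fake_benchmark(name):
  # Toy benchmark that fails only for the name 'bad'.
  if name == 'bad':
    raise ValueError(name)

queue = collections.deque([('ok1',), ('bad',), ('ok2',)])
print(run_serially(queue, fake_benchmark))
# [(('ok1',), True), (('bad',), False), (('ok2',), True)]

With stop_after_failure=True, the same queue would instead yield (('ok2',), False) for the last entry, since it is never executed, which matches the "failed or were not executed" wording in the summary log.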

