@@ -24,9 +24,12 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
 
     test_func   -- benchmarking function with prototype
                    test_func(env, case), which takes test_env and test_case
-                   arguments and returns {'seconds': int} (which is benchmark
-                   result) on success and {'error': str} on error. Returned
-                   dict may contain any other additional fields.
+                   arguments and on success returns a dict with a 'seconds'
+                   or 'iops' field (or both) specifying the benchmark result.
+                   If both are provided, 'iops' is considered the main result
+                   and 'seconds' is just additional information. On failure
+                   test_func should return {'error': str}. The returned dict
+                   may contain any other additional fields.
     test_env    -- test environment - opaque first argument for test_func
     test_case   -- test case - opaque second argument for test_func
     count       -- how many times to call test_func, to calculate average
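
As an illustration of this protocol, here is a minimal sketch of a test_func
that reports iops; the function name and the dummy busy-loop workload are
invented for the example and are not part of this patch:

    import time

    def example_test_func(env, case):
        """Sketch: time a dummy workload and report iops."""
        try:
            ops = 0
            start = time.time()
            while time.time() - start < 0.1:  # dummy 100ms busy loop
                ops += 1
            seconds = time.time() - start
            # 'iops' is the main result; 'seconds' is just additional info.
            return {'iops': ops / seconds, 'seconds': seconds}
        except Exception as e:
            return {'error': str(e)}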
@@ -34,6 +37,7 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
 
     Returns dict with the following fields:
         'runs':     list of test_func results
-        'average':  average seconds per run (exists only if at least one run
-                    succeeded)
+        'dimension': dimension of the results, either 'seconds' or 'iops'
+        'average':  average result per run, in that dimension (exists only
+                    if at least one run succeeded)
         'delta':    maximum delta between test_func result and the average
@@ -54,11 +58,19 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
 
     result = {'runs': runs}
 
-    successed = [r for r in runs if ('seconds' in r)]
+    successed = [r for r in runs if ('seconds' in r or 'iops' in r)]
     if successed:
-        avg = sum(r['seconds'] for r in successed) / len(successed)
+        if 'iops' in successed[0]:
+            assert all('iops' in r for r in successed)
+            dim = 'iops'
+        else:
+            assert all('seconds' in r for r in successed)
+            assert all('iops' not in r for r in successed)
+            dim = 'seconds'
+        avg = sum(r[dim] for r in successed) / len(successed)
+        result['dimension'] = dim
         result['average'] = avg
-        result['delta'] = max(abs(r['seconds'] - avg) for r in successed)
+        result['delta'] = max(abs(r[dim] - avg) for r in successed)
 
     if len(successed) < count:
         result['n-failed'] = count - len(successed)
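
To see the dimension handling above in isolation, a standalone sketch with
hand-made run results (all values invented for illustration):

    runs = [{'iops': 1500.0, 'seconds': 0.9},
            {'iops': 1450.0, 'seconds': 1.1},
            {'error': 'timeout'}]

    successed = [r for r in runs if ('seconds' in r or 'iops' in r)]
    dim = 'iops' if 'iops' in successed[0] else 'seconds'  # 'iops' wins

    avg = sum(r[dim] for r in successed) / len(successed)
    delta = max(abs(r[dim] - avg) for r in successed)
    print(dim, avg, delta)  # -> iops 1475.0 25.0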
@@ -118,11 +131,17 @@ def ascii(results):
"""Return ASCII representation of bench() returned dict."""
from tabulate import tabulate
+ dim = None
tab = [[""] + [c['id'] for c in results['envs']]]
for case in results['cases']:
row = [case['id']]
for env in results['envs']:
- row.append(ascii_one(results['tab'][case['id']][env['id']]))
+ res = results['tab'][case['id']][env['id']]
+ if dim is None:
+ dim = res['dimension']
+ else:
+ assert dim == res['dimension']
+ row.append(ascii_one(res))
tab.append(row)
- return tabulate(tab)
+ return f'All results are in {dim}\n\n' + tabulate(tab)
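
As a usage sketch, assuming scripts/simplebench is on the Python path and
reusing the example_test_func above (the env and case ids are made up;
bench_one treats both arguments as opaque):

    from simplebench import bench_one

    result = bench_one(example_test_func, {'id': 'local'}, {'id': 'dummy'},
                       count=3, initial_run=False)

    assert result['dimension'] == 'iops'
    print(result['average'], '+/-', result['delta'])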
Support benchmarks returning not seconds but iops. We'll use it for further new test. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> --- scripts/simplebench/simplebench.py | 35 +++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 8 deletions(-)