diff --git a/tools/jenkins/run_trickle_diff.sh b/tools/jenkins/run_trickle_diff.sh
index da905d024903598f3d2c352575710eff26c6a037..47dd8b44d64fc34e3c833f26cd77df2d8fcd0023 100755
--- a/tools/jenkins/run_trickle_diff.sh
+++ b/tools/jenkins/run_trickle_diff.sh
@@ -20,4 +20,4 @@ set -ex
 cd $(dirname $0)/../..
 tools/run_tests/start_port_server.py
 
-tools/profiling/microbenchmarks/bm_diff/bm_main.py -d origin/$ghprbTargetBranch -b bm_fullstack_trickle -l 4 -t cli_transport_stalls cli_stream_stalls svr_transport_stalls svr_stream_stalls --no-counters --pr_comment_name trickle
+tools/profiling/microbenchmarks/bm_diff/bm_main.py -d origin/$ghprbTargetBranch -b bm_fullstack_trickle -l 4 -t cli_transport_stalls_per_iteration cli_stream_stalls_per_iteration svr_transport_stalls_per_iteration svr_stream_stalls_per_iteration --no-counters --pr_comment_name trickle
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
index 4cd65867c371c643178a0028abb771201058f130..ad79a0a1972210d9fdf9a4115e9d9a694d5ab14b 100644
--- a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -26,5 +26,6 @@ _AVAILABLE_BENCHMARK_TESTS = [
 _INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration',
                 'allocs_per_iteration', 'writes_per_iteration',
                 'atm_cas_per_iteration', 'atm_add_per_iteration',
-                'nows_per_iteration', 'cli_transport_stalls', 'cli_stream_stalls',
-                'svr_transport_stalls', 'svr_stream_stalls',)
+                'nows_per_iteration', 'cli_transport_stalls_per_iteration',
+                'cli_stream_stalls_per_iteration', 'svr_transport_stalls_per_iteration',
+                'svr_stream_stalls_per_iteration',)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index ec1840e2a109c77e0d413d1d31f9e4e577f749d2..809817a1a8c4e7f0d1887cefa75f0cba8912f929 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -108,9 +108,10 @@ class Benchmark:
       mdn_diff = abs(_median(new) - _median(old))
       _maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
                    (f, new_name, new, old_name, old, mdn_diff))
-      s = bm_speedup.speedup(new, old, 1e-10)
-      if abs(s) > 3 and mdn_diff > 0.5:
-        self.final[f] = '%+d%%' % s
+      s = bm_speedup.speedup(new, old, 1e-5)
+      if abs(s) > 3:
+        if mdn_diff > 0.5 or 'trickle' in f:
+          self.final[f] = '%+d%%' % s
     return self.final.keys()
 
   def skip(self):
@@ -172,18 +173,16 @@ def diff(bms, loops, track, old, new, counters):
         js_new_ctr = None
         js_old_ctr = None
 
-      if js_new_ctr:
-        for row in bm_json.expand_json(js_new_ctr, js_new_opt):
-          name = row['cpp_name']
-          if name.endswith('_mean') or name.endswith('_stddev'):
-            continue
-          benchmarks[name].add_sample(track, row, True)
-      if js_old_ctr:
-        for row in bm_json.expand_json(js_old_ctr, js_old_opt):
-          name = row['cpp_name']
-          if name.endswith('_mean') or name.endswith('_stddev'):
-            continue
-          benchmarks[name].add_sample(track, row, False)
+      for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+        name = row['cpp_name']
+        if name.endswith('_mean') or name.endswith('_stddev'):
+          continue
+        benchmarks[name].add_sample(track, row, True)
+      for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+        name = row['cpp_name']
+        if name.endswith('_mean') or name.endswith('_stddev'):
+          continue
+        benchmarks[name].add_sample(track, row, False)
 
   really_interesting = set()
   for name, bm in benchmarks.items():
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
index 5bff8d0ca9d65d00c6a25a69d1583edb210287f6..4bf59fb28089c2d83cf70a2c3ebd1cee23075634 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
@@ -34,7 +34,7 @@ def speedup(new, old, threshold):
   if p0 > threshold: return 0
   if s0 < 0:
     pct = 1
-    while pct < 101:
+    while pct < 100:
       sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
       if sp > 0: break
       if pp > threshold: break
@@ -42,7 +42,7 @@ def speedup(new, old, threshold):
     return -(pct - 1)
   else:
     pct = 1
-    while pct < 100000:
+    while pct < 10000:
       sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
       if sp < 0: break
       if pp > threshold: break
@@ -51,7 +51,7 @@ def speedup(new, old, threshold):
 
 
 if __name__ == "__main__":
-  new = [1.0, 1.0, 1.0, 1.0]
-  old = [2.0, 2.0, 2.0, 2.0]
-  print speedup(new, old)
-  print speedup(old, new)
+  new = [0.0, 0.0, 0.0, 0.0]
+  old=[2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
+  print speedup(new, old, 1e-5)
+  print speedup(old, new, 1e-5)
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index 062611f1c761278bae1b3b338920eb8eed330175..930287e0d6961a58226fd1bd18e0907dddf52365 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -167,6 +167,8 @@ def parse_name(name):
   return out
 
 def expand_json(js, js2 = None):
+  assert(js or js2)
+  if not js: js = js2
   for bm in js['benchmarks']:
     if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'): continue
     context = js['context']
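
Note (not part of the patch): a minimal, hypothetical sketch of the fallback the bm_json.py hunk introduces, using made-up data and a simplified stand-in for expand_json (the real function merges in js['context'] and yields much richer rows). With --no-counters the counter JSON is None, so bm_diff.py now always calls expand_json and the function falls back to the opt-build results instead of the old behaviour, where the `if js_new_ctr:` guard skipped the rows entirely.

def expand_json(js, js2=None):
  # Simplified stand-in for bm_json.expand_json, for illustration only.
  assert js or js2          # at least one of the two result sets must exist
  if not js:                # counters disabled (--no-counters): js is None,
    js = js2                # so fall back to the opt-build JSON
  for bm in js['benchmarks']:
    # Skip Google Benchmark aggregate rows, as the real code does.
    if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'):
      continue
    yield bm

# Hypothetical usage: counter JSON missing, opt JSON present.
js_ctr = None
js_opt = {'benchmarks': [{'name': 'BM_Example/1', 'cpu_time': 2.9e-06},
                         {'name': 'BM_Example/1_mean', 'cpu_time': 2.9e-06}]}
print(list(expand_json(js_ctr, js_opt)))  # only the non-aggregate row survives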