From 3345e1ccf766d813dcd0f07324214cf623725ff0 Mon Sep 17 00:00:00 2001
From: ncteisen <ncteisen@gmail.com>
Date: Mon, 19 Jun 2017 09:28:22 -0700
Subject: [PATCH] Actually enable trickle diff

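Track the trickle stall counters under their *_per_iteration names in
the Jenkins job and in _INTERESTING, loosen the bm_diff significance
threshold to 1e-5 and report trickle fields even when the median
difference is small, let bm_json.expand_json fall back to whichever
JSON file is present so runs without counters still diff, and tighten
the search bounds in bm_speedup (updating its __main__ example to pass
the new threshold).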
---
 tools/jenkins/run_trickle_diff.sh             |  2 +-
 .../microbenchmarks/bm_diff/bm_constants.py   |  5 ++--
 .../microbenchmarks/bm_diff/bm_diff.py        | 29 +++++++++----------
 .../microbenchmarks/bm_diff/bm_speedup.py     | 12 ++++----
 tools/profiling/microbenchmarks/bm_json.py    |  2 ++
 5 files changed, 26 insertions(+), 24 deletions(-)

diff --git a/tools/jenkins/run_trickle_diff.sh b/tools/jenkins/run_trickle_diff.sh
index da905d0249..47dd8b44d6 100755
--- a/tools/jenkins/run_trickle_diff.sh
+++ b/tools/jenkins/run_trickle_diff.sh
@@ -20,4 +20,4 @@ set -ex
 cd $(dirname $0)/../..
 
 tools/run_tests/start_port_server.py
-tools/profiling/microbenchmarks/bm_diff/bm_main.py -d origin/$ghprbTargetBranch -b bm_fullstack_trickle -l 4 -t cli_transport_stalls cli_stream_stalls svr_transport_stalls svr_stream_stalls --no-counters --pr_comment_name trickle
+tools/profiling/microbenchmarks/bm_diff/bm_main.py -d origin/$ghprbTargetBranch -b bm_fullstack_trickle -l 4 -t cli_transport_stalls_per_iteration cli_stream_stalls_per_iteration svr_transport_stalls_per_iteration svr_stream_stalls_per_iteration --no-counters --pr_comment_name trickle
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
index 4cd65867c3..ad79a0a197 100644
--- a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -26,5 +26,6 @@ _AVAILABLE_BENCHMARK_TESTS = [
 _INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration',
         'allocs_per_iteration', 'writes_per_iteration',
         'atm_cas_per_iteration', 'atm_add_per_iteration',
-        'nows_per_iteration', 'cli_transport_stalls', 'cli_stream_stalls',
-        'svr_transport_stalls', 'svr_stream_stalls',)
+        'nows_per_iteration', 'cli_transport_stalls_per_iteration',
+        'cli_stream_stalls_per_iteration', 'svr_transport_stalls_per_iteration',
+        'svr_stream_stalls_per_iteration',)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index ec1840e2a1..809817a1a8 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -108,9 +108,10 @@ class Benchmark:
       mdn_diff = abs(_median(new) - _median(old))
       _maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
              (f, new_name, new, old_name, old, mdn_diff))
-      s = bm_speedup.speedup(new, old, 1e-10)
-      if abs(s) > 3 and mdn_diff > 0.5:
-        self.final[f] = '%+d%%' % s
+      s = bm_speedup.speedup(new, old, 1e-5)
+      if abs(s) > 3:
+        if mdn_diff > 0.5 or 'trickle' in f:
+          self.final[f] = '%+d%%' % s
     return self.final.keys()
 
   def skip(self):
@@ -172,18 +173,16 @@ def diff(bms, loops, track, old, new, counters):
           js_new_ctr = None
           js_old_ctr = None
 
-        if js_new_ctr:
-          for row in bm_json.expand_json(js_new_ctr, js_new_opt):
-            name = row['cpp_name']
-            if name.endswith('_mean') or name.endswith('_stddev'):
-              continue
-            benchmarks[name].add_sample(track, row, True)
-        if js_old_ctr:
-          for row in bm_json.expand_json(js_old_ctr, js_old_opt):
-            name = row['cpp_name']
-            if name.endswith('_mean') or name.endswith('_stddev'):
-              continue
-            benchmarks[name].add_sample(track, row, False)
+        for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+          name = row['cpp_name']
+          if name.endswith('_mean') or name.endswith('_stddev'):
+            continue
+          benchmarks[name].add_sample(track, row, True)
+        for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+          name = row['cpp_name']
+          if name.endswith('_mean') or name.endswith('_stddev'):
+            continue
+          benchmarks[name].add_sample(track, row, False)
 
   really_interesting = set()
   for name, bm in benchmarks.items():
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
index 5bff8d0ca9..4bf59fb280 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
@@ -34,7 +34,7 @@ def speedup(new, old, threshold):
   if p0 > threshold: return 0
   if s0 < 0:
     pct = 1
-    while pct < 101:
+    while pct < 100:
       sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
       if sp > 0: break
       if pp > threshold: break
@@ -42,7 +42,7 @@ def speedup(new, old, threshold):
     return -(pct - 1)
   else:
     pct = 1
-    while pct < 100000:
+    while pct < 10000:
       sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
       if sp < 0: break
       if pp > threshold: break
@@ -51,7 +51,7 @@ def speedup(new, old, threshold):
 
 
 if __name__ == "__main__":
-  new = [1.0, 1.0, 1.0, 1.0]
-  old = [2.0, 2.0, 2.0, 2.0]
-  print speedup(new, old)
-  print speedup(old, new)
+  new = [0.0, 0.0, 0.0, 0.0]
+  old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
+  print speedup(new, old, 1e-5)
+  print speedup(old, new, 1e-5)
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index 062611f1c7..930287e0d6 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -167,6 +167,8 @@ def parse_name(name):
   return out
 
 def expand_json(js, js2 = None):
+  assert(js or js2)
+  if not js: js = js2
   for bm in js['benchmarks']:
     if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'): continue
     context = js['context']
-- 
GitLab