diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index 809817a1a8c4e7f0d1887cefa75f0cba8912f929..1ac951f3d861e2f58988f9b7d285a2ff32d4eaf1 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -144,7 +144,7 @@ def _read_json(filename, badjson_files, nonexistant_files):
 def fmt_dict(d):
   return ''.join(["    " + k + ": " + str(d[k]) + "\n" for k in d])
 
-def diff(bms, loops, track, old, new, counters):
+def diff(bms, loops, regex, track, old, new, counters):
   benchmarks = collections.defaultdict(Benchmark)
 
   badjson_files = {}
@@ -153,7 +153,8 @@ def diff(bms, loops, track, old, new, counters):
     for loop in range(0, loops):
       for line in subprocess.check_output(
         ['bm_diff_%s/opt/%s' % (old, bm),
-         '--benchmark_list_tests']).splitlines():
+         '--benchmark_list_tests',
+         '--benchmark_filter=%s' % regex]).splitlines():
         stripped_line = line.strip().replace("/", "_").replace(
           "<", "_").replace(">", "_").replace(", ", "_")
         js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_main.py b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
index 8b4e0cb69a4519ba06174d073bc32a81c52b84b8..5aa11ac391ea79185f4d7c3e5184cf63144c5e47 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_main.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
@@ -63,10 +63,10 @@ def _args():
    help='Name of baseline run to compare to. Usually just called "old"')
   argp.add_argument(
     '-r',
-    '--repetitions',
-    type=int,
-    default=1,
-    help='Number of repetitions to pass to the benchmarks')
+    '--regex',
+    type=str,
+    default="",
+    help='Regex to filter benchmarks run')
   argp.add_argument(
     '-l',
     '--loops',
@@ -125,10 +125,10 @@ def main(args):
       subprocess.check_call(['git', 'checkout', where_am_i])
       subprocess.check_call(['git', 'submodule', 'update'])
 
-  bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
-  bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
+  bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.regex, args.counters)
+  bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.regex, args.counters)
 
-  diff, note = bm_diff.diff(args.benchmarks, args.loops, args.track, old,
+  diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex, args.track, old,
                 'new', args.counters)
   if diff:
     text = '[%s] Performance differences noted:\n%s' % (args.pr_comment_name, diff)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
index 72b3d3cf10627a5ef7f873ab3e5f7395093346c0..206f7c5845fb5c73cd7d908a98d92b2705e2a142 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
@@ -56,10 +56,10 @@ def _args():
   )
   argp.add_argument(
     '-r',
-    '--repetitions',
-    type=int,
-    default=1,
-    help='Number of repetitions to pass to the benchmarks')
+    '--regex',
+    type=str,
+    default="",
+    help='Regex to filter benchmarks run')
   argp.add_argument(
     '-l',
     '--loops',
@@ -77,18 +77,17 @@ def _args():
   return args
 
 
-def _collect_bm_data(bm, cfg, name, reps, idx, loops):
+def _collect_bm_data(bm, cfg, name, regex, idx, loops):
   jobs_list = []
   for line in subprocess.check_output(
     ['bm_diff_%s/%s/%s' % (name, cfg, bm),
-     '--benchmark_list_tests']).splitlines():
+     '--benchmark_list_tests', '--benchmark_filter=%s' % regex]).splitlines():
     stripped_line = line.strip().replace("/", "_").replace(
       "<", "_").replace(">", "_").replace(", ", "_")
     cmd = [
       'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' %
       line, '--benchmark_out=%s.%s.%s.%s.%d.json' %
       (bm, stripped_line, cfg, name, idx), '--benchmark_out_format=json',
-      '--benchmark_repetitions=%d' % (reps)
     ]
     jobs_list.append(
       jobset.JobSpec(
@@ -100,13 +99,13 @@ def _collect_bm_data(bm, cfg, name, reps, idx, loops):
   return jobs_list
 
 
-def run(name, benchmarks, jobs, loops, reps, counters):
+def run(name, benchmarks, jobs, loops, regex, counters):
   jobs_list = []
   for loop in range(0, loops):
     for bm in benchmarks:
-      jobs_list += _collect_bm_data(bm, 'opt', name, reps, loop, loops)
+      jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
       if counters:
-        jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop,
+        jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop,
                         loops)
   random.shuffle(jobs_list, random.SystemRandom().random)
   jobset.run(jobs_list, maxjobs=jobs)
@@ -114,4 +113,4 @@ def run(name, benchmarks, jobs, loops, reps, counters):
 
 if __name__ == '__main__':
   args = _args()
-  run(args.name, args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
+  run(args.name, args.benchmarks, args.jobs, args.loops, args.regex, args.counters)
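
Net effect of the change: the short -r flag now carries a regex instead of a repetition count, and that regex is forwarded to the Google Benchmark binaries as --benchmark_filter when listing tests (bm_diff.py, bm_run.py); only the listed benchmarks are then turned into job specs, so the filter also restricts what gets run. A minimal sketch of the forwarding path, assuming a locally built binary at bm_diff_new/opt/bm_error and an illustrative regex value (both hypothetical, not taken from this diff):

import subprocess

# Hypothetical binary path and filter; the real binaries are built into
# bm_diff_<name>/<config>/ by the accompanying build step (not shown here).
binary = 'bm_diff_new/opt/bm_error'
regex = 'BM_Error.*'  # value that would arrive via the new -r / --regex flag

# Google Benchmark applies --benchmark_filter to --benchmark_list_tests as
# well, so only matching benchmarks are enumerated and later turned into jobs.
for line in subprocess.check_output(
        [binary, '--benchmark_list_tests',
         '--benchmark_filter=%s' % regex]).splitlines():
  print(line.strip())

Through the CLI the same value is supplied as, e.g., -r 'BM_Error.*' to bm_run.py or bm_main.py (regex illustrative); the empty default effectively matches everything, since Google Benchmark falls back to a match-all filter when the spec is empty.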