diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index a2852546b3723cf66ed38dc2f7492cb2016c4993..05a123329020757dd18683a4ba6b7c11b4822573 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -152,7 +152,7 @@ def collect_perf(bm_name, args):
     if len(benchmarks) >= 20:
       # run up to half the cpu count: each benchmark can use up to two cores
       # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=1),
+      jobset.run(benchmarks, maxjobs=1, add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
       jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
       jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
@@ -161,7 +161,7 @@ def collect_perf(bm_name, args):
       cleanup = []
   # run the remaining benchmarks that weren't flushed
   if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=1),
+    jobset.run(benchmarks, maxjobs=1, add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
     jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
     jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())