Commit 68e27bfe authored by Jan Tattermusch

enable running many runs per test without flooding the logs

parent fffb2676
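This commit threads a new quiet_success option from the command line down to the job runner: the job runner stops printing START/PASSED lines and stops recording passing results, run_tests.py gains a --quiet_success flag plus a terser summary, and the matrix driver forwards the option automatically whenever more than one run per test is requested. The three files touched are not named in this view; from their content they are the jobset module, run_tests.py, and the run_tests_matrix.py driver. A hypothetical invocation that exercises the feature directly, combining run_tests.py's existing -n (runs per test) option with the new flag, might look like:

    # repeat every selected test 1000 times; only failures and flakes are
    # printed, and passing runs are left out of the JUnit XML report
    run_tests.py -n 1000 --quiet_success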
@@ -219,7 +219,8 @@ class JobResult(object):
 class Job(object):
   """Manages one job."""
-  def __init__(self, spec, newline_on_success, travis, add_env):
+  def __init__(self, spec, newline_on_success, travis, add_env,
+               quiet_success=False):
     self._spec = spec
     self._newline_on_success = newline_on_success
     self._travis = travis
@@ -227,7 +228,9 @@ class Job(object):
     self._retries = 0
     self._timeout_retries = 0
     self._suppress_failure_message = False
-    message('START', spec.shortname, do_newline=self._travis)
+    self._quiet_success = quiet_success
+    if not self._quiet_success:
+      message('START', spec.shortname, do_newline=self._travis)
     self.result = JobResult()
     self.start()
@@ -302,10 +305,11 @@ class Job(object):
           if real > 0.5:
             cores = (user + sys) / real
             measurement = '; cpu_cost=%.01f; estimated=%.01f' % (cores, self._spec.cpu_cost)
-        message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % (
-            self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
-            stdout() if self._spec.verbose_success else None,
-            do_newline=self._newline_on_success or self._travis)
+        if not self._quiet_success:
+          message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % (
+              self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
+              stdout() if self._spec.verbose_success else None,
+              do_newline=self._newline_on_success or self._travis)
         self.result.state = 'PASSED'
     elif (self._state == _RUNNING and
           self._spec.timeout_seconds is not None and
@@ -341,7 +345,7 @@ class Jobset(object):
   """Manages one run of jobs."""
   def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
-               stop_on_failure, add_env):
+               stop_on_failure, add_env, quiet_success):
     self._running = set()
     self._check_cancelled = check_cancelled
     self._cancelled = False
@@ -352,6 +356,7 @@ class Jobset(object):
     self._travis = travis
     self._stop_on_failure = stop_on_failure
     self._add_env = add_env
+    self._quiet_success = quiet_success
     self.resultset = {}
     self._remaining = None
     self._start_time = time.time()
@@ -380,7 +385,8 @@ class Jobset(object):
     job = Job(spec,
               self._newline_on_success,
               self._travis,
-              self._add_env)
+              self._add_env,
+              self._quiet_success)
     self._running.add(job)
     if job.GetSpec().shortname not in self.resultset:
       self.resultset[job.GetSpec().shortname] = []
@@ -403,7 +409,8 @@ class Jobset(object):
           break
       for job in dead:
         self._completed += 1
-        self.resultset[job.GetSpec().shortname].append(job.result)
+        if not self._quiet_success or job.result.state != 'PASSED':
+          self.resultset[job.GetSpec().shortname].append(job.result)
         self._running.remove(job)
       if dead: return
       if not self._travis and platform_string() != 'windows':
@@ -463,7 +470,8 @@ def run(cmdlines,
         infinite_runs=False,
         stop_on_failure=False,
         add_env={},
-        skip_jobs=False):
+        skip_jobs=False,
+        quiet_success=False):
   if skip_jobs:
     results = {}
     skipped_job_result = JobResult()
@@ -474,7 +482,8 @@ def run(cmdlines,
     return results
   js = Jobset(check_cancelled,
               maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
-              newline_on_success, travis, stop_on_failure, add_env)
+              newline_on_success, travis, stop_on_failure, add_env,
+              quiet_success)
   for cmdline, remaining in tag_remaining(cmdlines):
     if not js.start(cmdline):
       break
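The job-runner changes above (the Job/Jobset hunks) all follow one gating pattern: anything that logs or records a successful run is wrapped in a check of the new flag, while failures keep their existing behavior. A minimal standalone sketch of that pattern (hypothetical names, not the project's classes; it only assumes a result object whose state attribute is 'PASSED' on success):

    import collections

    Result = collections.namedtuple('Result', ['state'])

    class QuietRunner(object):
      """Illustrates the quiet_success gating used in the hunks above."""

      def __init__(self, quiet_success=False):
        self._quiet_success = quiet_success
        self.resultset = {}

      def record(self, shortname, result):
        if result.state != 'PASSED':
          print('FAILED  %s' % shortname)   # failures are always reported
        elif not self._quiet_success:
          print('PASSED  %s' % shortname)   # successes only when not quiet
        # In quiet mode passing results are dropped entirely, so they never
        # reach the summary or the XML report.
        if not self._quiet_success or result.state != 'PASSED':
          self.resultset.setdefault(shortname, []).append(result)

For example, QuietRunner(quiet_success=True).record('foo_test', Result('PASSED')) prints nothing and leaves resultset empty, which is why a thousand passing runs no longer flood the log.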
@@ -1094,6 +1094,12 @@ argp.add_argument('-x', '--xml_report', default=None, type=str,
                   help='Generates a JUnit-compatible XML report')
 argp.add_argument('--report_suite_name', default='tests', type=str,
                   help='Test suite name to use in generated JUnit XML report')
+argp.add_argument('--quiet_success',
+                  default=False,
+                  action='store_const',
+                  const=True,
+                  help='Dont print anything when a test passes. Passing tests will also not be reported in the XML report. ' +
+                       'Useful when running many iterations of each test (argument -n).')
 argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
                   help='Dont try to iterate over many polling strategies when they exist')
 args = argp.parse_args()
@@ -1441,20 +1447,24 @@ def _build_and_run(
                      else itertools.repeat(massaged_one_run, runs_per_test))
     all_runs = itertools.chain.from_iterable(runs_sequence)
+    if args.quiet_success:
+      jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
     num_test_failures, resultset = jobset.run(
         all_runs, check_cancelled, newline_on_success=newline_on_success,
         travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
         stop_on_failure=args.stop_on_failure,
-        add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+        add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port},
+        quiet_success=args.quiet_success)
     if resultset:
       for k, v in sorted(resultset.items()):
         num_runs, num_failures = _calculate_num_runs_failures(v)
-        if num_failures == num_runs: # what about infinite_runs???
-          jobset.message('FAILED', k, do_newline=True)
-        elif num_failures > 0:
-          jobset.message(
-              'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
-              do_newline=True)
+        if num_failures > 0:
+          if num_failures == num_runs: # what about infinite_runs???
+            jobset.message('FAILED', k, do_newline=True)
+          else:
+            jobset.message(
+                'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
+                do_newline=True)
   finally:
     for antagonist in antagonists:
       antagonist.kill()
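The rewritten summary loop in the last hunk above reports a test only if it failed at least once: FAILED when every recorded run failed, FLAKE otherwise, and nothing at all for tests whose runs all passed (which, with --quiet_success, are not even present in the resultset). A small self-contained sketch of that rule (a hypothetical helper, not the project's function; results are assumed to carry a state attribute):

    def summarize(shortname, results):
      """Return a summary line for one test, or None if every recorded run passed."""
      num_runs = len(results)
      num_failures = sum(1 for r in results if r.state != 'PASSED')
      if num_failures == 0:
        return None                               # stay quiet
      if num_failures == num_runs:                # every recorded run failed
        return 'FAILED %s' % shortname
      return 'FLAKE %s [%d/%d runs flaked]' % (shortname, num_failures, num_runs)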
@@ -242,6 +242,17 @@ def _allowed_labels():
   return sorted(all_labels)
+
+def _runs_per_test_type(arg_str):
+  """Auxiliary function to parse the "runs_per_test" flag."""
+  try:
+    n = int(arg_str)
+    if n <= 0: raise ValueError
+    return n
+  except:
+    msg = '\'{}\' is not a positive integer'.format(arg_str)
+    raise argparse.ArgumentTypeError(msg)
+

 if __name__ == "__main__":
   argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
   argp.add_argument('-j', '--jobs',
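The _runs_per_test_type helper added above is meant to be passed to argparse as a type= callback, so invalid values are rejected while the arguments are parsed rather than deep inside a run. Illustrative calls (behavior follows directly from the code above):

    _runs_per_test_type('3')    # returns 3
    _runs_per_test_type('0')    # raises argparse.ArgumentTypeError("'0' is not a positive integer")
    _runs_per_test_type('abc')  # raises argparse.ArgumentTypeError("'abc' is not a positive integer")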
@@ -269,7 +280,7 @@ if __name__ == "__main__":
                     default=False,
                     action='store_const',
                     const=True,
-                    help='Filters out tests irrelavant to pull request changes.')
+                    help='Filters out tests irrelevant to pull request changes.')
   argp.add_argument('--base_branch',
                     default='origin/master',
                     type=str,
@@ -278,6 +289,9 @@ if __name__ == "__main__":
                     default=_DEFAULT_INNER_JOBS,
                     type=int,
                     help='Number of jobs in each run_tests.py instance')
+  argp.add_argument('-n', '--runs_per_test', default=1, type=_runs_per_test_type,
+                    help='How many times to run each test. >1 runs implies ' +
+                         'omitting passing tests from the output & reports.')
   args = argp.parse_args()

   extra_args = []
@@ -285,6 +299,10 @@ if __name__ == "__main__":
     extra_args.append('--build_only')
   if args.force_default_poller:
     extra_args.append('--force_default_poller')
+  if args.runs_per_test > 1:
+    extra_args.append('-n')
+    extra_args.append('%s' % args.runs_per_test)
+    extra_args.append('--quiet_success')

   all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
              _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
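Putting the last two hunks together: starting the matrix driver with, say, -n 20 (a hypothetical count) makes it forward both flags to every generated job, roughly:

    # effective extra_args when args.runs_per_test == 20:
    #   [..., '-n', '20', '--quiet_success']

Each inner run_tests.py invocation then repeats its tests 20 times per configuration while only failures and flakes appear in its log and in the JUnit XML report.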