From e4c3561074ca27fc4bdf1a6bf5b15ebaaf4519a4 Mon Sep 17 00:00:00 2001
From: Adele Zhou <adelez@google.com>
Date: Fri, 16 Oct 2015 15:34:23 -0700
Subject: [PATCH] Generate HTML report for interop tests

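Collect per-job results in jobset via a new JobResult class and a
resultset map keyed by job shortname, and change jobset.run() to
return (number of failures, resultset) instead of a bool. Use the
resultset in run_interop_tests.py to render an HTML matrix of interop
results (PASS/FAIL/TIMEOUT/Not implemented, with failure details in
tooltips) to reports/index.html for the Jenkins HTML publisher. Also
normalize string literals in the touched files to single quotes.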
---
 tools/run_tests/jobset.py            |  52 +++++++--
 tools/run_tests/run_interop_tests.py | 156 ++++++++++++++++++++++++---
 tools/run_tests/run_tests.py         |  24 +++--
 3 files changed, 196 insertions(+), 36 deletions(-)

diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index d44104f24c..a9c96a6c95 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -48,7 +48,7 @@ _DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
 # setup a signal handler so that signal.pause registers 'something'
 # when a child finishes
 # not using futures and threading to avoid a dependency on subprocess32
-if platform.system() == "Windows":
+if platform.system() == 'Windows':
   pass
 else:
   have_alarm = False
@@ -118,8 +118,8 @@ def message(tag, msg, explanatory_text=None, do_newline=False):
   except:
     pass
 
-message.old_tag = ""
-message.old_msg = ""
+message.old_tag = ''
+message.old_msg = ''
 
 def which(filename):
   if '/' in filename:
@@ -177,6 +177,16 @@ class JobSpec(object):
     return self.identity() == other.identity()
 
 
+class JobResult(object):
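+  """Result of a single job: state, returncode, elapsed time, retries, output."""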
+  def __init__(self):
+    self.state = 'UNKNOWN'
+    self.returncode = -1
+    self.elapsed_time = 0
+    self.retries = 0
+    self.message = ''
+
+
 class Job(object):
   """Manages one job."""
 
@@ -192,8 +201,12 @@ class Job(object):
     self._timeout_retries = 0
     self._suppress_failure_message = False
     message('START', spec.shortname, do_newline=self._travis)
+    self.result = JobResult()
     self.start()
 
+  def get_spec(self):
+    return self._spec
+
   def start(self):
     self._tempfile = tempfile.TemporaryFile()
     env = dict(os.environ)
@@ -219,6 +232,8 @@ class Job(object):
       # implemented efficiently. This is an experiment to workaround the issue by making sure
       # results.xml file is small enough.
       filtered_stdout = filtered_stdout[-128:]
+      self.result.message = filtered_stdout
+      self.result.elapsed_time = elapsed
       if self._xml_test is not None:
         self._xml_test.set('time', str(elapsed))
         ET.SubElement(self._xml_test, 'system-out').text = filtered_stdout
@@ -228,6 +243,7 @@ class Job(object):
             self._spec.shortname, self._process.returncode, self._process.pid),
             stdout, do_newline=True)
           self._retries += 1
+          self.result.retries = self._timeout_retries + self._retries
           self.start()
         else:
           self._state = _FAILURE
@@ -235,6 +251,8 @@ class Job(object):
             message('FAILED', '%s [ret=%d, pid=%d]' % (
                 self._spec.shortname, self._process.returncode, self._process.pid),
                 stdout, do_newline=True)
+          self.result.state = 'FAILED'
+          self.result.returncode = self._process.returncode
           if self._xml_test is not None:
             ET.SubElement(self._xml_test, 'failure', message='Failure')
       else:
@@ -242,20 +260,24 @@ class Job(object):
         message('PASSED', '%s [time=%.1fsec; retries=%d;%d]' % (
                     self._spec.shortname, elapsed, self._retries, self._timeout_retries),
             do_newline=self._newline_on_success or self._travis)
+        self.result.state = 'PASSED'
         if self._bin_hash:
           update_cache.finished(self._spec.identity(), self._bin_hash)
     elif self._state == _RUNNING and time.time() - self._start > self._spec.timeout_seconds:
       self._tempfile.seek(0)
       stdout = self._tempfile.read()
       filtered_stdout = _filter_stdout(stdout)
+      self.result.message = filtered_stdout
       if self._timeout_retries < self._spec.timeout_retries:
         message('TIMEOUT_FLAKE', self._spec.shortname, stdout, do_newline=True)
         self._timeout_retries += 1
+        self.result.retries = self._timeout_retries + self._retries
         self._process.terminate()
         self.start()
       else:
         message('TIMEOUT', self._spec.shortname, stdout, do_newline=True)
         self.kill()
+        self.result.state = 'TIMEOUT'
         if self._xml_test is not None:
           ET.SubElement(self._xml_test, 'system-out').text = filtered_stdout
           ET.SubElement(self._xml_test, 'error', message='Timeout')
@@ -290,6 +290,11 @@ class Jobset(object):
     self._hashes = {}
     self._xml_report = xml_report
     self._add_env = add_env
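+    # Maps a job's shortname to its JobResult (None until the job completes).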
+    self.resultset = {}
+
+  def get_num_failures(self):
+    return self._failures
 
   def start(self, spec):
     """Start a job. Return True on success, False on failure."""
@@ -312,12 +338,14 @@ class Jobset(object):
       bin_hash = None
       should_run = True
     if should_run:
-      self._running.add(Job(spec,
-                            bin_hash,
-                            self._newline_on_success,
-                            self._travis,
-                            self._add_env,
-                            self._xml_report))
+      job = Job(spec,
+                bin_hash,
+                self._newline_on_success,
+                self._travis,
+                self._add_env,
+                self._xml_report)
+      self._running.add(job)
+      self.resultset[job.get_spec().shortname] = None
     return True
 
   def reap(self):
@@ -337,6 +365,7 @@ class Jobset(object):
         break
       for job in dead:
         self._completed += 1
+        self.resultset[job.get_spec().shortname] = job.result
         self._running.remove(job)
       if dead: return
       if (not self._travis):
@@ -398,4 +427,5 @@ def run(cmdlines,
   for cmdline in cmdlines:
     if not js.start(cmdline):
       break
-  return js.finish()
+  js.finish()
+  return js.get_num_failures(), js.resultset
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 7789b04f44..80f28c544b 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -391,7 +391,7 @@ def cloud_to_prod_jobspec(language, test_case, docker_image=None, auth=False):
           cmdline=cmdline,
           cwd=cwd,
           environ=environ,
-          shortname="%s:%s:%s" % (suite_name, language, test_case),
+          shortname='%s:%s:%s' % (suite_name, language, test_case),
           timeout_seconds=2*60,
           flake_retries=5 if args.allow_flakes else 0,
           timeout_retries=2 if args.allow_flakes else 0,
@@ -423,7 +423,7 @@ def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
           cmdline=cmdline,
           cwd=cwd,
           environ=environ,
-          shortname="cloud_to_cloud:%s:%s_server:%s" % (language, server_name,
+          shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
                                                  test_case),
           timeout_seconds=2*60,
           flake_retries=5 if args.allow_flakes else 0,
@@ -448,7 +448,7 @@ def server_jobspec(language, docker_image):
   server_job = jobset.JobSpec(
           cmdline=docker_cmdline,
           environ=environ,
-          shortname="interop_server_%s" % language,
+          shortname='interop_server_%s' % language,
           timeout_seconds=30*60)
   server_job.container_name = container_name
   return server_job
@@ -467,16 +467,132 @@ def build_interop_image_jobspec(language, tag=None):
   # TODO(stanleycheung): find a more elegant way to do this
   if language.safename == 'php' and os.path.exists('/var/local/.composer/auth.json'):
     env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
-      "-v /var/local/.composer/auth.json:/root/.composer/auth.json:ro"
+      '-v /var/local/.composer/auth.json:/root/.composer/auth.json:ro'
   build_job = jobset.JobSpec(
           cmdline=['tools/jenkins/build_interop_image.sh'],
           environ=env,
-          shortname="build_docker_%s" % (language),
+          shortname='build_docker_%s' % (language),
           timeout_seconds=30*60)
   build_job.tag = tag
   return build_job
 
 
+# TODO(adelez): Use mako template.
+def fill_one_test_result(shortname, resultset, html_str):
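+  """Appends one <td> cell for shortname's result to html_str and returns it."""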
+  if shortname in resultset:
+    result = resultset[shortname]
+    if result.state == 'PASSED':
+      html_str = '%s<td bgcolor="green">PASS</td>\n' % html_str
+    else:
+      tooltip = ''
+      if result.returncode > 0:
+        tooltip = 'returncode: %d ' % result.returncode
+      if result.message:
+        tooltip = '%smessage: %s' % (tooltip, result.message)
+      if result.state == 'FAILED':
+        html_str = '%s<td bgcolor="red">' % html_str
+        if tooltip:
+          html_str = ('%s<a href="#" data-toggle="tooltip" '
+                      'data-placement="auto" title="%s">FAIL</a></td>\n' %
+                      (html_str, tooltip))
+        else:
+          html_str = '%sFAIL</td>\n' % html_str
+      elif result.state == 'TIMEOUT':
+        html_str = '%s<td bgcolor="yellow">' % html_str
+        if tooltip:
+          html_str = ('%s<a href="#" data-toggle="tooltip" '
+                      'data-placement="auto" title="%s">TIMEOUT</a></td>\n'
+                      % (html_str, tooltip))
+        else:
+          html_str = '%sTIMEOUT</td>\n' % html_str
+  else:
+    html_str = '%s<td bgcolor="magenta">Not implemented</td>\n' % html_str
+
+  return html_str
+
+
+def render_html_report(test_cases, client_langs, server_langs, resultset,
+                       num_failures):
+  """Generate html report."""
+  sorted_test_cases = sorted(test_cases)
+  sorted_client_langs = sorted(client_langs)
+  print sorted_client_langs
+  sorted_server_langs = sorted(server_langs)
+  html_str = ('<!DOCTYPE html>\n'
+              '<html lang=\"en\">\n'
+              '<head><title>Interop Test Result</title></head>\n'
+              '<body>\n')
+  if num_failures > 1:
+    html_str = (
+        '%s<p><h2><font color=\"red\">%d tests failed!</font></h2></p>\n' % 
+        (html_str, num_failures))
+  elif num_failures:
+    html_str = (
+        '%s<p><h2><font color=\"red\">%d test failed!</font></h2></p>\n' % 
+        (html_str, num_failures))
+  else:
+    html_str = (
+        '%s<p><h2><font color=\"green\">All tests passed!</font></h2></p>\n' % 
+        html_str)
+  if args.cloud_to_prod_auth or args.cloud_to_prod:
+    # Each column header is the client language.
+    html_str = ('%s<h2>Cloud to Prod</h2>\n'
+                '<table style="width:100%%" border="1">\n'
+                '<tr bgcolor="#00BFFF">\n'
+                '<th/>\n') % html_str
+    for client_lang in sorted_client_langs:
+      html_str = '%s<th>%s\n' % (html_str, client_lang)
+    html_str = '%s</tr>\n' % html_str
+    for test_case in sorted_test_cases:
+      html_str = '%s<tr><td><b>%s</b></td>\n' % (html_str, test_case)
+      for client_lang in sorted_client_langs:
+        if args.cloud_to_prod:
+          shortname = 'cloud_to_prod:%s:%s' % (client_lang, test_case)
+        else:
+          shortname = 'cloud_to_prod_auth:%s:%s' % (client_lang, test_case)
+        html_str = fill_one_test_result(shortname, resultset, html_str)
+      html_str = '%s</tr>\n' % html_str
+    html_str = '%s</table>\n' % html_str
+  if servers:
+    for test_case in sorted_test_cases:
+      # Each column header is the client language.
+      html_str = ('%s<h2>%s</h2>\n'
+                  '<table style="width:100%%" border="1">\n'
+                  '<tr bgcolor="#00BFFF">\n'
+                  '<th/>\n') % (html_str, test_case)
+      for client_lang in sorted_client_langs:
+        html_str = '%s<th>%s\n' % (html_str, client_lang)
+      html_str = '%s</tr>\n' % html_str
+      # Each row head is the server language.
+      for server_lang in sorted_server_langs:
+        html_str = '%s<tr><td><b>%s</b></td>\n' % (html_str, server_lang)
+        # Fill up the cells with test result.
+        for client_lang in sorted_client_langs:
+          shortname = 'cloud_to_cloud:%s:%s_server:%s' % (
+              client_lang, server_lang, test_case)
+          html_str = fill_one_test_result(shortname, resultset, html_str)
+        html_str = '%s</tr>\n' % html_str
+      html_str = '%s</table>\n' % html_str
+
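+  # Activating the tooltips assumes jQuery and Bootstrap's tooltip plugin are
+  # loaded wherever the report is viewed.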
+  html_str = ('%s\n'
+              '<script>\n'
+              '$(document).ready(function(){\n'
+              '$(\'[data-toggle="tooltip"]\').tooltip();\n'
+              '});\n'
+              '</script>\n'
+              '</body>\n'
+              '</html>') % html_str
+
+  # Write to reports/index.html as set up in Jenkins plugin.
+  html_report_dir = 'reports'
+  if not os.path.exists(html_report_dir):
+    os.mkdir(html_report_dir)
+  html_file_path = os.path.join(html_report_dir, 'index.html')
+  with open(html_file_path, 'w') as f:
+    f.write(html_str)
+
+
 argp = argparse.ArgumentParser(description='Run interop tests.')
 argp.add_argument('-l', '--language',
                   choices=['all'] + sorted(_LANGUAGES),
@@ -503,7 +619,7 @@ argp.add_argument('-s', '--server',
                   default=[])
 argp.add_argument('--override_server',
                   action='append',
-                  type=lambda kv: kv.split("="),
+                  type=lambda kv: kv.split('='),
                   help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
                   default=[])
 argp.add_argument('-t', '--travis',
@@ -521,7 +637,7 @@ argp.add_argument('--allow_flakes',
                   default=False,
                   action='store_const',
                   const=True,
-                  help="Allow flaky tests to show as passing (re-runs failed tests up to five times)")
+                  help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
 args = argp.parse_args()
 
 servers = set(s for s in itertools.chain.from_iterable(_SERVERS
@@ -538,7 +654,7 @@ if args.use_docker:
     time.sleep(5)
 
 if not args.use_docker and servers:
-  print "Running interop servers is only supported with --use_docker option enabled."
+  print 'Running interop servers is only supported with --use_docker option enabled.'
   sys.exit(1)
 
 languages = set(_LANGUAGES[l]
@@ -560,10 +676,14 @@ if args.use_docker:
 
   if build_jobs:
     jobset.message('START', 'Building interop docker images.', do_newline=True)
-    if jobset.run(build_jobs, newline_on_success=True, maxjobs=args.jobs):
-      jobset.message('SUCCESS', 'All docker images built successfully.', do_newline=True)
+    num_failures, _ = jobset.run(
+        build_jobs, newline_on_success=True, maxjobs=args.jobs)
+    if num_failures == 0:
+      jobset.message('SUCCESS', 'All docker images built successfully.',
+                     do_newline=True)
     else:
-      jobset.message('FAILED', 'Failed to build interop docker images.', do_newline=True)
+      jobset.message('FAILED', 'Failed to build interop docker images.',
+                     do_newline=True)
       for image in docker_images.itervalues():
         dockerjob.remove_image(image, skip_nonexistent=True)
       exit(1);
@@ -614,7 +734,7 @@ try:
         jobs.append(test_job)
 
   if not jobs:
-    print "No jobs to run."
+    print 'No jobs to run.'
     for image in docker_images.itervalues():
       dockerjob.remove_image(image, skip_nonexistent=True)
     sys.exit(1)
@@ -622,13 +742,19 @@ try:
   root = ET.Element('testsuites')
   testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests')
 
-  if jobset.run(jobs, newline_on_success=True, maxjobs=args.jobs, xml_report=testsuite):
-    jobset.message('SUCCESS', 'All tests passed', do_newline=True)
-  else:
+  num_failures, resultset = jobset.run(jobs, newline_on_success=True,
+                                       maxjobs=args.jobs, xml_report=testsuite)
+  if num_failures:
     jobset.message('FAILED', 'Some tests failed', do_newline=True)
+  else:
+    jobset.message('SUCCESS', 'All tests passed', do_newline=True)
 
   tree = ET.ElementTree(root)
   tree.write('report.xml', encoding='UTF-8')
+
+  # Generate HTML report.
+  render_html_report(_TEST_CASES, set([str(l) for l in languages]), servers,
+                     resultset, num_failures)
 
 finally:
   # Check if servers are still running.
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 84262aa773..2170824caf 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -161,7 +161,7 @@ class CLanguage(object):
       if os.path.isfile(binary):
         out.append(config.job_spec([binary], [binary]))
       else:
-        print "\nWARNING: binary not found, skipping", binary
+        print '\nWARNING: binary not found, skipping', binary
     return sorted(out)
 
   def make_targets(self):
@@ -516,7 +516,7 @@ def runs_per_test_type(arg_str):
         if n <= 0: raise ValueError
         return n
     except:
-        msg = "'{}' isn't a positive integer or 'inf'".format(arg_str)
+        msg = "'{}' is not a positive integer or 'inf'".format(arg_str)
         raise argparse.ArgumentTypeError(msg)
 
 # parse command line
@@ -555,14 +555,14 @@ argp.add_argument('--use_docker',
                   default=False,
                   action='store_const',
                   const=True,
-                  help="Run all the tests under docker. That provides " +
-                  "additional isolation and prevents the need to install " +
-                  "language specific prerequisites. Only available on Linux.")
+                  help='Run all the tests under docker. That provides ' +
+                  'additional isolation and prevents the need to install ' +
+                  'language specific prerequisites. Only available on Linux.')
 argp.add_argument('--allow_flakes',
                   default=False,
                   action='store_const',
                   const=True,
-                  help="Allow flaky tests to show as passing (re-runs failed tests up to five times)")
+                  help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
 argp.add_argument('-a', '--antagonists', default=0, type=int)
 argp.add_argument('-x', '--xml_report', default=None, type=str,
         help='Generates a JUnit-compatible XML report')
@@ -578,7 +578,7 @@ if args.use_docker:
     time.sleep(5)
 
   child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
-  run_tests_cmd = 'tools/run_tests/run_tests.py %s' % " ".join(child_argv[1:])
+  run_tests_cmd = 'tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
 
   # TODO(jtattermusch): revisit if we need special handling for arch here
   # set arch command prefix in case we are working with different arch.
@@ -625,9 +625,9 @@ if platform.system() == 'Windows':
     # better do parallel compilation
     # empirically /m:2 gives the best performance/price and should prevent
     # overloading the windows workers.
-    extra_args.extend(["/m:2"])
+    extra_args.extend(['/m:2'])
     # disable PDB generation: it's broken, and we don't need it during CI
-    extra_args.extend(["/p:Jenkins=true"])
+    extra_args.extend(['/p:Jenkins=true'])
     return [
       jobset.JobSpec(['vsprojects\\build.bat',
                       'vsprojects\\%s.sln' % target,
@@ -802,8 +802,10 @@ def _build_and_run(
     check_cancelled, newline_on_success, travis, cache, xml_report=None):
   """Do one pass of building & running tests."""
   # build latest sequentially
-  if not jobset.run(build_steps, maxjobs=1, stop_on_failure=True,
-                    newline_on_success=newline_on_success, travis=travis):
+  num_failures, _ = jobset.run(
+      build_steps, maxjobs=1, stop_on_failure=True,
+      newline_on_success=newline_on_success, travis=travis)
+  if num_failures:
     return 1
 
   # start antagonists
-- 
GitLab