diff --git a/tools/run_tests/dockerjob.py b/tools/run_tests/dockerjob.py
index 1d67fe3033e2d0880fedb512cc96ff0dbeaaf80f..7d64222ba0b53931f57adf0869bc80ee6dd98000 100755
--- a/tools/run_tests/dockerjob.py
+++ b/tools/run_tests/dockerjob.py
@@ -101,7 +101,7 @@ class DockerJob:
 
   def __init__(self, spec):
     self._spec = spec
-    self._job = jobset.Job(spec, bin_hash=None, newline_on_success=True, travis=True, add_env={}, xml_report=None)
+    self._job = jobset.Job(spec, bin_hash=None, newline_on_success=True, travis=True, add_env={})
     self._container_name = spec.container_name
 
   def mapped_port(self, port):
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index a8ff9f613fbf55daf32e8914ae8128677262b4c3..0c4d1b8143c8157dd79814828ace7761899af376 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -34,15 +34,14 @@ import multiprocessing
 import os
 import platform
 import signal
-import string
 import subprocess
 import sys
 import tempfile
 import time
-import xml.etree.cElementTree as ET
 
 
 _DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
+_MAX_RESULT_SIZE = 8192
 
 
 # setup a signal handler so that signal.pause registers 'something'
@@ -130,14 +129,6 @@ def which(filename):
   raise Exception('%s not found' % filename)
 
 
-def _filter_stdout(stdout):
-  """Filters out nonprintable and XML-illegal characters from stdout."""
-  # keep whitespaces but remove formfeed and vertical tab characters
-  # that make XML report unparseable.
-  return filter(lambda x: x in string.printable and x != '\f' and x != '\v',
-                stdout.decode(errors='ignore'))
-
-
 class JobSpec(object):
   """Specifies what to run for a job."""
 
@@ -190,14 +181,12 @@ class JobResult(object):
 class Job(object):
   """Manages one job."""
 
-  def __init__(self, spec, bin_hash, newline_on_success, travis, add_env, xml_report):
+  def __init__(self, spec, bin_hash, newline_on_success, travis, add_env):
     self._spec = spec
     self._bin_hash = bin_hash
     self._newline_on_success = newline_on_success
     self._travis = travis
     self._add_env = add_env.copy()
-    self._xml_test = ET.SubElement(xml_report, 'testcase',
-                                   name=self._spec.shortname) if xml_report is not None else None
     self._retries = 0
     self._timeout_retries = 0
     self._suppress_failure_message = False
@@ -224,20 +213,12 @@ class Job(object):
 
   def state(self, update_cache):
     """Poll current state of the job. Prints messages at completion."""
+    self._tempfile.seek(0)
+    stdout = self._tempfile.read()
+    self.result.message = stdout[-_MAX_RESULT_SIZE:]
     if self._state == _RUNNING and self._process.poll() is not None:
       elapsed = time.time() - self._start
-      self._tempfile.seek(0)
-      stdout = self._tempfile.read()
-      filtered_stdout = _filter_stdout(stdout)
-      # TODO: looks like jenkins master is slow because parsing the junit results XMLs is not
-      # implemented efficiently. This is an experiment to workaround the issue by making sure
-      # results.xml file is small enough.
-      filtered_stdout = filtered_stdout[-128:]
-      self.result.message = filtered_stdout
       self.result.elapsed_time = elapsed
-      if self._xml_test is not None:
-        self._xml_test.set('time', str(elapsed))
-        ET.SubElement(self._xml_test, 'system-out').text = filtered_stdout
       if self._process.returncode != 0:
         if self._retries < self._spec.flake_retries:
           message('FLAKE', '%s [ret=%d, pid=%d]' % (
@@ -256,8 +237,6 @@ class Job(object):
           self.result.state = 'FAILED'
           self.result.num_failures += 1
           self.result.returncode = self._process.returncode
-          if self._xml_test is not None:
-            ET.SubElement(self._xml_test, 'failure', message='Failure')
       else:
         self._state = _SUCCESS
         message('PASSED', '%s [time=%.1fsec; retries=%d;%d]' % (
@@ -267,10 +246,6 @@ class Job(object):
         if self._bin_hash:
           update_cache.finished(self._spec.identity(), self._bin_hash)
     elif self._state == _RUNNING and time.time() - self._start > self._spec.timeout_seconds:
-      self._tempfile.seek(0)
-      stdout = self._tempfile.read()
-      filtered_stdout = _filter_stdout(stdout)
-      self.result.message = filtered_stdout
       if self._timeout_retries < self._spec.timeout_retries:
         message('TIMEOUT_FLAKE', self._spec.shortname, stdout, do_newline=True)
         self._timeout_retries += 1
@@ -285,9 +260,6 @@ class Job(object):
         self.kill()
         self.result.state = 'TIMEOUT'
         self.result.num_failures += 1
-        if self._xml_test is not None:
-          ET.SubElement(self._xml_test, 'system-out').text = filtered_stdout
-          ET.SubElement(self._xml_test, 'error', message='Timeout')
     return self._state
 
   def kill(self):
@@ -305,7 +277,7 @@ class Jobset(object):
   """Manages one run of jobs."""
 
   def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
-               stop_on_failure, add_env, cache, xml_report):
+               stop_on_failure, add_env, cache):
     self._running = set()
     self._check_cancelled = check_cancelled
     self._cancelled = False
@@ -317,7 +289,6 @@ class Jobset(object):
     self._cache = cache
     self._stop_on_failure = stop_on_failure
     self._hashes = {}
-    self._xml_report = xml_report
     self._add_env = add_env
     self.resultset = {}
     
@@ -349,8 +320,7 @@ class Jobset(object):
                 bin_hash,
                 self._newline_on_success,
                 self._travis,
-                self._add_env,
-                self._xml_report)
+                self._add_env)
       self._running.add(job)
       self.resultset[job.GetSpec().shortname] = []
     return True
@@ -424,13 +394,11 @@ def run(cmdlines,
         infinite_runs=False,
         stop_on_failure=False,
         cache=None,
-        xml_report=None,
         add_env={}):
   js = Jobset(check_cancelled,
               maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
               newline_on_success, travis, stop_on_failure, add_env,
-              cache if cache is not None else NoCache(),
-              xml_report)
+              cache if cache is not None else NoCache())
   for cmdline in cmdlines:
     if not js.start(cmdline):
       break
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..57a93d0da056d42e569d826023e5b8bbcbd8cb59
--- /dev/null
+++ b/tools/run_tests/report_utils.py
@@ -0,0 +1,196 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Generate XML and HTML test reports."""
+
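+# Typical usage (mirrors run_interop_tests.py in this change):
+#   num_failures, resultset = jobset.run(jobs, ...)
+#   report_utils.render_xml_report(resultset, 'report.xml')
+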
+import os
+import string
+import xml.etree.cElementTree as ET
+
+
+def _filter_msg(msg, output_format):
+  """Filters out nonprintable and illegal characters from the message."""
+  if output_format in ['XML', 'HTML']:
+    # Keep whitespace but remove formfeed and vertical tab characters
+    # that make the XML report unparseable.
+    filtered_msg = filter(
+        lambda x: x in string.printable and x != '\f' and x != '\v',
+        msg.decode(errors='ignore'))
+    if output_format == 'HTML':
+      filtered_msg = filtered_msg.replace('"', '&quot;')
+    return filtered_msg
+  else:
+    return msg
+
+
+def render_xml_report(resultset, xml_report):
+  """Generate JUnit-like XML report."""
+  root = ET.Element('testsuites')
+  testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc',
+                            name='tests')
+  for shortname, results in resultset.iteritems():
+    for result in results:
+      xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
+      if result.elapsed_time:
+        xml_test.set('time', str(result.elapsed_time))
+      ET.SubElement(xml_test, 'system-out').text = _filter_msg(result.message,
+                                                               'XML')
+      if result.state == 'FAILED':
+        ET.SubElement(xml_test, 'failure', message='Failure')
+      elif result.state == 'TIMEOUT':
+        ET.SubElement(xml_test, 'error', message='Timeout')
+  tree = ET.ElementTree(root)
+  tree.write(xml_report, encoding='UTF-8')
+
+
+# TODO(adelez): Use mako template.
+def fill_one_test_result(shortname, resultset, html_str):
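+  """Returns html_str with one <td> cell appended for shortname's result."""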
+  if shortname in resultset:
+    # Because interop tests do not have a runs_per_test flag, each test is run
+    # once, so there should be only one element per result.
+    result = resultset[shortname][0]
+    if result.state == 'PASSED':
+      html_str = '%s<td bgcolor=\"green\">PASS</td>\n' % html_str
+    else:
+      tooltip = ''
+      if result.returncode > 0 or result.message:
+        if result.returncode > 0:
+          tooltip = 'returncode: %d ' % result.returncode
+        if result.message:
+          escaped_msg = _filter_msg(result.message, 'HTML')
+          tooltip = '%smessage: %s' % (tooltip, escaped_msg)
+      if result.state == 'FAILED':
+        html_str = '%s<td bgcolor=\"red\">' % html_str
+        if tooltip:
+          html_str = ('%s<a href=\"#\" data-toggle=\"tooltip\" '
+                      'data-placement=\"auto\" title=\"%s\">FAIL</a></td>\n' %
+                      (html_str, tooltip))
+        else:
+          html_str = '%sFAIL</td>\n' % html_str
+      elif result.state == 'TIMEOUT':
+        html_str = '%s<td bgcolor=\"yellow\">' % html_str
+        if tooltip:
+          html_str = ('%s<a href=\"#\" data-toggle=\"tooltip\" '
+                      'data-placement=\"auto\" title=\"%s\">TIMEOUT</a></td>\n'
+                      % (html_str, tooltip))
+        else:
+          html_str = '%sTIMEOUT</td>\n' % html_str
+  else:
+    html_str = '%s<td bgcolor=\"magenta\">Not implemented</td>\n' % html_str
+
+  return html_str
+
+
+def render_html_report(client_langs, server_langs, test_cases, auth_test_cases,
+                       resultset, num_failures, cloud_to_prod):
+  """Generate html report."""
+  sorted_test_cases = sorted(test_cases)
+  sorted_auth_test_cases = sorted(auth_test_cases)
+  sorted_client_langs = sorted(client_langs)
+  sorted_server_langs = sorted(server_langs)
+  html_str = ('<!DOCTYPE html>\n'
+              '<html lang=\"en\">\n'
+              '<head><title>Interop Test Result</title></head>\n'
+              '<body>\n')
+  if num_failures > 1:
+    html_str = (
+        '%s<p><h2><font color=\"red\">%d tests failed!</font></h2></p>\n' %
+        (html_str, num_failures))
+  elif num_failures:
+    html_str = (
+        '%s<p><h2><font color=\"red\">%d test failed!</font></h2></p>\n' %
+        (html_str, num_failures))
+  else:
+    html_str = (
+        '%s<p><h2><font color=\"green\">All tests passed!</font></h2></p>\n' %
+        html_str)
+  if cloud_to_prod:
+    # Each column header is the client language.
+    html_str = ('%s<h2>Cloud to Prod</h2>\n'
+                '<table style=\"width:100%%\" border=\"1\">\n'
+                '<tr bgcolor=\"#00BFFF\">\n'
+                '<th>Client languages &#9658;</th>\n') % html_str
+    for client_lang in sorted_client_langs:
+      html_str = '%s<th>%s\n' % (html_str, client_lang)
+    html_str = '%s</tr>\n' % html_str
+    for test_case in sorted_test_cases + sorted_auth_test_cases:
+      html_str = '%s<tr><td><b>%s</b></td>\n' % (html_str, test_case)
+      for client_lang in sorted_client_langs:
+        if test_case not in sorted_auth_test_cases:
+          shortname = 'cloud_to_prod:%s:%s' % (client_lang, test_case)
+        else:
+          shortname = 'cloud_to_prod_auth:%s:%s' % (client_lang, test_case)
+        html_str = fill_one_test_result(shortname, resultset, html_str)
+      html_str = '%s</tr>\n' % html_str
+    html_str = '%s</table>\n' % html_str
+  if server_langs:
+    for test_case in sorted_test_cases:
+      # Each column header is the client language.
+      html_str = ('%s<h2>%s</h2>\n'
+                  '<table style=\"width:100%%\" border=\"1\">\n'
+                  '<tr bgcolor=\"#00BFFF\">\n'
+                  '<th>Client languages &#9658;<br/>'
+                  'Server languages &#9660;</th>\n') % (html_str, test_case)
+      for client_lang in sorted_client_langs:
+        html_str = '%s<th>%s\n' % (html_str, client_lang)
+      html_str = '%s</tr>\n' % html_str
+      # Each row header is the server language.
+      for server_lang in sorted_server_langs:
+        html_str = '%s<tr><td><b>%s</b></td>\n' % (html_str, server_lang)
+        # Fill the cells with test results.
+        for client_lang in sorted_client_langs:
+          shortname = 'cloud_to_cloud:%s:%s_server:%s' % (
+              client_lang, server_lang, test_case)
+          html_str = fill_one_test_result(shortname, resultset, html_str)
+        html_str = '%s</tr>\n' % html_str
+      html_str = '%s</table>\n' % html_str
+
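+  # The data-toggle tooltips below assume jQuery/Bootstrap are loaded by the
+  # page hosting this report; without them the table still renders.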
+  html_str = ('%s\n'
+              '<script>\n'
+              '$(document).ready(function(){'
+              '$(\'[data-toggle=\"tooltip\"]\').tooltip();\n'
+              '});\n'
+              '</script>\n'
+              '</body>\n'
+              '</html>') % html_str
+
+  # Write to reports/index.html, as set up in the Jenkins plugin.
+  html_report_dir = 'reports'
+  if not os.path.exists(html_report_dir):
+    os.mkdir(html_report_dir)
+  html_file_path = os.path.join(html_report_dir, 'index.html')
+  with open(html_file_path, 'w') as f:
+    f.write(html_str)
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 729f962bb18f3a389a5eeff3f8904e57ed2f2cd4..cebe24688672d0108f6078e9d29a4da6ad1cf1f4 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -33,10 +33,10 @@
 import argparse
 import dockerjob
 import itertools
-import xml.etree.cElementTree as ET
 import jobset
 import multiprocessing
 import os
+import report_utils
 import subprocess
 import sys
 import tempfile
@@ -471,126 +471,6 @@ def build_interop_image_jobspec(language, tag=None):
   return build_job
 
 
-# TODO(adelez): Use mako template.
-def fill_one_test_result(shortname, resultset, html_str):
-  if shortname in resultset:
-    # Because interop tests does not have runs_per_test flag, each test is run
-    # once. So there should only be one element for each result.
-    result = resultset[shortname][0] 
-    if result.state == 'PASSED':
-      html_str = '%s<td bgcolor=\"green\">PASS</td>\n' % html_str
-    else:
-      tooltip = ''
-      if result.returncode > 0 or result.message:
-        if result.returncode > 0:
-          tooltip = 'returncode: %d ' % result.returncode
-        if result.message:
-          escaped_msg = result.message.replace('"', '&quot;')
-          tooltip = '%smessage: %s' % (tooltip, escaped_msg)     
-      if result.state == 'FAILED':
-        html_str = '%s<td bgcolor=\"red\">' % html_str
-        if tooltip:  
-          html_str = ('%s<a href=\"#\" data-toggle=\"tooltip\" '
-                      'data-placement=\"auto\" title=\"%s\">FAIL</a></td>\n' % 
-                      (html_str, tooltip))
-        else:
-          html_str = '%sFAIL</td>\n' % html_str
-      elif result.state == 'TIMEOUT':
-        html_str = '%s<td bgcolor=\"yellow\">' % html_str
-        if tooltip:
-          html_str = ('%s<a href=\"#\" data-toggle=\"tooltip\" '
-                      'data-placement=\"auto\" title=\"%s\">TIMEOUT</a></td>\n' 
-                      % (html_str, tooltip))
-        else:
-          html_str = '%sTIMEOUT</td>\n' % html_str
-  else:
-    html_str = '%s<td bgcolor=\"magenta\">Not implemented</td>\n' % html_str
-  
-  return html_str
-
-
-def render_html_report(client_langs, server_langs, resultset,
-                       num_failures):
-  """Generate html report."""
-  sorted_test_cases = sorted(_TEST_CASES)
-  sorted_auth_test_cases = sorted(_AUTH_TEST_CASES)
-  sorted_client_langs = sorted(client_langs)
-  sorted_server_langs = sorted(server_langs)
-  html_str = ('<!DOCTYPE html>\n'
-              '<html lang=\"en\">\n'
-              '<head><title>Interop Test Result</title></head>\n'
-              '<body>\n')
-  if num_failures > 1:
-    html_str = (
-        '%s<p><h2><font color=\"red\">%d tests failed!</font></h2></p>\n' % 
-        (html_str, num_failures))
-  elif num_failures:
-    html_str = (
-        '%s<p><h2><font color=\"red\">%d test failed!</font></h2></p>\n' % 
-        (html_str, num_failures))
-  else:
-    html_str = (
-        '%s<p><h2><font color=\"green\">All tests passed!</font></h2></p>\n' % 
-        html_str)
-  if args.cloud_to_prod_auth or args.cloud_to_prod:
-    # Each column header is the client language.
-    html_str = ('%s<h2>Cloud to Prod</h2>\n' 
-                '<table style=\"width:100%%\" border=\"1\">\n'
-                '<tr bgcolor=\"#00BFFF\">\n'
-                '<th>Client languages &#9658;</th>\n') % html_str
-    for client_lang in sorted_client_langs:
-      html_str = '%s<th>%s\n' % (html_str, client_lang)
-    html_str = '%s</tr>\n' % html_str
-    for test_case in sorted_test_cases + sorted_auth_test_cases:
-      html_str = '%s<tr><td><b>%s</b></td>\n' % (html_str, test_case)
-      for client_lang in sorted_client_langs:
-        if not test_case in sorted_auth_test_cases:
-          shortname = 'cloud_to_prod:%s:%s' % (client_lang, test_case)
-        else:
-          shortname = 'cloud_to_prod_auth:%s:%s' % (client_lang, test_case)
-        html_str = fill_one_test_result(shortname, resultset, html_str)
-      html_str = '%s</tr>\n' % html_str 
-    html_str = '%s</table>\n' % html_str
-  if servers:
-    for test_case in sorted_test_cases:
-      # Each column header is the client language.
-      html_str = ('%s<h2>%s</h2>\n' 
-                  '<table style=\"width:100%%\" border=\"1\">\n'
-                  '<tr bgcolor=\"#00BFFF\">\n'
-                  '<th>Client languages &#9658;<br/>'
-                  'Server languages &#9660;</th>\n') % (html_str, test_case)
-      for client_lang in sorted_client_langs:
-        html_str = '%s<th>%s\n' % (html_str, client_lang)
-      html_str = '%s</tr>\n' % html_str
-      # Each row head is the server language.
-      for server_lang in sorted_server_langs:
-        html_str = '%s<tr><td><b>%s</b></td>\n' % (html_str, server_lang)
-        # Fill up the cells with test result.
-        for client_lang in sorted_client_langs:
-          shortname = 'cloud_to_cloud:%s:%s_server:%s' % (
-              client_lang, server_lang, test_case)
-          html_str = fill_one_test_result(shortname, resultset, html_str)
-        html_str = '%s</tr>\n' % html_str
-      html_str = '%s</table>\n' % html_str
-
-  html_str = ('%s\n'
-              '<script>\n'
-              '$(document).ready(function(){'
-              '$(\'[data-toggle=\"tooltip\"]\').tooltip();\n'   
-              '});\n'
-              '</script>\n'
-              '</body>\n'
-              '</html>') % html_str  
-  
-  # Write to reports/index.html as set up in Jenkins plugin.
-  html_report_dir = 'reports'
-  if not os.path.exists(html_report_dir):
-    os.mkdir(html_report_dir)
-  html_file_path = os.path.join(html_report_dir, 'index.html')
-  with open(html_file_path, 'w') as f:
-    f.write(html_str)
-
-
 argp = argparse.ArgumentParser(description='Run interop tests.')
 argp.add_argument('-l', '--language',
                   choices=['all'] + sorted(_LANGUAGES),
@@ -740,22 +620,18 @@ try:
       dockerjob.remove_image(image, skip_nonexistent=True)
     sys.exit(1)
 
-  root = ET.Element('testsuites')
-  testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests')
-
   num_failures, resultset = jobset.run(jobs, newline_on_success=True, 
-                                       maxjobs=args.jobs, xml_report=testsuite)
+                                       maxjobs=args.jobs)
   if num_failures:
     jobset.message('FAILED', 'Some tests failed', do_newline=True)
   else:
     jobset.message('SUCCESS', 'All tests passed', do_newline=True)
 
-  tree = ET.ElementTree(root)
-  tree.write('report.xml', encoding='UTF-8')
+  report_utils.render_xml_report(resultset, 'report.xml')
   
-  # Generate HTML report.
-  render_html_report(set([str(l) for l in languages]), servers,
-                     resultset, num_failures)
+  report_utils.render_html_report(
+      set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
+      resultset, num_failures, args.cloud_to_prod_auth or args.cloud_to_prod)
 
 finally:
   # Check if servers are still running.
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 4232637c7f83117b3edfe2f7e4e7004ee1155ae4..ae7899e47ee9bc23ff9daa0b50f7204f45b35e08 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -46,10 +46,10 @@ import sys
 import tempfile
 import traceback
 import time
-import xml.etree.cElementTree as ET
 import urllib2
 
 import jobset
+import report_utils
 import watch_dirs
 
 ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
@@ -867,15 +867,11 @@ def _build_and_run(
                      else itertools.repeat(massaged_one_run, runs_per_test))
     all_runs = itertools.chain.from_iterable(runs_sequence)
 
-    root = ET.Element('testsuites') if xml_report else None
-    testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests') if xml_report else None
-
     number_failures, resultset = jobset.run(
-        all_runs, check_cancelled, newline_on_success=newline_on_success, 
+        all_runs, check_cancelled, newline_on_success=newline_on_success,
         travis=travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
         stop_on_failure=args.stop_on_failure,
         cache=cache if not xml_report else None,
-        xml_report=testsuite,
         add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
     if resultset:
       for k, v in resultset.iteritems():
@@ -894,8 +890,7 @@ def _build_and_run(
     for antagonist in antagonists:
       antagonist.kill()
     if xml_report:
-      tree = ET.ElementTree(root)
-      tree.write(xml_report, encoding='UTF-8')
+      report_utils.render_xml_report(resultset, xml_report)
 
   number_failures, _ = jobset.run(
       post_tests_steps, maxjobs=1, stop_on_failure=True,