diff --git a/tools/run_tests/perf_html_report.template b/tools/run_tests/perf_html_report.template
new file mode 100644
index 0000000000000000000000000000000000000000..c219fa888a8386d5d8508cc974cebecfbbbaa980
--- /dev/null
+++ b/tools/run_tests/perf_html_report.template
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html lang="en">
+<head><title>Performance Test Result</title></head>
+<body>
+  <h2>Performance Test Result</h2>
+  <table style="width:50%" border="1">
+  <% sorted_test_cases = sorted(resultset.keys()) %>
+  % for test_case in sorted_test_cases:
+    <tr><td bgcolor="#00BFFF" style="width:30%"><b>${test_case}</b></td>
+    <% result = resultset[test_case] %>
+    <td>
+    % for k, v in result.items():
+      ${k}: ${v}<br>
+    % endfor
+    </td>
+    </tr>
+  % endfor
+  </table>
+
+</body>
+</html>
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 5648a694cd0db4881b093891ec5384c9e4f23316..7188d3dcd705895a3b988c3eead8227db46446ff 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -37,6 +37,8 @@ try:
   from mako import exceptions
 except (ImportError):
   pass  # Mako not installed but it is ok. 
+import glob
+import json
 import os
 import string
 import xml.etree.cElementTree as ET
@@ -120,3 +122,40 @@ def render_interop_html_report(
     print(exceptions.text_error_template().render())
     raise
 
+
+def render_perf_html_report(report_dir):
+  """Generate a simple HTML report for the perf tests."""
+  template_file = 'tools/run_tests/perf_html_report.template'
+  try:
+    mytemplate = Template(filename=template_file, format_exceptions=True)
+  except NameError:
+    print('Mako template is not installed. Skipping HTML report generation.')
+    return
+  except IOError as e:
+    print('Failed to find the template %s: %s' % (template_file, e))
+    return
+
+  resultset = {}
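+  # Collect a per-test-case summary from every scenario result JSON in report_dir.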
+  for result_file in glob.glob(os.path.join(report_dir, '*.json')):
+    with open(result_file, 'r') as f:
+      scenario_result = json.load(f)
+      test_case = scenario_result['scenario']['name']
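+      # Ping-pong scenarios measure latency; all other scenarios report qps.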
+      if 'ping_pong' in test_case:
+        latency50 = round(scenario_result['summary']['latency50'], 2)
+        latency99 = round(scenario_result['summary']['latency99'], 2)
+        summary = {'latency50': latency50, 'latency99': latency99}
+      else:
+        summary = {'qps': round(scenario_result['summary']['qps'], 2)}
+      resultset[test_case] = summary
+
+  args = {'resultset': resultset}
+
+  html_file_path = os.path.join(report_dir, 'index.html')
+  try:
+    with open(html_file_path, 'w') as output_file:
+      mytemplate.render_context(Context(output_file, **args))
+  except Exception:
+    print(exceptions.text_error_template().render())
+    raise
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 5fdf7a407d90102128a69bb9ba7ffd36da69009b..5ff96968089f23ae2404fdc7deb80b730f822c22 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -40,6 +40,7 @@ import multiprocessing
 import os
 import pipes
 import re
+import report_utils
 import subprocess
 import sys
 import tempfile
@@ -54,6 +55,7 @@ os.chdir(_ROOT)
 
 
 _REMOTE_HOST_USERNAME = 'jenkins'
+_REPORT_DIR = 'perf_reports'
 
 
 class QpsWorkerJob:
@@ -103,7 +105,12 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
     cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
   cmd += 'tools/run_tests/performance/run_qps_driver.sh '
   cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
-  cmd += '--scenario_result_file=scenario_result.json'
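+  # Write each scenario's result file into _REPORT_DIR so the HTML report
+  # generation step can collect them all.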
+  if not os.path.isdir(_REPORT_DIR):
+    os.makedirs(_REPORT_DIR)
+  report_path = os.path.join(_REPORT_DIR,
+                             '%s-scenario_result.json' % scenario_json['name'])
+  cmd += '--scenario_result_file=%s' % report_path
   if remote_host:
     user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
     cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
@@ -436,6 +442,10 @@ try:
   jobset.message('START', 'Running scenarios.', do_newline=True)
   num_failures, _ = jobset.run(
       scenarios, newline_on_success=True, maxjobs=1)
+
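+  # Render an HTML summary of the scenario results collected under _REPORT_DIR.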
+  report_utils.render_perf_html_report(_REPORT_DIR)
+
   if num_failures == 0:
     jobset.message('SUCCESS',
                    'All scenarios finished successfully.',