run_microbenchmark.py

    #!/usr/bin/env python2.7
    # Copyright 2017, Google Inc.
    # All rights reserved.
    #
    # Redistribution and use in source and binary forms, with or without
    # modification, are permitted provided that the following conditions are
    # met:
    #
    #     * Redistributions of source code must retain the above copyright
    # notice, this list of conditions and the following disclaimer.
    #     * Redistributions in binary form must reproduce the above
    # copyright notice, this list of conditions and the following disclaimer
    # in the documentation and/or other materials provided with the
    # distribution.
    #     * Neither the name of Google Inc. nor the names of its
    # contributors may be used to endorse or promote products derived from
    # this software without specific prior written permission.
    #
    # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    
    import argparse
    import multiprocessing
    import os
    import subprocess
    import sys
    
    import python_utils.jobset as jobset
    import python_utils.start_port_server as start_port_server
    
    flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
    
    os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
    if not os.path.exists('reports'):
      os.makedirs('reports')
    
    # start the port server that hands out free ports to the benchmark processes
    # (the benchmark jobs locate it through GRPC_TEST_PORT_SERVER, set below)
    port_server_port = 32766
    start_port_server.start_port_server(port_server_port)
    
    def fnize(s):
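      """Turn a benchmark name into a filesystem-friendly file name.

      Characters in '<>, /' are replaced with '_', collapsing runs of them into
      a single underscore (e.g. 'BM_Foo<A, B>/1' -> 'BM_Foo_A_B_1').
      """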
      out = ''
      for c in s:
        if c in '<>, /':
          if len(out) and out[-1] == '_': continue
          out += '_'
        else:
          out += c
      return out
    
    # html for the index page, accumulated by the helpers below and written to
    # reports/index.html at the end of the run
    index_html = """
    <html>
    <head>
    <title>Microbenchmark Results</title>
    </head>
    <body>
    """
    
    def heading(name):
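      """Append an <h1> section heading to the index page."""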
      global index_html
      index_html += "<h1>%s</h1>\n" % name
    
    def link(txt, tgt):
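      """Append a paragraph with a hyperlink (txt pointing at tgt) to the index page."""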
      global index_html
      index_html += "<p><a href=\"%s\">%s</a></p>\n" % (tgt, txt)
    
    def text(txt):
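      """Append a block of preformatted text to the index page."""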
      global index_html
      index_html += "<p><pre>%s</pre></p>" % txt
    
    def collect_latency(bm_name, args):
      """generate latency profiles"""
      benchmarks = []
      profile_analysis = []
      cleanup = []
    
      heading('Latency Profiles: %s' % bm_name)
      subprocess.check_call(
          ['make', bm_name,
           'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
      for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
                                           '--benchmark_list_tests']).splitlines():
        link(line, '%s.txt' % fnize(line))
        benchmarks.append(
            jobset.JobSpec(['bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' % line],
                           environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
        profile_analysis.append(
            jobset.JobSpec([sys.executable,
                            'tools/profiling/latency_profile/profile_analyzer.py',
                            '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
                            '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=None))
        cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
        # periodically flush the list of jobs: a single profile_analysis job can
        # consume upwards of five gigabytes of RAM in some cases, so analysing
        # hundreds of them at once is impractical -- but we still want some
        # concurrency or the work takes too long
        if len(benchmarks) >= min(4, multiprocessing.cpu_count()):
          # run up to half the cpu count: each benchmark can use up to two cores
          # (one for the microbenchmark, one for the data flush)
          jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
                     add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
          jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
          jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
          benchmarks = []
          profile_analysis = []
          cleanup = []
      # run the remaining benchmarks that weren't flushed
      if benchmarks:
        jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
                   add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
    
    def collect_perf(bm_name, args):
      """generate flamegraphs"""
      heading('Flamegraphs: %s' % bm_name)
      subprocess.check_call(
          ['make', bm_name,
           'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
      for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
                                           '--benchmark_list_tests']).splitlines():
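        # sample the benchmark under perf with call graphs (-g), one sample per 1000 events (-c 1000)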
        subprocess.check_call(['sudo', 'perf', 'record', '-g', '-c', '1000',
                               'bins/mutrace/%s' % bm_name,
                               '--benchmark_filter=^%s$' % line,
                               '--benchmark_min_time=20'])
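        # dump the recorded samples and collapse them into folded stacks for flamegraph.pl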
        with open('/tmp/bm.perf', 'w') as f:
          f.write(subprocess.check_output(['sudo', 'perf', 'script']))
        with open('/tmp/bm.folded', 'w') as f:
          f.write(subprocess.check_output([
              '%s/stackcollapse-perf.pl' % flamegraph_dir, '/tmp/bm.perf']))
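        # write the svg flamegraph into the reports directory and link it from the index page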
        link(line, '%s.svg' % fnize(line))
        with open('reports/%s.svg' % fnize(line), 'w') as f:
          f.write(subprocess.check_output([
              '%s/flamegraph.pl' % flamegraph_dir, '/tmp/bm.folded']))
    
    def collect_summary(bm_name, args):
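      """generate a summary counters table and optionally upload it to bigquery"""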
      heading('Summary: %s' % bm_name)
      subprocess.check_call(
          ['make', bm_name,
           'CONFIG=counters', '-j', '%d' % multiprocessing.cpu_count()])
      text(subprocess.check_output(['bins/counters/%s' % bm_name,
                                    '--benchmark_out=out.json',
                                    '--benchmark_out_format=json']))
      if args.bigquery_upload:
        # convert the benchmark json output to csv and load it into bigquery
        with open('out.csv', 'w') as f:
          f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.json']))
        subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', 'out.csv'])
    
    collectors = {
      'latency': collect_latency,
      'perf': collect_perf,
      'summary': collect_summary,
    }
    
    argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
    argp.add_argument('-c', '--collect',
                      choices=sorted(collectors.keys()),
                      nargs='+',
                      default=sorted(collectors.keys()),
                      help='Which collectors should be run against each benchmark')
    argp.add_argument('-b', '--benchmarks',
                      default=['bm_fullstack'],
                      nargs='+',
                      type=str,
                      help='Which microbenchmarks should be run')
    argp.add_argument('--bigquery_upload',
                      default=False,
                      action='store_true',
                      help='Upload results from summary collection to bigquery')
    args = argp.parse_args()
    
    for bm_name in args.benchmarks:
      for collect in args.collect:
        collectors[collect](bm_name, args)
    
    index_html += "</body>\n</html>\n"
    with open('reports/index.html', 'w') as f:
      f.write(index_html)
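
A typical invocation, as a sketch (the flag names come from the argparse definitions above; the path assumes the script sits two levels below the repository root, as the os.chdir call implies, e.g. tools/run_tests/):

    # run all collectors against the default benchmark (bm_fullstack)
    tools/run_tests/run_microbenchmark.py
    # run only the summary collector and upload the results to bigquery
    tools/run_tests/run_microbenchmark.py --collect summary --bigquery_upload

The generated report lands in reports/index.html, with the per-benchmark latency text files and flamegraph svgs written alongside it.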