tci-gateway-module / Grpc · Commit d0ee10df

Change jenkins/run_performance.sh to use microbenchmarking

Authored 8 years ago by Matt Kwong
Parent: aff1c05e
Showing 2 changed files, with 22 additions and 15 deletions:

- tools/jenkins/run_performance.sh: +6 −1
- tools/run_tests/run_microbenchmark.py: +16 −14
tools/jenkins/run_performance.sh (+6 −1)

```diff
@@ -31,7 +31,12 @@
 # This script is invoked by Jenkins and runs performance smoke test.
 set -ex
 
+# List of benchmarks that provide good signal for analyzing performance changes in pull requests
+BENCHMARKS_TO_RUN="bm_closure bm_cq bm_call_create bm_error bm_chttp2_hpack bm_metadata"
+
 # Enter the gRPC repo root
 cd $(dirname $0)/../..
 
-tools/run_tests/run_performance_tests.py -l c++ node ruby csharp python --netperf --category smoketest
+# tools/run_tests/run_performance_tests.py -l c++ node ruby csharp python --netperf --category smoketest
+# todo(mattkwong): Change performance test to use microbenchmarking
+tools/run_tests/run_microbenchmark.py -c summary --diff_perf origin/$ghprbTargetBranch -b $BENCHMARKS_TO_RUN
```
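Note that $ghprbTargetBranch is set by Jenkins' GitHub Pull Request Builder plugin (the PR's target branch), and because $BENCHMARKS_TO_RUN is expanded unquoted, the shell splits it into six separate arguments that the -b flag then collects via nargs='+'. A minimal sketch of that hand-off, simulating the shell's word-splitting in Python (the parser fragment mirrors run_microbenchmark.py; the simulation itself is illustrative):

```python
import argparse

# Simulate the argv produced by the unquoted $BENCHMARKS_TO_RUN expansion in
# run_performance.sh: the shell splits the string on whitespace, so -b
# receives six separate benchmark names.
BENCHMARKS_TO_RUN = 'bm_closure bm_cq bm_call_create bm_error bm_chttp2_hpack bm_metadata'
argv = ['-b'] + BENCHMARKS_TO_RUN.split()

# Mirrors the -b/--benchmarks option defined in run_microbenchmark.py.
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--benchmarks', nargs='+', type=str)

args = parser.parse_args(argv)
print(args.benchmarks)  # ['bm_closure', 'bm_cq', ..., 'bm_metadata']
```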
tools/run_tests/run_microbenchmark.py (+16 −14)

```diff
@@ -38,6 +38,17 @@ import argparse
 import python_utils.jobset as jobset
 import python_utils.start_port_server as start_port_server
 
+_AVAILABLE_BENCHMARK_TESTS = ['bm_fullstack_unary_ping_pong',
+                              'bm_fullstack_streaming_ping_pong',
+                              'bm_fullstack_streaming_pump',
+                              'bm_closure',
+                              'bm_cq',
+                              'bm_call_create',
+                              'bm_error',
+                              'bm_chttp2_hpack',
+                              'bm_metadata',
+                              'bm_fullstack_trickle']
+
 flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
 
 os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
```
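The new _AVAILABLE_BENCHMARK_TESTS constant becomes the single source of truth for which microbenchmarks exist; the Jenkins script's BENCHMARKS_TO_RUN names a subset of it. A hypothetical sanity check (not part of this commit) that would tie the two files together:

```python
# Hypothetical check, not in this commit: every benchmark the Jenkins script
# asks for must be a valid choice in run_microbenchmark.py.
_AVAILABLE_BENCHMARK_TESTS = ['bm_fullstack_unary_ping_pong',
                              'bm_fullstack_streaming_ping_pong',
                              'bm_fullstack_streaming_pump',
                              'bm_closure', 'bm_cq', 'bm_call_create',
                              'bm_error', 'bm_chttp2_hpack', 'bm_metadata',
                              'bm_fullstack_trickle']
BENCHMARKS_TO_RUN = 'bm_closure bm_cq bm_call_create bm_error bm_chttp2_hpack bm_metadata'

assert set(BENCHMARKS_TO_RUN.split()) <= set(_AVAILABLE_BENCHMARK_TESTS)
```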
```diff
@@ -201,17 +212,8 @@ argp.add_argument('-c', '--collect',
                   default=sorted(collectors.keys()),
                   help='Which collectors should be run against each benchmark')
 argp.add_argument('-b', '--benchmarks',
-                  default=['bm_fullstack_unary_ping_pong',
-                           'bm_fullstack_streaming_ping_pong',
-                           'bm_fullstack_streaming_pump',
-                           'bm_closure',
-                           'bm_cq',
-                           'bm_call_create',
-                           'bm_error',
-                           'bm_chttp2_hpack',
-                           'bm_metadata',
-                           'bm_fullstack_trickle',
-                           ],
+                  choices=_AVAILABLE_BENCHMARK_TESTS,
+                  default=_AVAILABLE_BENCHMARK_TESTS,
                   nargs='+',
                   type=str,
                   help='Which microbenchmarks should be run')
```
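Swapping the inline default list for choices=_AVAILABLE_BENCHMARK_TESTS and default=_AVAILABLE_BENCHMARK_TESTS has two effects: omitting -b now runs every known benchmark, and a misspelled benchmark name is rejected at parse time rather than failing later. A small sketch of both behaviors (the option definition is as in the diff; the abbreviated list is illustrative):

```python
import argparse

_AVAILABLE_BENCHMARK_TESTS = ['bm_closure', 'bm_cq', 'bm_metadata']  # abbreviated

parser = argparse.ArgumentParser()
parser.add_argument('-b', '--benchmarks',
                    choices=_AVAILABLE_BENCHMARK_TESTS,
                    default=_AVAILABLE_BENCHMARK_TESTS,
                    nargs='+',
                    type=str)

print(parser.parse_args([]).benchmarks)               # full list (the default)
print(parser.parse_args(['-b', 'bm_cq']).benchmarks)  # ['bm_cq']
# parser.parse_args(['-b', 'bm_typo'])                # exits: invalid choice
```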
```diff
@@ -229,20 +231,20 @@ argp.add_argument('--summary_time',
                   type=int,
                   help='Minimum time to run benchmarks for the summary collection')
 args = argp.parse_args()
 
+if args.diff_perf:
+  git_comment = ''
+
 try:
   for collect in args.collect:
     for bm_name in args.benchmarks:
       collectors[collect](bm_name, args)
   if args.diff_perf:
     git_comment = 'Performance differences between this PR and %s\\n' % args.diff_perf
     if 'summary' not in args.collect:
       for bm_name in args.benchmarks:
         run_summary(bm_name, 'opt', bm_name)
         run_summary(bm_name, 'counters', bm_name)
     where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
-    subprocess.check_call(['git', 'checkout', args.diff_perf])
+    # todo(mattkwong): uncomment this before merging
+    # subprocess.check_call(['git', 'checkout', args.diff_perf])
     comparables = []
     subprocess.check_call(['make', 'clean'])
     try:
```
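The surrounding code sketches the --diff_perf comparison flow: run the selected benchmarks on the PR branch, record the current ref with git rev-parse, check out the comparison target (temporarily disabled here by the todo), rebuild, and rerun. A condensed, hypothetical outline of that flow; the helper signature is illustrative, not taken from the file:

```python
import subprocess

def compare_against(target, benchmarks, run_summary):
    """Hypothetical outline of the --diff_perf flow; names are illustrative."""
    # Remember the ref we started on so the working tree can be restored.
    where_am_i = subprocess.check_output(
        ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip().decode()
    try:
        # Benchmarks have already run on the PR branch; switch to the
        # comparison target and rebuild from a clean slate.
        subprocess.check_call(['git', 'checkout', target])
        subprocess.check_call(['make', 'clean'])
        for bm_name in benchmarks:
            # Rerun each benchmark on the target branch for comparison.
            run_summary(bm_name, 'opt', bm_name)
    finally:
        # Return to the original branch even if a step fails.
        subprocess.check_call(['git', 'checkout', where_am_i])
```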