diff --git a/test/core/memory_usage/client.c b/test/core/memory_usage/client.c
index 09f0e2d8670f52d792e8b1b0a2b374566edf31c8..107abbc1b3da4c523af5a3e43216a41d9b435044 100644
--- a/test/core/memory_usage/client.c
+++ b/test/core/memory_usage/client.c
@@ -43,6 +43,7 @@
 #include <grpc/support/log.h>
 #include <grpc/support/time.h>
 #include <grpc/support/useful.h>
+#include "src/core/lib/support/env.h"
 #include "src/core/lib/support/string.h"
 #include "test/core/util/memory_counters.h"
 #include "test/core/util/test_config.h"
@@ -310,6 +311,31 @@ int main(int argc, char **argv) {
           server_calls_end.total_size_relative -
               after_server_create.total_size_relative);
 
+  const char *csv_file = "memory_usage.csv";
+  FILE *csv = fopen(csv_file, "w");
+  if (csv) {
+    char *env_build = gpr_getenv("BUILD_NUMBER");
+    char *env_job = gpr_getenv("JOB_NAME");
+    fprintf(csv, "%f,%zi,%zi,%f,%zi,%s,%s\n",
+            (double)(client_calls_inflight.total_size_relative -
+                     client_benchmark_calls_start.total_size_relative) /
+                benchmark_iterations,
+            client_channel_end.total_size_relative -
+                client_channel_start.total_size_relative,
+            after_server_create.total_size_relative -
+                before_server_create.total_size_relative,
+            (double)(server_calls_inflight.total_size_relative -
+                     server_benchmark_calls_start.total_size_relative) /
+                benchmark_iterations,
+            server_calls_end.total_size_relative -
+                after_server_create.total_size_relative,
+            env_build == NULL ? "" : env_build, env_job == NULL ? "" : env_job);
+    gpr_free(env_build);
+    gpr_free(env_job);
+    fclose(csv);
+    gpr_log(GPR_INFO, "Summary written to %s", csv_file);
+  }
+
   grpc_memory_counters_destroy();
   return 0;
 }
diff --git a/tools/profiling/latency_profile/run_latency_profile.sh b/tools/profiling/latency_profile/run_latency_profile.sh
index e9baee09574aebdfb3392f5f66e6fe487a283b94..41423fc3c192d509cfb941c063c5beb411ee8be8 100755
--- a/tools/profiling/latency_profile/run_latency_profile.sh
+++ b/tools/profiling/latency_profile/run_latency_profile.sh
@@ -44,4 +44,9 @@ else
   PYTHON=python2.7
 fi
 
+make CONFIG=opt memory_profile_test memory_profile_client memory_profile_server
+bins/opt/memory_profile_test
+bq load microbenchmarks.memory memory_usage.csv
+
 $PYTHON tools/run_tests/run_microbenchmark.py --collect summary perf latency --bigquery_upload
+