diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc index 9c0649cae68eb53316985c3534b01feaf13fec04..2583ceb8192693ee253a639f3b856ac8b827290d 100644 --- a/test/cpp/qps/driver.cc +++ b/test/cpp/qps/driver.cc @@ -52,6 +52,7 @@ #include "test/cpp/qps/driver.h" #include "test/cpp/qps/histogram.h" #include "test/cpp/qps/qps_worker.h" +#include "test/cpp/qps/stats.h" using std::list; using std::thread; @@ -115,6 +116,47 @@ static deque<string> get_workers(const string& name) { } } +// helpers for postprocess_scenario_result +static double WallTime(ClientStats s) { return s.time_elapsed(); } +static double SystemTime(ClientStats s) { return s.time_system(); } +static double UserTime(ClientStats s) { return s.time_user(); } +static double ServerWallTime(ServerStats s) { return s.time_elapsed(); } +static double ServerSystemTime(ServerStats s) { return s.time_system(); } +static double ServerUserTime(ServerStats s) { return s.time_user(); } +static int Cores(int n) { return n; } + +// Postprocess ScenarioResult and populate result summary. 
+static void postprocess_scenario_result(ScenarioResult* result) { + Histogram histogram; + histogram.MergeProto(result->latencies()); + + auto qps = histogram.Count() / average(result->client_stats(), WallTime); + auto qps_per_server_core = qps / sum(result->server_cores(), Cores); + + result->mutable_summary()->set_qps(qps); + result->mutable_summary()->set_qps_per_server_core(qps_per_server_core); + result->mutable_summary()->set_latency_50(histogram.Percentile(50)); + result->mutable_summary()->set_latency_90(histogram.Percentile(90)); + result->mutable_summary()->set_latency_95(histogram.Percentile(95)); + result->mutable_summary()->set_latency_99(histogram.Percentile(99)); + result->mutable_summary()->set_latency_999(histogram.Percentile(99.9)); + + auto server_system_time = 100.0 * + sum(result->server_stats(), ServerSystemTime) / + sum(result->server_stats(), ServerWallTime); + auto server_user_time = 100.0 * sum(result->server_stats(), ServerUserTime) / + sum(result->server_stats(), ServerWallTime); + auto client_system_time = 100.0 * sum(result->client_stats(), SystemTime) / + sum(result->client_stats(), WallTime); + auto client_user_time = 100.0 * sum(result->client_stats(), UserTime) / + sum(result->client_stats(), WallTime); + + result->mutable_summary()->set_server_system_time(server_system_time); + result->mutable_summary()->set_server_user_time(server_user_time); + result->mutable_summary()->set_client_system_time(client_system_time); + result->mutable_summary()->set_client_user_time(client_user_time); +} + // Namespace for classes and functions used only in RunScenario // Using this rather than local definitions to workaround gcc-4.4 limitations // regarding using templates without linkage @@ -380,6 +422,8 @@ std::unique_ptr<ScenarioResult> RunScenario( } delete[] servers; + + postprocess_scenario_result(result.get()); return result; } diff --git a/test/cpp/qps/report.cc b/test/cpp/qps/report.cc index 
07ab0a8f284cb04b7390a05b1e9e9c58a74052e4..3ae41399cfcc1f013ad08035f9dad7b42b57c829 100644 --- a/test/cpp/qps/report.cc +++ b/test/cpp/qps/report.cc @@ -45,14 +45,6 @@ namespace grpc { namespace testing { -static double WallTime(ClientStats s) { return s.time_elapsed(); } -static double SystemTime(ClientStats s) { return s.time_system(); } -static double UserTime(ClientStats s) { return s.time_user(); } -static double ServerWallTime(ServerStats s) { return s.time_elapsed(); } -static double ServerSystemTime(ServerStats s) { return s.time_system(); } -static double ServerUserTime(ServerStats s) { return s.time_user(); } -static int Cores(int n) { return n; } - void CompositeReporter::add(std::unique_ptr<Reporter> reporter) { reporters_.emplace_back(std::move(reporter)); } @@ -82,44 +74,33 @@ void CompositeReporter::ReportTimes(const ScenarioResult& result) { } void GprLogReporter::ReportQPS(const ScenarioResult& result) { - Histogram histogram; - histogram.MergeProto(result.latencies()); - gpr_log(GPR_INFO, "QPS: %.1f", - histogram.Count() / average(result.client_stats(), WallTime)); + gpr_log(GPR_INFO, "QPS: %.1f", result.summary().qps()); } void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) { - Histogram histogram; - histogram.MergeProto(result.latencies()); - auto qps = histogram.Count() / average(result.client_stats(), WallTime); - - gpr_log(GPR_INFO, "QPS: %.1f (%.1f/server core)", qps, - qps / sum(result.server_cores(), Cores)); + gpr_log(GPR_INFO, "QPS: %.1f (%.1f/server core)", result.summary().qps(), + result.summary().qps_per_server_core()); } void GprLogReporter::ReportLatency(const ScenarioResult& result) { - Histogram histogram; - histogram.MergeProto(result.latencies()); gpr_log(GPR_INFO, "Latencies (50/90/95/99/99.9%%-ile): %.1f/%.1f/%.1f/%.1f/%.1f us", - histogram.Percentile(50) / 1000, histogram.Percentile(90) / 1000, - histogram.Percentile(95) / 1000, histogram.Percentile(99) / 1000, - histogram.Percentile(99.9) / 1000); + 
result.summary().latency_50() / 1000, + result.summary().latency_90() / 1000, + result.summary().latency_95() / 1000, + result.summary().latency_99() / 1000, + result.summary().latency_999() / 1000); } void GprLogReporter::ReportTimes(const ScenarioResult& result) { gpr_log(GPR_INFO, "Server system time: %.2f%%", - 100.0 * sum(result.server_stats(), ServerSystemTime) / - sum(result.server_stats(), ServerWallTime)); + result.summary().server_system_time()); gpr_log(GPR_INFO, "Server user time: %.2f%%", - 100.0 * sum(result.server_stats(), ServerUserTime) / - sum(result.server_stats(), ServerWallTime)); + result.summary().server_user_time()); gpr_log(GPR_INFO, "Client system time: %.2f%%", - 100.0 * sum(result.client_stats(), SystemTime) / - sum(result.client_stats(), WallTime)); + result.summary().client_system_time()); gpr_log(GPR_INFO, "Client user time: %.2f%%", - 100.0 * sum(result.client_stats(), UserTime) / - sum(result.client_stats(), WallTime)); + result.summary().client_user_time()); } void JsonReporter::ReportQPS(const ScenarioResult& result) { diff --git a/tools/gcp/utils/big_query_utils.py b/tools/gcp/utils/big_query_utils.py index 913afd059ebede3e9ba9c35c2b68d95d895b9c87..9dbc69c5d669a70293125cf7aa223031de3464f4 100755 --- a/tools/gcp/utils/big_query_utils.py +++ b/tools/gcp/utils/big_query_utils.py @@ -119,9 +119,13 @@ def insert_rows(big_query, project_id, dataset_id, table_id, rows_list): tableId=table_id, body=body) res = insert_req.execute(num_retries=NUM_RETRIES) + if res.get('insertErrors', None): + print 'Error inserting rows! 
Response: %s' % res + is_success = False except HttpError as http_error: - print 'Error in inserting rows in the table %s' % table_id + print 'Error inserting rows into the table %s' % table_id is_success = False + return is_success diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json index 10d24a2517711f2d738b9ec4666e1ee8f803b8c5..032541475719d1cf4facd386e599c12deb812d58 100644 --- a/tools/run_tests/performance/scenario_result_schema.json +++ b/tools/run_tests/performance/scenario_result_schema.json @@ -148,52 +148,52 @@ "mode": "NULLABLE" }, { - "name": "qps_per_server_core", + "name": "qpsPerServerCore", "type": "FLOAT", "mode": "NULLABLE" }, { - "name": "server_system_time", + "name": "serverSystemTime", "type": "FLOAT", "mode": "NULLABLE" }, { - "name": "server_user_time", + "name": "serverUserTime", "type": "FLOAT", "mode": "NULLABLE" }, { - "name": "client_system_time", + "name": "clientSystemTime", "type": "FLOAT", "mode": "NULLABLE" }, { - "name": "client_user_time", + "name": "clientUserTime", "type": "FLOAT", "mode": "NULLABLE" }, { - "name": "latency_50", + "name": "latency50", "type": "FLOAT", "mode": "NULLABLE" }, { - "name": "latency_90", + "name": "latency90", "type": "FLOAT", "mode": "NULLABLE" }, { - "name": "latency_95", + "name": "latency95", "type": "FLOAT", "mode": "NULLABLE" }, { - "name": "latency_99", + "name": "latency99", "type": "FLOAT", "mode": "NULLABLE" }, { - "name": "latency_999", + "name": "latency999", "type": "FLOAT", "mode": "NULLABLE" }