Commit f8365cd8 authored by Yuxuan Li

clean up, fix minor issue

parent 28efff3e
@@ -227,7 +227,7 @@ struct grpc_completion_queue {
   /* TODO: sreek - This will no longer be needed. Use polling_type set */
   int is_non_listening_server_cq;
   int num_pluckers;
-  gpr_atm num_poll;
+  gpr_atm num_polls;
   plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
   grpc_closure pollset_shutdown_done;
@@ -293,7 +293,7 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
   cc->is_server_cq = 0;
   cc->is_non_listening_server_cq = 0;
   cc->num_pluckers = 0;
-  gpr_atm_no_barrier_store(&cc->num_poll, 0);
+  gpr_atm_no_barrier_store(&cc->num_polls, 0);
   gpr_atm_no_barrier_store(&cc->things_queued_ever, 0);
 #ifndef NDEBUG
   cc->outstanding_tag_count = 0;
@@ -311,7 +311,7 @@ grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue *cc) {
 }

 gpr_atm grpc_get_cq_poll_num(grpc_completion_queue *cc) {
-  return gpr_atm_no_barrier_load(&cc->num_poll);
+  return gpr_atm_no_barrier_load(&cc->num_polls);
 }

 #ifdef GRPC_CQ_REF_COUNT_DEBUG
@@ -598,7 +598,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
       gpr_mu_lock(cc->mu);
       continue;
     } else {
-      cc->num_poll++;
+      gpr_atm_no_barrier_fetch_add(&cc->num_polls, 1);
       grpc_error *err = cc->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cc),
                                                 NULL, now, iteration_deadline);
       if (err != GRPC_ERROR_NONE) {
@@ -791,7 +791,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
       grpc_exec_ctx_flush(&exec_ctx);
       gpr_mu_lock(cc->mu);
     } else {
-      gpr_atm_no_barrier_fetch_add(&cc->num_poll, 1);
+      gpr_atm_no_barrier_fetch_add(&cc->num_polls, 1);
       grpc_error *err = cc->poller_vtable->work(
           &exec_ctx, POLLSET_FROM_CQ(cc), &worker, now, iteration_deadline);
       if (err != GRPC_ERROR_NONE) {
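The substantive fix in these hunks is the increment in grpc_completion_queue_next: cc->num_poll++ was a plain read-modify-write on a counter that other threads read via gpr_atm_no_barrier_load, so concurrent pollers could race and lose counts. The pluck path already used the atomic form; next now matches it, and the field is renamed num_polls throughout. A minimal standalone sketch of the counting pattern, assuming only the public gpr_atm API from <grpc/support/atm.h>; the thread setup and names are invented for illustration:

// Several threads bump a shared poll counter the way the completion
// queue now does. Only the gpr_atm calls mirror the diff above;
// everything else is an illustrative stand-in.
#include <grpc/support/atm.h>
#include <cstdio>
#include <thread>
#include <vector>

static gpr_atm g_num_polls;  // plays the role of cc->num_polls

int main() {
  gpr_atm_no_barrier_store(&g_num_polls, 0);
  std::vector<std::thread> pollers;
  for (int i = 0; i < 4; i++) {
    pollers.emplace_back([] {
      for (int j = 0; j < 1000; j++) {
        // A plain ++ here would be a data race across the four
        // threads; fetch_add keeps every increment.
        gpr_atm_no_barrier_fetch_add(&g_num_polls, 1);
      }
    });
  }
  for (auto &t : pollers) t.join();
  // Readers use the matching atomic load, as grpc_get_cq_poll_num does.
  std::printf("polls: %ld\n", (long)gpr_atm_no_barrier_load(&g_num_polls));
  return 0;
}

This always prints polls: 4000; with the non-atomic increment the total could come up short.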
...
@@ -163,7 +163,8 @@ class Client {
     MaybeStartRequests();

-    int last_reset_poll_count_to_use = last_reset_poll_count_;
+    int cur_poll_count = GetPollCount();
+    int poll_count = cur_poll_count - last_reset_poll_count_;
     if (reset) {
       std::vector<Histogram> to_merge(threads_.size());
       std::vector<StatusHistogram> to_merge_status(threads_.size());
@@ -178,7 +179,7 @@ class Client {
         MergeStatusHistogram(to_merge_status[i], &statuses);
       }
       timer_result = timer->Mark();
-      last_reset_poll_count_ = GetPollCount();
+      last_reset_poll_count_ = cur_poll_count;
     } else {
       // merge snapshots of each thread histogram
       for (size_t i = 0; i < threads_.size(); i++) {
@@ -198,10 +199,7 @@ class Client {
     stats.set_time_elapsed(timer_result.wall);
     stats.set_time_system(timer_result.system);
     stats.set_time_user(timer_result.user);
-    gpr_log(GPR_INFO, "*****poll count : %d %d %d", GetPollCount(),
-            last_reset_poll_count_, last_reset_poll_count_to_use);
-    stats.set_cq_poll_count(GetPollCount() - last_reset_poll_count_to_use);
+    stats.set_cq_poll_count(poll_count);
     return stats;
   }
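Client::Mark(bool reset) (and Server::Mark further down) now samples GetPollCount() exactly once, computes the delta against the previous reset from that single sample, and reuses the same sample as the new baseline when resetting. The old code sampled twice, so polls landing between the baseline update and the final GetPollCount() call were attributed to both the current and the next interval, and a stray gpr_log debugging line sat in the stats path. A self-contained sketch of the sample-once pattern; Marker and its fake counter are invented stand-ins, not qps driver code:

#include <cstdio>

class Marker {
 public:
  int Mark(bool reset) {
    int cur_poll_count = GetPollCount();  // sample exactly once
    int poll_count = cur_poll_count - last_reset_poll_count_;
    if (reset) {
      last_reset_poll_count_ = cur_poll_count;  // reuse the same sample
    }
    return poll_count;
  }

 private:
  // Stand-in for summing grpc_get_cq_poll_num over every cq; it just
  // advances by 10 per call so the interval deltas are visible.
  int GetPollCount() { return fake_polls_ += 10; }
  int fake_polls_ = 0;
  int last_reset_poll_count_ = 0;
};

int main() {
  Marker m;
  std::printf("%d\n", m.Mark(true));   // 10: polls since start; baseline reset
  std::printf("%d\n", m.Mark(false));  // 10: since the reset, baseline kept
  std::printf("%d\n", m.Mark(false));  // 20: still measured from the reset
  return 0;
}

Because the baseline is the very sample the delta was computed from, adjacent intervals partition the counter with no overlap and no gap.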
...
@@ -211,11 +211,8 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
   int GetPollCount() {
     int count = 0;
-    // int i = 0;
     for (auto cq = cli_cqs_.begin(); cq != cli_cqs_.end(); cq++) {
-      int k = (int)grpc_get_cq_poll_num((*cq)->cq());
-      // gpr_log(GPR_INFO, "%d: per cq poll:%d", i++, k);
-      count += k;
+      count += (int)grpc_get_cq_poll_num((*cq)->cq());
     }
     return count;
   }
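GetPollCount() itself reduces to a single accumulation of grpc_get_cq_poll_num over the client's completion queues, with the commented-out per-queue logging and the k temporary removed; the server-side GetPollCount() in the last hunk below gets the identical treatment. A compile-oriented sketch of that loop, assuming grpc_get_cq_poll_num from gRPC's internal completion_queue.h (declared by hand here, since no public header exposes it); TotalPollCount is an invented name:

#include <grpc/grpc.h>
#include <grpc/support/atm.h>
#include <vector>

// Internal gRPC symbol; declared manually because it lives in
// src/core/lib/surface/completion_queue.h, not a public header.
extern "C" gpr_atm grpc_get_cq_poll_num(grpc_completion_queue *cc);

// Invented helper mirroring the cleaned-up loops over cli_cqs_ /
// srv_cqs_: sum each queue's poll counter into one total.
int TotalPollCount(const std::vector<grpc_completion_queue *> &cqs) {
  int count = 0;
  for (grpc_completion_queue *cq : cqs) {
    count += (int)grpc_get_cq_poll_num(cq);
  }
  return count;
}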
...
@@ -63,12 +63,13 @@ class Server {
   ServerStats Mark(bool reset) {
     UsageTimer::Result timer_result;

-    int last_reset_poll_count_to_use = last_reset_poll_count_;
+    int cur_poll_count = GetPollCount();
+    int poll_count = cur_poll_count - last_reset_poll_count_;
     if (reset) {
       std::unique_ptr<UsageTimer> timer(new UsageTimer);
       timer.swap(timer_);
       timer_result = timer->Mark();
-      last_reset_poll_count_ = GetPollCount();
+      last_reset_poll_count_ = cur_poll_count;
     } else {
       timer_result = timer_->Mark();
     }
@@ -79,7 +80,7 @@ class Server {
     stats.set_time_user(timer_result.user);
     stats.set_total_cpu_time(timer_result.total_cpu_time);
     stats.set_idle_cpu_time(timer_result.idle_cpu_time);
-    stats.set_cq_poll_count(GetPollCount() - last_reset_poll_count_to_use);
+    stats.set_cq_poll_count(poll_count);
     return stats;
   }
...
@@ -160,11 +160,8 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
   int GetPollCount() {
     int count = 0;
-    // int i = 0;
     for (auto cq = srv_cqs_.begin(); cq != srv_cqs_.end(); cq++) {
-      int k = (int)grpc_get_cq_poll_num((*cq)->cq());
-      // gpr_log(GPR_INFO, "%d: per cq poll:%d", i++, k);
-      count += k;
+      count += (int)grpc_get_cq_poll_num((*cq)->cq());
     }
     return count;
   }