Commit f50020ce authored by Vijay Pai

Appease the const gods, improve readability, stop using 0 and 1 as proxies for false and true.
parent d02988d6
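The hunks below write true/false (rather than 1/0) into the gpr_atm word that the worker threads poll, and read it back with an explicit cast to bool. As a rough analogue of that idiom in standard C++ (std::atomic<bool> in place of gRPC's gpr_atm_rel_store / gpr_atm_acq_load; the names below are illustrative, not from this commit):

#include <atomic>

std::atomic<bool> thread_pool_done{false};

// Signalling side: the release store publishes everything written before it.
void SignalDone() { thread_pool_done.store(true, std::memory_order_release); }

// Worker side: the acquire load pairs with the release store above.
bool IsDone() { return thread_pool_done.load(std::memory_order_acquire); }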
@@ -169,7 +169,7 @@ class Client {
   // Must call AwaitThreadsCompletion before destructor to avoid a race
   // between destructor and invocation of virtual ThreadFunc
   void AwaitThreadsCompletion() {
-    gpr_atm_rel_store(&thread_pool_done_, static_cast<gpr_atm>(1));
+    gpr_atm_rel_store(&thread_pool_done_, static_cast<gpr_atm>(true));
     DestroyMultithreading();
     std::unique_lock<std::mutex> g(thread_completion_mu_);
     while (threads_remaining_ != 0) {
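The comment at the top of this hunk is the reason AwaitThreadsCompletion exists at all: if the derived class's destructor ran while worker threads were still inside the virtual ThreadFunc, the call would race with the object being torn down. A minimal sketch of that shutdown pattern with standard C++ primitives (the Pool class and its members are invented here for illustration; the real code uses gpr_atm and gRPC's own thread plumbing):

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

// Illustrative sketch only, not the gRPC benchmark code.
class Pool {
 public:
  virtual ~Pool() {}  // callers must drain threads first (see below)

  void StartThreads(size_t n) {
    done_.store(false, std::memory_order_release);
    remaining_ = n;
    for (size_t i = 0; i < n; i++) threads_.emplace_back([this] { Run(); });
  }

  // Must run before the derived destructor, so no thread is still inside
  // the virtual ThreadFunc when the derived part of the object goes away.
  void AwaitThreadsCompletion() {
    done_.store(true, std::memory_order_release);
    std::unique_lock<std::mutex> g(mu_);
    cv_.wait(g, [this] { return remaining_ == 0; });
    g.unlock();
    for (auto& t : threads_) t.join();
  }

 protected:
  virtual bool ThreadFunc() = 0;  // one unit of work; false means stop

 private:
  void Run() {
    while (!done_.load(std::memory_order_acquire) && ThreadFunc()) {
    }
    std::lock_guard<std::mutex> g(mu_);
    if (--remaining_ == 0) cv_.notify_all();
  }

  std::atomic<bool> done_{true};
  size_t remaining_ = 0;
  std::mutex mu_;
  std::condition_variable cv_;
  std::vector<std::thread> threads_;
};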
@@ -182,7 +182,7 @@ class Client {
   gpr_atm thread_pool_done_;
   void StartThreads(size_t num_threads) {
-    gpr_atm_rel_store(&thread_pool_done_, static_cast<gpr_atm>(0));
+    gpr_atm_rel_store(&thread_pool_done_, static_cast<gpr_atm>(false));
     threads_remaining_ = num_threads;
     for (size_t i = 0; i < num_threads; i++) {
       threads_.emplace_back(new Thread(this, i));
@@ -274,14 +274,11 @@ class Client {
       if (entry.used()) {
         histogram_.Add(entry.value());
       }
-      bool done = false;
       if (!thread_still_ok) {
         gpr_log(GPR_ERROR, "Finishing client thread due to RPC error");
-        done = true;
       }
-      done = done || (gpr_atm_acq_load(&client_->thread_pool_done_) !=
-                      static_cast<gpr_atm>(0));
-      if (done) {
+      if (!thread_still_ok ||
+          static_cast<bool>(gpr_atm_acq_load(&client_->thread_pool_done_))) {
         client_->CompleteThread();
         return;
       }
......
@@ -82,12 +82,12 @@ class SynchronousClient
   // WaitToIssue returns false if we realize that we need to break out
   bool WaitToIssue(int thread_idx) {
     if (!closed_loop_) {
-      gpr_timespec next_issue_time = NextIssueTime(thread_idx);
+      const gpr_timespec next_issue_time = NextIssueTime(thread_idx);
       // Avoid sleeping for too long continuously because we might
       // need to terminate before then. This is an issue since
       // exponential distribution can occasionally produce bad outliers
       while (true) {
-        gpr_timespec one_sec_delay =
+        const gpr_timespec one_sec_delay =
             gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                          gpr_time_from_seconds(1, GPR_TIMESPAN));
         if (gpr_time_cmp(next_issue_time, one_sec_delay) <= 0) {
......
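The comment in this hunk explains the shape of the loop: with an open-loop (exponential) arrival schedule, next_issue_time can occasionally be far in the future, so the thread sleeps at most one second at a time and re-checks for shutdown between slices. A sketch of the same idea in portable C++ (the should_stop() helper is hypothetical, standing in for the thread_pool_done_ check):

#include <atomic>
#include <chrono>
#include <thread>

std::atomic<bool> stop_flag{false};
bool should_stop() { return stop_flag.load(std::memory_order_acquire); }

// Wait until next_issue_time, but never sleep more than one second at a
// stretch; returns false if shutdown was requested while waiting.
bool WaitToIssue(std::chrono::steady_clock::time_point next_issue_time) {
  while (true) {
    const auto one_sec_delay =
        std::chrono::steady_clock::now() + std::chrono::seconds(1);
    if (next_issue_time <= one_sec_delay) {
      std::this_thread::sleep_until(next_issue_time);
      return !should_stop();
    }
    std::this_thread::sleep_until(one_sec_delay);  // bounded slice
    if (should_stop()) return false;               // re-check between slices
  }
}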