diff --git a/test/cpp/qps/client.h b/test/cpp/qps/client.h
index 95023d2f8014e817dbb3c83fbb2cfb28a4b22836..4045e13460f56bb517c2995565093ad7598fb368 100644
--- a/test/cpp/qps/client.h
+++ b/test/cpp/qps/client.h
@@ -114,10 +114,14 @@ class ClientRequestCreator<ByteBuffer> {
 
 class HistogramEntry GRPC_FINAL {
  public:
-  HistogramEntry(): used_(false) {}
-  bool used() const {return used_;}
-  double value() const {return value_;}
-  void set_value(double v) {used_ = true; value_ = v;}
+  HistogramEntry() : used_(false) {}
+  bool used() const { return used_; }
+  double value() const { return value_; }
+  void set_value(double v) {
+    used_ = true;
+    value_ = v;
+  }
+
  private:
   bool used_;
   double value_;
@@ -171,6 +175,7 @@ class Client {
       threads_complete_.wait(g);
     }
   }
+
  protected:
   bool closed_loop_;
 
@@ -254,8 +259,7 @@ class Client {
       n->Swap(&histogram_);
     }
 
-    void EndSwap() {
-    }
+    void EndSwap() {}
 
     void MergeStatsInto(Histogram* hist) {
       std::unique_lock<std::mutex> g(mu_);
@@ -281,7 +285,7 @@ class Client {
           done_ = true;
         }
         if (done_) {
-	  client_->CompleteThread();
+          client_->CompleteThread();
           return;
         }
       }
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index f7fe746bbf8cd1d2757bc54777adc8989b20a071..feb58e7a827d57e2d02aee2c2684141ad210fe21 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -198,6 +198,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
       }
     }
   }
+
  protected:
   const int num_async_threads_;
 
@@ -224,7 +225,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
     for (auto cq = cli_cqs_.begin(); cq != cli_cqs_.end(); cq++) {
       (*cq)->Shutdown();
     }
-    this->EndThreads(); // this needed for resolution
+    this->EndThreads();  // explicit this-> needed for dependent-name resolution
   }
 
   bool ThreadFunc(HistogramEntry* entry,
diff --git a/test/cpp/qps/client_sync.cc b/test/cpp/qps/client_sync.cc
index cc2c5ca540a6d79c7d86c3e4d48ad569f6b2e71d..25c78235532625280fb18fc51980b7a4d4c7fc38 100644
--- a/test/cpp/qps/client_sync.cc
+++ b/test/cpp/qps/client_sync.cc
@@ -87,6 +87,7 @@ class SynchronousClient
 
   size_t num_threads_;
   std::vector<SimpleResponse> responses_;
+
  private:
   void DestroyMultithreading() GRPC_OVERRIDE GRPC_FINAL { EndThreads(); }
 };
diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc
index 7f12ee9c0e47a0a4e47f86f3b363b48a7c5fedfb..2aeaea51f2540d0df94b93ef7fe3edce6ac36899 100644
--- a/test/cpp/qps/driver.cc
+++ b/test/cpp/qps/driver.cc
@@ -348,7 +348,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
     ClientArgs args;
     *args.mutable_setup() = per_client_config;
     clients[i].stream =
-      clients[i].stub->RunClient(runsc::AllocContext(&contexts));
+        clients[i].stub->RunClient(runsc::AllocContext(&contexts));
     if (!clients[i].stream->Write(args)) {
       gpr_log(GPR_ERROR, "Could not write args to client %zu", i);
     }
@@ -439,7 +439,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
     result->add_client_success(s.ok());
     if (!s.ok()) {
       gpr_log(GPR_ERROR, "Client %zu had an error %s", i,
-	      s.error_message().c_str());
+              s.error_message().c_str());
     }
   }
   delete[] clients;
@@ -475,7 +475,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
     result->add_server_success(s.ok());
     if (!s.ok()) {
       gpr_log(GPR_ERROR, "Server %zu had an error %s", i,
-	      s.error_message().c_str());
+              s.error_message().c_str());
     }
   }
 
@@ -497,8 +497,8 @@ bool RunQuit() {
     ctx.set_fail_fast(false);
     Status s = stub->QuitWorker(&ctx, dummy, &dummy);
     if (!s.ok()) {
-      gpr_log(GPR_ERROR, "Worker %zu could not be properly quit because %s",
-	      i, s.error_message().c_str());
+      gpr_log(GPR_ERROR, "Worker %zu could not be properly quit because %s", i,
+              s.error_message().c_str());
       result = false;
     }
   }
diff --git a/test/cpp/qps/qps_worker.cc b/test/cpp/qps/qps_worker.cc
index e147734f7aab9feab51b91c6d66dd51fa1928e02..d3e53fe14a63af8e1ee758af9f0d2211c50ee01d 100644
--- a/test/cpp/qps/qps_worker.cc
+++ b/test/cpp/qps/qps_worker.cc
@@ -222,7 +222,7 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
       }
       *status.mutable_stats() = client->Mark(args.mark().reset());
       if (!stream->Write(status)) {
-	return Status(StatusCode::UNKNOWN, "Client couldn't respond to mark");
+        return Status(StatusCode::UNKNOWN, "Client couldn't respond to mark");
       }
       gpr_log(GPR_INFO, "RunClientBody: Mark response given");
     }
@@ -267,7 +267,7 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
       }
       *status.mutable_stats() = server->Mark(args.mark().reset());
       if (!stream->Write(status)) {
-	return Status(StatusCode::UNKNOWN, "Server couldn't respond to mark");
+        return Status(StatusCode::UNKNOWN, "Server couldn't respond to mark");
       }
       gpr_log(GPR_INFO, "RunServerBody: Mark response given");
     }
diff --git a/test/cpp/qps/server_async.cc b/test/cpp/qps/server_async.cc
index da1a289e02cb7ced47f0ddb096751e41d00b9fbf..7e663ee0c209e2b5c7e15203453db76043612046 100644
--- a/test/cpp/qps/server_async.cc
+++ b/test/cpp/qps/server_async.cc
@@ -132,8 +132,7 @@ class AsyncQpsServerTest : public Server {
       (*ss)->shutdown = true;
     }
     // TODO (vpai): Remove this deadline and allow Shutdown to finish properly
-    auto deadline =
-        std::chrono::system_clock::now() + std::chrono::seconds(3);
+    auto deadline = std::chrono::system_clock::now() + std::chrono::seconds(3);
     server_->Shutdown(deadline);
     for (auto cq = srv_cqs_.begin(); cq != srv_cqs_.end(); ++cq) {
       (*cq)->Shutdown();
@@ -164,7 +163,9 @@ class AsyncQpsServerTest : public Server {
       // Proceed while holding a lock to make sure that
       // this thread isn't supposed to shut down
       std::lock_guard<std::mutex> l(shutdown_state_[thread_idx]->mutex);
-      if (shutdown_state_[thread_idx]->shutdown) { return; }
+      if (shutdown_state_[thread_idx]->shutdown) {
+        return;
+      }
       const bool still_going = ctx->RunNextState(ok);
       // if this RPC context is done, refresh it
       if (!still_going) {