Commit 63b28447 authored by Vijay Pai

Remove the command-line flags

parent 490e5023
@@ -34,7 +34,6 @@
 #include <mutex>
 #include <thread>
 
-#include <gflags/gflags.h>
 #include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
@@ -56,15 +55,8 @@ using grpc::testing::EchoRequest;
 using grpc::testing::EchoResponse;
 using std::chrono::system_clock;
 
-// In some distros, gflags is in the namespace google, and in some others,
-// in gflags. This hack is enabling us to find both.
-namespace google {}
-namespace gflags {}
-using namespace google;
-using namespace gflags;
-
-DEFINE_int32(num_threads, 100, "Number of threads");
-DEFINE_int32(num_rpcs, 1000, "Number of RPCs per thread");
+const int kNumThreads = 100;  // Number of threads
+const int kNumRpcs = 1000;    // Number of RPCs per thread
 
 namespace grpc {
 namespace testing {
@@ -239,11 +231,11 @@ static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs) {
 TEST_F(End2endTest, ThreadStress) {
   common_.ResetStub();
   std::vector<std::thread*> threads;
-  for (int i = 0; i < FLAGS_num_threads; ++i) {
+  for (int i = 0; i < kNumThreads; ++i) {
     threads.push_back(
-        new std::thread(SendRpc, common_.GetStub(), FLAGS_num_rpcs));
+        new std::thread(SendRpc, common_.GetStub(), kNumRpcs));
   }
-  for (int i = 0; i < FLAGS_num_threads; ++i) {
+  for (int i = 0; i < kNumThreads; ++i) {
     threads[i]->join();
     delete threads[i];
   }
@@ -324,22 +316,22 @@ class AsyncClientEnd2endTest : public ::testing::Test {
 TEST_F(AsyncClientEnd2endTest, ThreadStress) {
   common_.ResetStub();
   std::vector<std::thread*> send_threads, completion_threads;
-  for (int i = 0; i < FLAGS_num_threads; ++i) {
+  for (int i = 0; i < kNumThreads; ++i) {
     completion_threads.push_back(new std::thread(
         &AsyncClientEnd2endTest_ThreadStress_Test::AsyncCompleteRpc, this));
   }
-  for (int i = 0; i < FLAGS_num_threads; ++i) {
+  for (int i = 0; i < kNumThreads; ++i) {
     send_threads.push_back(
         new std::thread(&AsyncClientEnd2endTest_ThreadStress_Test::AsyncSendRpc,
-                        this, FLAGS_num_rpcs));
+                        this, kNumRpcs));
   }
-  for (int i = 0; i < FLAGS_num_threads; ++i) {
+  for (int i = 0; i < kNumThreads; ++i) {
     send_threads[i]->join();
     delete send_threads[i];
   }
   Wait();
-  for (int i = 0; i < FLAGS_num_threads; ++i) {
+  for (int i = 0; i < kNumThreads; ++i) {
     completion_threads[i]->join();
     delete completion_threads[i];
   }
@@ -349,7 +341,6 @@ TEST_F(AsyncClientEnd2endTest, ThreadStress) {
 }  // namespace grpc
 
 int main(int argc, char** argv) {
-  ParseCommandLineFlags(&argc, &argv, true);
   grpc_test_init(argc, argv);
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
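For context, the pattern after this change is a plain fan-out/join over compile-time constants instead of gflags-defined flags. Below is a minimal standalone sketch of that pattern; DoWork and the constant values are illustrative placeholders, not code from the actual test.

#include <thread>
#include <vector>

const int kNumThreads = 100;  // number of worker threads
const int kNumRpcs = 1000;    // units of work per thread

// Stand-in for the test's per-thread body: performs num_rpcs units of work.
static void DoWork(int num_rpcs) {
  for (int i = 0; i < num_rpcs; ++i) {
    // one RPC (or other unit of work) per iteration
  }
}

int main() {
  std::vector<std::thread*> threads;
  // Spawn one thread per worker, each doing kNumRpcs iterations.
  for (int i = 0; i < kNumThreads; ++i) {
    threads.push_back(new std::thread(DoWork, kNumRpcs));
  }
  // Join and free every thread before exiting.
  for (int i = 0; i < kNumThreads; ++i) {
    threads[i]->join();
    delete threads[i];
  }
  return 0;
}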