diff --git a/test/cpp/microbenchmarks/bm_fullstack.cc b/test/cpp/microbenchmarks/bm_fullstack.cc
index c3e96c572c8506defd60dcdf94253d88e600bebc..611ee8ddb3afb2c375d321bd352a3f62f8904159 100644
--- a/test/cpp/microbenchmarks/bm_fullstack.cc
+++ b/test/cpp/microbenchmarks/bm_fullstack.cc
@@ -439,6 +439,194 @@ static void BM_UnaryPingPong(benchmark::State& state) {
                           state.range(1) * state.iterations());
 }
 
+// Repeatedly makes Streaming Bidi calls (exchanging a configurable number of
+// messages in each call) in a loop on a single channel
+//
+//  First parameter (i.e. state.range(0)):  Message size (in bytes) to use
+//  Second parameter (i.e. state.range(1)): Number of ping-pong messages.
+//      Note: One ping-pong means two messages (one from client to server and
+//      the other from server to client).
+template <class Fixture, class ClientContextMutator, class ServerContextMutator>
+static void BM_StreamingPingPong(benchmark::State& state) {
+  const int msg_size = state.range(0);
+  const int max_ping_pongs = state.range(1);
+
+  EchoTestService::AsyncService service;
+  std::unique_ptr<Fixture> fixture(new Fixture(&service));
+  {
+    EchoResponse send_response;
+    EchoResponse recv_response;
+    EchoRequest send_request;
+    EchoRequest recv_request;
+
+    if (msg_size > 0) {
+      send_request.set_message(std::string(msg_size, 'a'));
+      send_response.set_message(std::string(msg_size, 'b'));
+    }
+
+    std::unique_ptr<EchoTestService::Stub> stub(
+        EchoTestService::NewStub(fixture->channel()));
+
+    while (state.KeepRunning()) {
+      ServerContext svr_ctx;
+      ServerContextMutator svr_ctx_mut(&svr_ctx);
+      ServerAsyncReaderWriter<EchoResponse, EchoRequest> response_rw(&svr_ctx);
+      service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(),
+                                fixture->cq(), tag(0));
+
+      ClientContext cli_ctx;
+      ClientContextMutator cli_ctx_mut(&cli_ctx);
+      auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1));
+
+      // Wait until the async stream is established on both client and server
+      void* t;
+      bool ok;
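+      // 'need_tags' is a bitmask: bit i stays set while the completion for
+      // tag(i) is still outstanding on the completion queue.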
+      int need_tags = (1 << 0) | (1 << 1);
+      while (need_tags) {
+        GPR_ASSERT(fixture->cq()->Next(&t, &ok));
+        GPR_ASSERT(ok);
+        int i = (int)(intptr_t)t;
+        GPR_ASSERT(need_tags & (1 << i));
+        need_tags &= ~(1 << i);
+      }
+
+      // Exchange 'max_ping_pongs' ping-pong message pairs
+      int ping_pong_cnt = 0;
+      while (ping_pong_cnt < max_ping_pongs) {
+        request_rw->Write(send_request, tag(0));   // Start client send
+        response_rw.Read(&recv_request, tag(1));   // Start server recv
+        request_rw->Read(&recv_response, tag(2));  // Start client recv
+
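+        // Wait for all four completions: client write (0), server read (1),
+        // client read (2) and the server write (3), which is only started
+        // once the server read completes (see below).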
+        need_tags = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3);
+        while (need_tags) {
+          GPR_ASSERT(fixture->cq()->Next(&t, &ok));
+          GPR_ASSERT(ok);
+          int i = (int)(intptr_t)t;
+
+          // If server recv is complete, start the server send operation
+          if (i == 1) {
+            response_rw.Write(send_response, tag(3));
+          }
+
+          GPR_ASSERT(need_tags & (1 << i));
+          need_tags &= ~(1 << i);
+        }
+
+        ping_pong_cnt++;
+      }
+
+      request_rw->WritesDone(tag(0));
+      response_rw.Finish(Status::OK, tag(1));
+
+      Status recv_status;
+      request_rw->Finish(&recv_status, tag(2));
+
+      need_tags = (1 << 0) | (1 << 1) | (1 << 2);
+      while (need_tags) {
+        GPR_ASSERT(fixture->cq()->Next(&t, &ok));
+        int i = (int)(intptr_t)t;
+        GPR_ASSERT(need_tags & (1 << i));
+        need_tags &= ~(1 << i);
+      }
+
+      GPR_ASSERT(recv_status.ok());
+    }
+  }
+
+  fixture->Finish(state);
+  fixture.reset();
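+  // Each ping-pong moves msg_size bytes in each direction.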
+  state.SetBytesProcessed(msg_size * state.iterations() * max_ping_pongs * 2);
+}
+
+// Repeatedly sends ping-pong messages in a single streaming Bidi call in a loop
+//     First parameter (i.e. state.range(0)):  Message size (in bytes) to use
+template <class Fixture, class ClientContextMutator, class ServerContextMutator>
+static void BM_StreamingPingPongMsgs(benchmark::State& state) {
+  const int msg_size = state.range(0);
+
+  EchoTestService::AsyncService service;
+  std::unique_ptr<Fixture> fixture(new Fixture(&service));
+  {
+    EchoResponse send_response;
+    EchoResponse recv_response;
+    EchoRequest send_request;
+    EchoRequest recv_request;
+
+    if (msg_size > 0) {
+      send_request.set_message(std::string(msg_size, 'a'));
+      send_response.set_message(std::string(msg_size, 'b'));
+    }
+
+    std::unique_ptr<EchoTestService::Stub> stub(
+        EchoTestService::NewStub(fixture->channel()));
+
+    ServerContext svr_ctx;
+    ServerContextMutator svr_ctx_mut(&svr_ctx);
+    ServerAsyncReaderWriter<EchoResponse, EchoRequest> response_rw(&svr_ctx);
+    service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(),
+                              fixture->cq(), tag(0));
+
+    ClientContext cli_ctx;
+    ClientContextMutator cli_ctx_mut(&cli_ctx);
+    auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1));
+
+    // Wait until the async stream is established on both client and server
+    void* t;
+    bool ok;
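+    // 'need_tags' is a bitmask: bit i stays set while the completion for
+    // tag(i) is still outstanding on the completion queue.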
+    int need_tags = (1 << 0) | (1 << 1);
+    while (need_tags) {
+      GPR_ASSERT(fixture->cq()->Next(&t, &ok));
+      GPR_ASSERT(ok);
+      int i = (int)(intptr_t)t;
+      GPR_ASSERT(need_tags & (1 << i));
+      need_tags &= ~(1 << i);
+    }
+
+    while (state.KeepRunning()) {
+      request_rw->Write(send_request, tag(0));   // Start client send
+      response_rw.Read(&recv_request, tag(1));   // Start server recv
+      request_rw->Read(&recv_response, tag(2));  // Start client recv
+
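+      // As above, bit 3 is set up front even though the server write (tag 3)
+      // is only issued once the server read (tag 1) completes.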
+      need_tags = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3);
+      while (need_tags) {
+        GPR_ASSERT(fixture->cq()->Next(&t, &ok));
+        GPR_ASSERT(ok);
+        int i = (int)(intptr_t)t;
+
+        // If server recv is complete, start the server send operation
+        if (i == 1) {
+          response_rw.Write(send_response, tag(3));
+        }
+
+        GPR_ASSERT(need_tags & (1 << i));
+        need_tags &= ~(1 << i);
+      }
+    }
+
+    request_rw->WritesDone(tag(0));
+    response_rw.Finish(Status::OK, tag(1));
+    Status recv_status;
+    request_rw->Finish(&recv_status, tag(2));
+
+    need_tags = (1 << 0) | (1 << 1) | (1 << 2);
+    while (need_tags) {
+      GPR_ASSERT(fixture->cq()->Next(&t, &ok));
+      int i = (int)(intptr_t)t;
+      GPR_ASSERT(need_tags & (1 << i));
+      need_tags &= ~(1 << i);
+    }
+
+    GPR_ASSERT(recv_status.ok());
+  }
+
+  fixture->Finish(state);
+  fixture.reset();
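+  // Each iteration is one ping-pong: msg_size bytes in each direction.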
+  state.SetBytesProcessed(msg_size * state.iterations() * 2);
+}
+
 template <class Fixture>
 static void BM_PumpStreamClientToServer(benchmark::State& state) {
   EchoTestService::AsyncService service;
@@ -642,6 +830,25 @@ BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, SockPair)
 BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, InProcessCHTTP2)
     ->Range(0, 128 * 1024 * 1024);
 
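+// Generates the argument matrix for BM_StreamingPingPong: message sizes 0, 1,
+// 8, 64, ... up to 128MB, each combined with 0, 1 and 2 ping-pongs per call.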
+static void StreamingPingPongArgs(benchmark::internal::Benchmark* b) {
+  int msg_size = 0;
+  int num_ping_pongs = 0;
+  for (msg_size = 0; msg_size <= 128 * 1024 * 1024;
+       msg_size == 0 ? msg_size++ : msg_size *= 8) {
+    for (num_ping_pongs = 0; num_ping_pongs <= 2; num_ping_pongs++) {
+      b->Args({msg_size, num_ping_pongs});
+    }
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_StreamingPingPong, InProcessCHTTP2, NoOpMutator,
+                   NoOpMutator)
+    ->Apply(StreamingPingPongArgs);
+
+BENCHMARK_TEMPLATE(BM_StreamingPingPongMsgs, InProcessCHTTP2, NoOpMutator,
+                   NoOpMutator)
+    ->Range(0, 128 * 1024 * 1024);
+
 }  // namespace testing
 }  // namespace grpc