diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc
index 776d94d3b6198e05ed54c1bbf36a0f4fec159fc9..f71e557450d237d1b5c0ac38f575bd239489f907 100644
--- a/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/test/cpp/end2end/client_lb_end2end_test.cc
@@ -97,9 +97,12 @@ class ClientLbEnd2endTest : public ::testing::Test {
     }
   }
 
-  void StartServers(int num_servers) {
-    for (int i = 0; i < num_servers; ++i) {
-      servers_.emplace_back(new ServerData(server_host_));
+  // If "ports" has one entry per server, servers are started on those ports.
+  void StartServers(size_t num_servers,
+                    std::vector<int> ports = std::vector<int>()) {
+    for (size_t i = 0; i < num_servers; ++i) {
+      const int port = ports.size() == num_servers ? ports[i] : 0;
+      servers_.emplace_back(new ServerData(server_host_, port));
     }
   }
 
@@ -146,14 +149,18 @@ class ClientLbEnd2endTest : public ::testing::Test {
     stub_ = grpc::testing::EchoTestService::NewStub(channel_);
   }
 
-  void SendRpc() {
+  void SendRpc(bool expect_ok = true) {
     EchoRequest request;
     EchoResponse response;
     request.set_message("Live long and prosper.");
     ClientContext context;
     Status status = stub_->Echo(&context, request, &response);
-    EXPECT_TRUE(status.ok());
-    EXPECT_EQ(response.message(), request.message());
+    if (expect_ok) {
+      EXPECT_TRUE(status.ok());
+      EXPECT_EQ(response.message(), request.message());
+    } else {
+      EXPECT_FALSE(status.ok());
+    }
   }
 
   struct ServerData {
@@ -162,8 +169,8 @@ class ClientLbEnd2endTest : public ::testing::Test {
     MyTestServiceImpl service_;
     std::unique_ptr<std::thread> thread_;
 
-    explicit ServerData(const grpc::string& server_host) {
-      port_ = grpc_pick_unused_port_or_die();
+    explicit ServerData(const grpc::string& server_host, int port = 0) {
+      port_ = port > 0 ? port : grpc_pick_unused_port_or_die();
       gpr_log(GPR_INFO, "starting server on port %d", port_);
       std::mutex mu;
       std::condition_variable cond;
@@ -187,9 +194,9 @@ class ClientLbEnd2endTest : public ::testing::Test {
       cond->notify_one();
     }
 
-    void Shutdown() {
+    void Shutdown(bool join = true) {
       server_->Shutdown();
-      thread_->join();
+      if (join) thread_->join();
     }
   };
 
@@ -456,6 +463,40 @@ TEST_F(ClientLbEnd2endTest, RoundRobinManyUpdates) {
   EXPECT_EQ("round_robin", channel_->GetLoadBalancingPolicyName());
 }
 
+TEST_F(ClientLbEnd2endTest, RoundRobinReconnect) {
+  // Start servers on pre-picked ports so they can be reused below.
+  const int kNumServers = 1;
+  std::vector<int> ports;
+  ports.push_back(grpc_pick_unused_port_or_die());
+  StartServers(kNumServers, ports);
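+  // Create a round_robin channel and resolve it to the servers' ports.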
+  ResetStub("round_robin");
+  SetNextResolution(ports);
+  // Send one RPC per backend and make sure they are used in order.
+  // Note: This relies on the fact that the subchannels are reported in
+  // state READY in the order in which the addresses are specified,
+  // which is only true because the backends are all local.
+  for (size_t i = 0; i < servers_.size(); ++i) {
+    SendRpc();
+    EXPECT_EQ(1, servers_[i]->service_.request_count()) << "for backend #" << i;
+  }
+  // Check LB policy name for the channel.
+  EXPECT_EQ("round_robin", channel_->GetLoadBalancingPolicyName());
+
+  // Kill all servers (without joining their threads).
+  for (size_t i = 0; i < servers_.size(); ++i) {
+    servers_[i]->Shutdown(false);
+  }
+  // Client request should fail.
+  SendRpc(false);
+
+  // Bring servers back up on the same ports (we aren't recreating the channel).
+  StartServers(kNumServers, ports);
+
+  // Client request should succeed.
+  SendRpc();
+}
+
 }  // namespace
 }  // namespace testing
 }  // namespace grpc