diff --git a/INSTALL.md b/INSTALL.md
index 6cfa1b6cbaf34ad1c9b6557cf2b75e159ebd611d..5406fec84dbf73f3e474f995670702996a514b8c 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -51,6 +51,15 @@ If you plan to build from source and run tests, install the following as well:
  $ brew install gflags
 ```
 
+*Tip*: when building, you *may* want to explicitly set the `LIBTOOL` and
+`LIBTOOLIZE` environment variables when running `make` to ensure the
+versions installed by `brew` are used:
+
+```sh
+ $ LIBTOOL=glibtool LIBTOOLIZE=glibtoolize make
+```
+
 ## Protoc
 
 By default gRPC uses [protocol buffers](https://github.com/google/protobuf),
diff --git a/composer.json b/composer.json
index 0cafb94808c6887883980495a2f340ee77c158bb..284b57a8098158bed6b57a4d5a473e6fb6021a69 100644
--- a/composer.json
+++ b/composer.json
@@ -7,7 +7,7 @@
   "license": "BSD-3-Clause",
   "require": {
     "php": ">=5.5.0",
-    "google/protobuf": "^v3.1.0"
+    "google/protobuf": "^v3.3.0"
   },
   "require-dev": {
     "google/auth": "v0.9"
diff --git a/doc/unit_testing.md b/doc/unit_testing.md
new file mode 100644
index 0000000000000000000000000000000000000000..380517fea5242e30e50853ea1fc853938ec72f8e
--- /dev/null
+++ b/doc/unit_testing.md
@@ -0,0 +1,175 @@
+# How to write unit tests for the gRPC C++ client
+
+tl;dr: [Example code](https://github.com/grpc/grpc/blob/master/test/cpp/end2end/mock_test.cc).
+
+To unit-test client-side logic via the synchronous API, gRPC provides a mocked stub based on googletest (specifically googlemock) that can be programmed against and easily incorporated into test code.
+
+For instance, consider an `EchoTestService` defined like this:
+
+
+```proto
+service EchoTestService {
+  rpc Echo(EchoRequest) returns (EchoResponse);
+  rpc BidiStream(stream EchoRequest) returns (stream EchoResponse);
+}
+```
+
+The code generated would look something like this:
+
+```c++
+class EchoTestService final {
+  public:
+  class StubInterface {
+    virtual ::grpc::Status Echo(::grpc::ClientContext* context, const ::grpc::testing::EchoRequest& request, ::grpc::testing::EchoResponse* response) = 0;
+  …
+    std::unique_ptr< ::grpc::ClientReaderWriterInterface< ::grpc::testing::EchoRequest, ::grpc::testing::EchoResponse>> BidiStream(::grpc::ClientContext* context) {
+      return std::unique_ptr< ::grpc::ClientReaderWriterInterface< ::grpc::testing::EchoRequest, ::grpc::testing::EchoResponse>>(BidiStreamRaw(context));
+    }
+  …
+    private:
+    virtual ::grpc::ClientReaderWriterInterface< ::grpc::testing::EchoRequest, ::grpc::testing::EchoResponse>* BidiStreamRaw(::grpc::ClientContext* context) = 0;
+  …
+  };  // End StubInterface
+…
+};  // End EchoTestService
+```
+
+
+If we mock the StubInterface and set expectations on the pure-virtual methods, we can test client-side logic without making any RPCs.
+
+A mock for this StubInterface will look like this:
+
+
+```c++
+class MockEchoTestServiceStub : public EchoTestService::StubInterface {
+ public:
+  MOCK_METHOD3(Echo, ::grpc::Status(::grpc::ClientContext* context, const ::grpc::testing::EchoRequest& request, ::grpc::testing::EchoResponse* response));
+  MOCK_METHOD1(BidiStreamRaw, ::grpc::ClientReaderWriterInterface< ::grpc::testing::EchoRequest, ::grpc::testing::EchoResponse>*(::grpc::ClientContext* context));
+};
+```
+
+
+**Generating mock code:**
+
+Such a mock can be auto-generated by:
+
+1.  Setting the flag `generate_mock_code=true` on the gRPC plugin for protoc, or
+1.  Setting the attribute `generate_mock = True` in your Bazel rule.
+
+Protoc plugin flag:
+
+```sh
+protoc -I . --grpc_out=generate_mock_code=true:. --plugin=protoc-gen-grpc=`which grpc_cpp_plugin` echo.proto
+```
+
+Bazel rule:
+
+```py
+grpc_proto_library(
+  name = "echo_proto",
+  srcs = ["echo.proto"],
+  generate_mock = True,
+)
+```
+
+
+With this flag set, a header file `echo_mock.grpc.pb.h` containing the mocked stub will also be generated.
+
+This header file can then be included in test files along with a gmock dependency.
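+
+A minimal sketch of the includes and using-declarations that the test snippets
+below rely on (paths and namespaces are typical for a gRPC plus googletest
+setup; adjust them to your build layout):
+
+```c++
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "echo_mock.grpc.pb.h"  // generated when mock generation is enabled
+
+using ::grpc::ClientContext;
+using ::grpc::ClientReaderWriterInterface;
+using ::grpc::Status;
+using ::grpc::testing::EchoRequest;
+using ::grpc::testing::EchoResponse;
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArgPointee;
+using ::testing::WithArg;
+```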
+
+**Writing tests with a mocked stub:**
+
+Consider the following client a user might have:
+
+```c++
+class FakeClient {
+ public:
+  explicit FakeClient(EchoTestService::StubInterface* stub) : stub_(stub) {}
+
+  void DoEcho() {
+    ClientContext context;
+    EchoRequest request;
+    EchoResponse response;
+    request.set_message("hello world");
+    Status s = stub_->Echo(&context, request, &response);
+    EXPECT_EQ(request.message(), response.message());
+    EXPECT_TRUE(s.ok());
+  }
+
+  void DoBidiStream() {
+    EchoRequest request;
+    EchoResponse response;
+    ClientContext context;
+    grpc::string msg("hello");
+
+    std::unique_ptr<ClientReaderWriterInterface<EchoRequest, EchoResponse>>
+        stream = stub_->BidiStream(&context);
+
+    request.set_message(msg + "0");
+    EXPECT_TRUE(stream->Write(request));
+    EXPECT_TRUE(stream->Read(&response));
+    EXPECT_EQ(response.message(), request.message());
+
+    request.set_message(msg + "1");
+    EXPECT_TRUE(stream->Write(request));
+    EXPECT_TRUE(stream->Read(&response));
+    EXPECT_EQ(response.message(), request.message());
+
+    request.set_message(msg + "2");
+    EXPECT_TRUE(stream->Write(request));
+    EXPECT_TRUE(stream->Read(&response));
+    EXPECT_EQ(response.message(), request.message());
+
+    stream->WritesDone();
+    EXPECT_FALSE(stream->Read(&response));
+
+    Status s = stream->Finish();
+    EXPECT_TRUE(s.ok());
+  }
+
+  void ResetStub(EchoTestService::StubInterface* stub) { stub_ = stub; }
+
+ private:
+  EchoTestService::StubInterface* stub_;
+};
+```
+
+A test can initialize this FakeClient with a mocked stub, after setting expectations on the mock:
+
+Unary RPC:
+
+```c++
+MockEchoTestServiceStub stub;
+EchoResponse resp;
+resp.set_message("hello world");
+EXPECT_CALL(stub, Echo(_, _, _))
+    .Times(AtLeast(1))
+    .WillOnce(DoAll(SetArgPointee<2>(resp), Return(Status::OK)));
+FakeClient client(&stub);
+client.DoEcho();
+```
+
+Streaming RPC:
+
+```c++
+ACTION_P(copy, msg) {
+  arg0->set_message(msg->message());
+}
+
+auto rw = new MockClientReaderWriter<EchoRequest, EchoResponse>();
+EchoRequest msg;
+EXPECT_CALL(*rw, Write(_, _))
+    .Times(3)
+    .WillRepeatedly(DoAll(SaveArg<0>(&msg), Return(true)));
+EXPECT_CALL(*rw, Read(_))
+    .WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
+    .WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
+    .WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
+    .WillOnce(Return(false));
+
+MockEchoTestServiceStub stub;
+EXPECT_CALL(stub, BidiStreamRaw(_)).Times(AtLeast(1)).WillOnce(Return(rw));
+
+FakeClient client(&stub);
+client.DoBidiStream();
+```
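+
+The `MockClientReaderWriter` used above is a gmock mock of
+`ClientReaderWriterInterface`. If your gRPC version does not already provide
+one, a minimal sketch might look like the following (the method list and
+signatures here are assumptions; match them against the
+`ClientReaderWriterInterface` declared in your gRPC headers):
+
+```c++
+#include <cstdint>
+
+#include <gmock/gmock.h>
+#include <grpc++/support/sync_stream.h>  // declares the stream interfaces
+
+template <class W, class R>
+class MockClientReaderWriter final
+    : public ::grpc::ClientReaderWriterInterface<W, R> {
+ public:
+  MOCK_METHOD0_T(WaitForInitialMetadata, void());
+  MOCK_METHOD1_T(NextMessageSize, bool(uint32_t*));
+  MOCK_METHOD1_T(Read, bool(R*));
+  MOCK_METHOD2_T(Write, bool(const W&, ::grpc::WriteOptions));
+  MOCK_METHOD0_T(WritesDone, bool());
+  MOCK_METHOD0_T(Finish, ::grpc::Status());
+};
+```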
+
diff --git a/include/grpc++/impl/codegen/config_protobuf.h b/include/grpc++/impl/codegen/config_protobuf.h
index 8620d4b2183bfc00b7ab555e6cf43dcfd37bffcf..e66189f8d127db128c34cb7fc9222a067f2a880f 100644
--- a/include/grpc++/impl/codegen/config_protobuf.h
+++ b/include/grpc++/impl/codegen/config_protobuf.h
@@ -34,6 +34,8 @@
 #ifndef GRPCXX_IMPL_CODEGEN_CONFIG_PROTOBUF_H
 #define GRPCXX_IMPL_CODEGEN_CONFIG_PROTOBUF_H
 
+#define GRPC_OPEN_SOURCE_PROTO
+
 #ifndef GRPC_CUSTOM_PROTOBUF_INT64
 #include <google/protobuf/stubs/common.h>
 #define GRPC_CUSTOM_PROTOBUF_INT64 ::google::protobuf::int64
diff --git a/include/grpc++/impl/codegen/core_codegen.h b/include/grpc++/impl/codegen/core_codegen.h
index a1593729ebc81368deaeaee4b35f2c26cbc79714..7a1236a716be8c6823a08a4013396c5947bc2af1 100644
--- a/include/grpc++/impl/codegen/core_codegen.h
+++ b/include/grpc++/impl/codegen/core_codegen.h
@@ -90,11 +90,15 @@ class CoreCodegen final : public CoreCodegenInterface {
 
   grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slice,
                                                 size_t nslices) override;
-
+  grpc_slice grpc_slice_new_with_user_data(void* p, size_t len,
+                                           void (*destroy)(void*),
+                                           void* user_data) override;
   grpc_slice grpc_empty_slice() override;
   grpc_slice grpc_slice_malloc(size_t length) override;
   void grpc_slice_unref(grpc_slice slice) override;
+  grpc_slice grpc_slice_ref(grpc_slice slice) override;
   grpc_slice grpc_slice_split_tail(grpc_slice* s, size_t split) override;
+  grpc_slice grpc_slice_split_head(grpc_slice* s, size_t split) override;
   void grpc_slice_buffer_add(grpc_slice_buffer* sb, grpc_slice slice) override;
   void grpc_slice_buffer_pop(grpc_slice_buffer* sb) override;
   grpc_slice grpc_slice_from_static_buffer(const void* buffer,
diff --git a/include/grpc++/impl/codegen/core_codegen_interface.h b/include/grpc++/impl/codegen/core_codegen_interface.h
index 7cc3a824761cb26be1677fa6c11adb52b6f7b07e..4ac37f52559d542eedd039eeb7b7440d7750fe9e 100644
--- a/include/grpc++/impl/codegen/core_codegen_interface.h
+++ b/include/grpc++/impl/codegen/core_codegen_interface.h
@@ -101,15 +101,18 @@ class CoreCodegenInterface {
 
   virtual grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slice,
                                                         size_t nslices) = 0;
-
+  virtual grpc_slice grpc_slice_new_with_user_data(void* p, size_t len,
+                                                   void (*destroy)(void*),
+                                                   void* user_data) = 0;
   virtual void grpc_call_ref(grpc_call* call) = 0;
   virtual void grpc_call_unref(grpc_call* call) = 0;
   virtual void* grpc_call_arena_alloc(grpc_call* call, size_t length) = 0;
-
   virtual grpc_slice grpc_empty_slice() = 0;
   virtual grpc_slice grpc_slice_malloc(size_t length) = 0;
   virtual void grpc_slice_unref(grpc_slice slice) = 0;
+  virtual grpc_slice grpc_slice_ref(grpc_slice slice) = 0;
   virtual grpc_slice grpc_slice_split_tail(grpc_slice* s, size_t split) = 0;
+  virtual grpc_slice grpc_slice_split_head(grpc_slice* s, size_t split) = 0;
   virtual void grpc_slice_buffer_add(grpc_slice_buffer* sb,
                                      grpc_slice slice) = 0;
   virtual void grpc_slice_buffer_pop(grpc_slice_buffer* sb) = 0;
diff --git a/include/grpc++/impl/codegen/proto_utils.h b/include/grpc++/impl/codegen/proto_utils.h
index 8c0c32bfc5d9bf757f769851d471a6f2f051c5c3..5cdb2a7a155edfe9b641f451ab9d609cdfbb17c2 100644
--- a/include/grpc++/impl/codegen/proto_utils.h
+++ b/include/grpc++/impl/codegen/proto_utils.h
@@ -54,8 +54,7 @@ class GrpcBufferWriterPeer;
 
 const int kGrpcBufferWriterMaxBufferLength = 1024 * 1024;
 
-class GrpcBufferWriter final
-    : public ::grpc::protobuf::io::ZeroCopyOutputStream {
+class GrpcBufferWriter : public ::grpc::protobuf::io::ZeroCopyOutputStream {
  public:
   explicit GrpcBufferWriter(grpc_byte_buffer** bp, int block_size)
       : block_size_(block_size), byte_count_(0), have_backup_(false) {
@@ -103,6 +102,8 @@ class GrpcBufferWriter final
 
   grpc::protobuf::int64 ByteCount() const override { return byte_count_; }
 
+  grpc_slice_buffer* SliceBuffer() { return slice_buffer_; }
+
  private:
   friend class GrpcBufferWriterPeer;
   const int block_size_;
@@ -113,8 +114,7 @@ class GrpcBufferWriter final
   grpc_slice slice_;
 };
 
-class GrpcBufferReader final
-    : public ::grpc::protobuf::io::ZeroCopyInputStream {
+class GrpcBufferReader : public ::grpc::protobuf::io::ZeroCopyInputStream {
  public:
   explicit GrpcBufferReader(grpc_byte_buffer* buffer)
       : byte_count_(0), backup_count_(0), status_() {
@@ -175,64 +175,91 @@ class GrpcBufferReader final
     return byte_count_ - backup_count_;
   }
 
- private:
+ protected:
   int64_t byte_count_;
   int64_t backup_count_;
   grpc_byte_buffer_reader reader_;
   grpc_slice slice_;
   Status status_;
 };
+
+template <class BufferWriter, class T>
+Status GenericSerialize(const grpc::protobuf::Message& msg,
+                        grpc_byte_buffer** bp, bool* own_buffer) {
+  static_assert(
+      std::is_base_of<protobuf::io::ZeroCopyOutputStream, BufferWriter>::value,
+      "BufferWriter must be a subclass of io::ZeroCopyOutputStream");
+  *own_buffer = true;
+  int byte_size = msg.ByteSize();
+  if (byte_size <= internal::kGrpcBufferWriterMaxBufferLength) {
+    grpc_slice slice = g_core_codegen_interface->grpc_slice_malloc(byte_size);
+    GPR_CODEGEN_ASSERT(
+        GRPC_SLICE_END_PTR(slice) ==
+        msg.SerializeWithCachedSizesToArray(GRPC_SLICE_START_PTR(slice)));
+    *bp = g_core_codegen_interface->grpc_raw_byte_buffer_create(&slice, 1);
+    g_core_codegen_interface->grpc_slice_unref(slice);
+    return g_core_codegen_interface->ok();
+  } else {
+    BufferWriter writer(bp, internal::kGrpcBufferWriterMaxBufferLength);
+    return msg.SerializeToZeroCopyStream(&writer)
+               ? g_core_codegen_interface->ok()
+               : Status(StatusCode::INTERNAL, "Failed to serialize message");
+  }
+}
+
+template <class BufferReader, class T>
+Status GenericDeserialize(grpc_byte_buffer* buffer,
+                          grpc::protobuf::Message* msg) {
+  static_assert(
+      std::is_base_of<protobuf::io::ZeroCopyInputStream, BufferReader>::value,
+      "BufferReader must be a subclass of io::ZeroCopyInputStream");
+  if (buffer == nullptr) {
+    return Status(StatusCode::INTERNAL, "No payload");
+  }
+  Status result = g_core_codegen_interface->ok();
+  {
+    BufferReader reader(buffer);
+    if (!reader.status().ok()) {
+      return reader.status();
+    }
+    ::grpc::protobuf::io::CodedInputStream decoder(&reader);
+    decoder.SetTotalBytesLimit(INT_MAX, INT_MAX);
+    if (!msg->ParseFromCodedStream(&decoder)) {
+      result = Status(StatusCode::INTERNAL, msg->InitializationErrorString());
+    }
+    if (!decoder.ConsumedEntireMessage()) {
+      result = Status(StatusCode::INTERNAL, "Did not read entire message");
+    }
+  }
+  g_core_codegen_interface->grpc_byte_buffer_destroy(buffer);
+  return result;
+}
+
 }  // namespace internal
 
+// This is needed so the following class does not conflict with protobuf
+// serializers that utilize internal-only tools.
+#ifdef GRPC_OPEN_SOURCE_PROTO
+// This class provides a protobuf serializer. It translates between protobuf
+// objects and grpc_byte_buffers. More information about SerializationTraits can
+// be found in include/grpc++/impl/codegen/serialization_traits.h.
 template <class T>
 class SerializationTraits<T, typename std::enable_if<std::is_base_of<
                                  grpc::protobuf::Message, T>::value>::type> {
  public:
   static Status Serialize(const grpc::protobuf::Message& msg,
                           grpc_byte_buffer** bp, bool* own_buffer) {
-    *own_buffer = true;
-    int byte_size = msg.ByteSize();
-    if (byte_size <= internal::kGrpcBufferWriterMaxBufferLength) {
-      grpc_slice slice = g_core_codegen_interface->grpc_slice_malloc(byte_size);
-      GPR_CODEGEN_ASSERT(
-          GRPC_SLICE_END_PTR(slice) ==
-          msg.SerializeWithCachedSizesToArray(GRPC_SLICE_START_PTR(slice)));
-      *bp = g_core_codegen_interface->grpc_raw_byte_buffer_create(&slice, 1);
-      g_core_codegen_interface->grpc_slice_unref(slice);
-      return g_core_codegen_interface->ok();
-    } else {
-      internal::GrpcBufferWriter writer(
-          bp, internal::kGrpcBufferWriterMaxBufferLength);
-      return msg.SerializeToZeroCopyStream(&writer)
-                 ? g_core_codegen_interface->ok()
-                 : Status(StatusCode::INTERNAL, "Failed to serialize message");
-    }
+    return internal::GenericSerialize<internal::GrpcBufferWriter, T>(
+        msg, bp, own_buffer);
   }
 
   static Status Deserialize(grpc_byte_buffer* buffer,
                             grpc::protobuf::Message* msg) {
-    if (buffer == nullptr) {
-      return Status(StatusCode::INTERNAL, "No payload");
-    }
-    Status result = g_core_codegen_interface->ok();
-    {
-      internal::GrpcBufferReader reader(buffer);
-      if (!reader.status().ok()) {
-        return reader.status();
-      }
-      ::grpc::protobuf::io::CodedInputStream decoder(&reader);
-      decoder.SetTotalBytesLimit(INT_MAX, INT_MAX);
-      if (!msg->ParseFromCodedStream(&decoder)) {
-        result = Status(StatusCode::INTERNAL, msg->InitializationErrorString());
-      }
-      if (!decoder.ConsumedEntireMessage()) {
-        result = Status(StatusCode::INTERNAL, "Did not read entire message");
-      }
-    }
-    g_core_codegen_interface->grpc_byte_buffer_destroy(buffer);
-    return result;
+    return internal::GenericDeserialize<internal::GrpcBufferReader, T>(buffer,
+                                                                       msg);
   }
 };
+#endif
 
 }  // namespace grpc
 
diff --git a/setup.py b/setup.py
index 18ba802fb0f8762777b128546a5b4156e10b7179..cb852735f0ff5fe503d5e81edca1ba876d0b325b 100644
--- a/setup.py
+++ b/setup.py
@@ -116,7 +116,7 @@ if EXTRA_ENV_COMPILE_ARGS is None:
   elif 'win32' in sys.platform:
     EXTRA_ENV_COMPILE_ARGS += ' -D_PYTHON_MSVC'
   elif "linux" in sys.platform:
-    EXTRA_ENV_COMPILE_ARGS += ' -std=c++11 -fvisibility=hidden -fno-wrapv'
+    EXTRA_ENV_COMPILE_ARGS += ' -std=c++11 -std=gnu99 -fvisibility=hidden -fno-wrapv'
   elif "darwin" in sys.platform:
     EXTRA_ENV_COMPILE_ARGS += ' -fvisibility=hidden -fno-wrapv'
 
@@ -237,7 +237,7 @@ INSTALL_REQUIRES = (
     'six>=1.5.2',
     # TODO(atash): eventually split the grpcio package into a metapackage
     # depending on protobuf and the runtime component (independent of protobuf)
-    'protobuf>=3.2.0',
+    'protobuf>=3.3.0',
 )
 
 if not PY3:
diff --git a/src/boringssl/gen_build_yaml.py b/src/boringssl/gen_build_yaml.py
index c53beb0da5f033f1b801859aead49bd6d6a264c0..d01c2b4ec55a1542a5e53330627b4baa963dc198 100755
--- a/src/boringssl/gen_build_yaml.py
+++ b/src/boringssl/gen_build_yaml.py
@@ -138,7 +138,7 @@ class Grpc(object):
           {
             'name': 'boringssl_%s' % os.path.basename(test[0]),
             'args': [map_testarg(arg) for arg in test[1:]],
-            'exclude_configs': ['asan'],
+            'exclude_configs': ['asan', 'ubsan'],
             'ci_platforms': ['linux', 'mac', 'posix', 'windows'],
             'platforms': ['linux', 'mac', 'posix', 'windows'],
             'flaky': False,
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 49e80aaee93b082e7bd71161a8b729da3e876ea7..b06738446daa858e39e476cfe6815bd55b6dc603 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -2573,7 +2573,7 @@ static void incoming_byte_stream_update_flow_control(grpc_exec_ctx *exec_ctx,
                                    add_max_recv_bytes);
     if ((int64_t)s->incoming_window_delta + (int64_t)initial_window_size -
             (int64_t)s->announce_window >
-        2 * (int64_t)initial_window_size) {
+        (int64_t)initial_window_size / 2) {
       write_type = GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK;
     }
     grpc_chttp2_become_writable(exec_ctx, t, s, write_type,
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index d0b94bd6cedc93da77de4325fb5cb6eef9204716..b0dd68511625e5b4734b07795cfcd08cce16c5e5 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -418,7 +418,10 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
 
     GRPC_CHTTP2_FLOW_DEBIT_STREAM_INCOMING_WINDOW_DELTA("parse", t, s,
                                                         incoming_frame_size);
-    if ((int64_t)s->incoming_window_delta - (int64_t)s->announce_window <= 0) {
+    if ((int64_t)s->incoming_window_delta - (int64_t)s->announce_window <=
+        -(int64_t)t->settings[GRPC_SENT_SETTINGS]
+                             [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] /
+            2) {
       grpc_chttp2_become_writable(exec_ctx, t, s,
                                   GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
                                   "window-update-required");
diff --git a/src/cpp/common/core_codegen.cc b/src/cpp/common/core_codegen.cc
index f679a339456920d95f9b9e57e7268f98291b33b2..2b484c0a08ef07b59e23be3bcc4656306a90db02 100644
--- a/src/cpp/common/core_codegen.cc
+++ b/src/cpp/common/core_codegen.cc
@@ -134,6 +134,12 @@ grpc_byte_buffer* CoreCodegen::grpc_raw_byte_buffer_create(grpc_slice* slice,
   return ::grpc_raw_byte_buffer_create(slice, nslices);
 }
 
+grpc_slice CoreCodegen::grpc_slice_new_with_user_data(void* p, size_t len,
+                                                      void (*destroy)(void*),
+                                                      void* user_data) {
+  return ::grpc_slice_new_with_user_data(p, len, destroy, user_data);
+}
+
 grpc_slice CoreCodegen::grpc_empty_slice() { return ::grpc_empty_slice(); }
 
 grpc_slice CoreCodegen::grpc_slice_malloc(size_t length) {
@@ -144,10 +150,18 @@ void CoreCodegen::grpc_slice_unref(grpc_slice slice) {
   ::grpc_slice_unref(slice);
 }
 
+grpc_slice CoreCodegen::grpc_slice_ref(grpc_slice slice) {
+  return ::grpc_slice_ref(slice);
+}
+
 grpc_slice CoreCodegen::grpc_slice_split_tail(grpc_slice* s, size_t split) {
   return ::grpc_slice_split_tail(s, split);
 }
 
+grpc_slice CoreCodegen::grpc_slice_split_head(grpc_slice* s, size_t split) {
+  return ::grpc_slice_split_head(s, split);
+}
+
 grpc_slice CoreCodegen::grpc_slice_from_static_buffer(const void* buffer,
                                                       size_t length) {
   return ::grpc_slice_from_static_buffer(buffer, length);
diff --git a/src/node/index.js b/src/node/index.js
index 071bfd7927bab798c2bf0920355be32c54cd7242..76ab1744b0073b6b39cd490c53f101163a864a18 100644
--- a/src/node/index.js
+++ b/src/node/index.js
@@ -31,6 +31,10 @@
  *
  */
 
+/**
+ * @module
+ */
+
 'use strict';
 
 var path = require('path');
@@ -256,5 +260,10 @@ exports.getClientChannel = client.getClientChannel;
 exports.waitForClientReady = client.waitForClientReady;
 
 exports.closeClient = function closeClient(client_obj) {
-  client.getClientChannel(client_obj).close();
+  client.Client.prototype.close.apply(client_obj);
 };
+
+/**
+ * @see module:src/client.Client
+ */
+exports.Client = client.Client;
diff --git a/src/node/src/client.js b/src/node/src/client.js
index 1aaf35c16cbc918bdd220e3ba04b2832817576db..43502da6af6147e534774d5e25e2934429290b54 100644
--- a/src/node/src/client.js
+++ b/src/node/src/client.js
@@ -37,7 +37,7 @@
  * This module contains the factory method for creating Client classes, and the
  * method calling code for all types of methods.
  *
- * For example, to create a client and call a method on it:
+ * @example <caption>Create a client and call a method on it</caption>
  *
  * var proto_obj = grpc.load(proto_file_path);
  * var Client = proto_obj.package.subpackage.ServiceName;
@@ -68,14 +68,33 @@ var Duplex = stream.Duplex;
 var util = require('util');
 var version = require('../../../package.json').version;
 
+util.inherits(ClientUnaryCall, EventEmitter);
+
+/**
+ * An EventEmitter. Used for unary calls
+ * @constructor
+ * @extends external:EventEmitter
+ * @param {grpc.Call} call The call object associated with the request
+ */
+function ClientUnaryCall(call) {
+  EventEmitter.call(this);
+  this.call = call;
+}
+
 util.inherits(ClientWritableStream, Writable);
 
 /**
  * A stream that the client can write to. Used for calls that are streaming from
  * the client side.
  * @constructor
+ * @extends external:Writable
+ * @borrows module:src/client~ClientUnaryCall#cancel as
+ *     module:src/client~ClientWritableStream#cancel
+ * @borrows module:src/client~ClientUnaryCall#getPeer as
+ *     module:src/client~ClientWritableStream#getPeer
  * @param {grpc.Call} call The call object to send data with
- * @param {function(*):Buffer=} serialize Serialization function for writes.
+ * @param {module:src/common~serialize=} [serialize=identity] Serialization
+ *     function for writes.
  */
 function ClientWritableStream(call, serialize) {
   Writable.call(this, {objectMode: true});
@@ -134,8 +153,14 @@ util.inherits(ClientReadableStream, Readable);
  * A stream that the client can read from. Used for calls that are streaming
  * from the server side.
  * @constructor
+ * @extends external:Readable
+ * @borrows module:src/client~ClientUnaryCall#cancel as
+ *     module:src/client~ClientReadableStream#cancel
+ * @borrows module:src/client~ClientUnaryCall#getPeer as
+ *     module:src/client~ClientReadableStream#getPeer
  * @param {grpc.Call} call The call object to read data with
- * @param {function(Buffer):*=} deserialize Deserialization function for reads
+ * @param {module:src/common~deserialize=} [deserialize=identity]
+ *     Deserialization function for reads
  */
 function ClientReadableStream(call, deserialize) {
   Readable.call(this, {objectMode: true});
@@ -155,6 +180,7 @@ function ClientReadableStream(call, deserialize) {
  * parameter indicates that the call should end with that status. status
  * defaults to OK if not provided.
  * @param {Object!} status The status that the call should end with
+ * @access private
  */
 function _readsDone(status) {
   /* jshint validthis: true */
@@ -173,6 +199,7 @@ ClientReadableStream.prototype._readsDone = _readsDone;
 
 /**
  * Called to indicate that we have received a status from the server.
+ * @access private
  */
 function _receiveStatus(status) {
   /* jshint validthis: true */
@@ -185,6 +212,7 @@ ClientReadableStream.prototype._receiveStatus = _receiveStatus;
 /**
  * If we have both processed all incoming messages and received the status from
  * the server, emit the status. Otherwise, do nothing.
+ * @access private
  */
 function _emitStatusIfDone() {
   /* jshint validthis: true */
@@ -270,10 +298,16 @@ util.inherits(ClientDuplexStream, Duplex);
  * A stream that the client can read from or write to. Used for calls with
  * duplex streaming.
  * @constructor
+ * @extends external:Duplex
+ * @borrows module:src/client~ClientUnaryCall#cancel as
+ *     module:src/client~ClientDuplexStream#cancel
+ * @borrows module:src/client~ClientUnaryCall#getPeer as
+ *     module:src/client~ClientDuplexStream#getPeer
  * @param {grpc.Call} call Call object to proxy
- * @param {function(*):Buffer=} serialize Serialization function for requests
- * @param {function(Buffer):*=} deserialize Deserialization function for
- *     responses
+ * @param {module:src/common~serialize=} [serialize=identity] Serialization
+ *     function for requests
+ * @param {module:src/common~deserialize=} [deserialize=identity]
+ *     Deserialization function for responses
  */
 function ClientDuplexStream(call, serialize, deserialize) {
   Duplex.call(this, {objectMode: true});
@@ -300,12 +334,14 @@ ClientDuplexStream.prototype._write = _write;
 
 /**
  * Cancel the ongoing call
+ * @alias module:src/client~ClientUnaryCall#cancel
  */
 function cancel() {
   /* jshint validthis: true */
   this.call.cancel();
 }
 
+ClientUnaryCall.prototype.cancel = cancel;
 ClientReadableStream.prototype.cancel = cancel;
 ClientWritableStream.prototype.cancel = cancel;
 ClientDuplexStream.prototype.cancel = cancel;
@@ -313,21 +349,49 @@ ClientDuplexStream.prototype.cancel = cancel;
 /**
  * Get the endpoint this call/stream is connected to.
  * @return {string} The URI of the endpoint
+ * @alias module:src/client~ClientUnaryCall#getPeer
  */
 function getPeer() {
   /* jshint validthis: true */
   return this.call.getPeer();
 }
 
+ClientUnaryCall.prototype.getPeer = getPeer;
 ClientReadableStream.prototype.getPeer = getPeer;
 ClientWritableStream.prototype.getPeer = getPeer;
 ClientDuplexStream.prototype.getPeer = getPeer;
 
+/**
+ * Any client call type
+ * @typedef {(ClientUnaryCall|ClientReadableStream|
+ *            ClientWritableStream|ClientDuplexStream)}
+ *     module:src/client~Call
+ */
+
+/**
+ * Options that can be set on a call.
+ * @typedef {Object} module:src/client~CallOptions
+ * @property {(date|number)} deadline The deadline for the entire call to
+ *     complete. A value of Infinity indicates that no deadline should be set.
+ * @property {(string)} host Server hostname to set on the call. Only meaningful
+ *     if different from the server address used to construct the client.
+ * @property {module:src/client~Call} parent Parent call. Used in servers when
+ *     making a call as part of the process of handling a call. Used to
+ *     propagate some information automatically, as specified by
+ *     propagate_flags.
+ * @property {number} propagate_flags Indicates which properties of a parent
+ *     call should propagate to this call. Bitwise combination of flags in
+ *     [grpc.propagate]{@link module:index.propagate}.
+ * @property {module:src/credentials~CallCredentials} credentials The
+ *     credentials that should be used to make this particular call.
+ */
+
 /**
  * Get a call object built with the provided options. Keys for options are
  * 'deadline', which takes a date or number, and 'host', which takes a string
  * and overrides the hostname to connect to.
- * @param {Object} options Options map.
+ * @access private
+ * @param {module:src/client~CallOptions=} options Options object.
  */
 function getCall(channel, method, options) {
   var deadline;
@@ -354,315 +418,380 @@ function getCall(channel, method, options) {
 }
 
 /**
- * Get a function that can make unary requests to the specified method.
- * @param {string} method The name of the method to request
- * @param {function(*):Buffer} serialize The serialization function for inputs
- * @param {function(Buffer)} deserialize The deserialization function for
- *     outputs
- * @return {Function} makeUnaryRequest
+ * A generic gRPC client. Primarily useful as a base class for generated clients.
+ * @alias module:src/client.Client
+ * @constructor
+ * @param {string} address Server address to connect to
+ * @param {module:src/credentials~ChannelCredentials} credentials Credentials to
+ *     use to connect to the server
+ * @param {Object} options Options to apply to channel creation
  */
-function makeUnaryRequestFunction(method, serialize, deserialize) {
-  /**
-   * Make a unary request with this method on the given channel with the given
-   * argument, callback, etc.
-   * @this {Client} Client object. Must have a channel member.
-   * @param {*} argument The argument to the call. Should be serializable with
-   *     serialize
-   * @param {Metadata=} metadata Metadata to add to the call
-   * @param {Object=} options Options map
-   * @param {function(?Error, value=)} callback The callback to for when the
-   *     response is received
-   * @return {EventEmitter} An event emitter for stream related events
+var Client = exports.Client = function Client(address, credentials, options) {
+  if (!options) {
+    options = {};
+  }
+  /* Append the grpc-node user agent string after the application user agent
+   * string, and put the combination at the beginning of the user agent string
    */
-  function makeUnaryRequest(argument, metadata, options, callback) {
-    /* jshint validthis: true */
-    /* While the arguments are listed in the function signature, those variables
-     * are not used directly. Instead, ArgueJS processes the arguments
-     * object. This allows for simple handling of optional arguments in the
-     * middle of the argument list, and also provides type checking. */
-    var args = arguejs({argument: null, metadata: [Metadata, new Metadata()],
-                        options: [Object], callback: Function}, arguments);
-    var emitter = new EventEmitter();
-    var call = getCall(this.$channel, method, args.options);
-    metadata = args.metadata.clone();
-    emitter.cancel = function cancel() {
-      call.cancel();
-    };
-    emitter.getPeer = function getPeer() {
-      return call.getPeer();
-    };
-    var client_batch = {};
-    var message = serialize(args.argument);
-    if (args.options) {
-      message.grpcWriteFlags = args.options.flags;
-    }
-
-    client_batch[grpc.opType.SEND_INITIAL_METADATA] =
-        metadata._getCoreRepresentation();
-    client_batch[grpc.opType.SEND_MESSAGE] = message;
-    client_batch[grpc.opType.SEND_CLOSE_FROM_CLIENT] = true;
-    client_batch[grpc.opType.RECV_INITIAL_METADATA] = true;
-    client_batch[grpc.opType.RECV_MESSAGE] = true;
-    client_batch[grpc.opType.RECV_STATUS_ON_CLIENT] = true;
-    call.startBatch(client_batch, function(err, response) {
-      response.status.metadata = Metadata._fromCoreRepresentation(
-          response.status.metadata);
-      var status = response.status;
-      var error;
-      var deserialized;
-      emitter.emit('metadata', Metadata._fromCoreRepresentation(
-          response.metadata));
-      if (status.code === grpc.status.OK) {
-        if (err) {
-          // Got a batch error, but OK status. Something went wrong
-          args.callback(err);
-          return;
-        } else {
-          try {
-            deserialized = deserialize(response.read);
-          } catch (e) {
-            /* Change status to indicate bad server response. This will result
-             * in passing an error to the callback */
-            status = {
-              code: grpc.status.INTERNAL,
-              details: 'Failed to parse server response'
-            };
-          }
-        }
-      }
-      if (status.code !== grpc.status.OK) {
-        error = new Error(status.details);
-        error.code = status.code;
-        error.metadata = status.metadata;
-        args.callback(error);
-      } else {
-        args.callback(null, deserialized);
-      }
-      emitter.emit('status', status);
-    });
-    return emitter;
+  if (options['grpc.primary_user_agent']) {
+    options['grpc.primary_user_agent'] += ' ';
+  } else {
+    options['grpc.primary_user_agent'] = '';
   }
-  return makeUnaryRequest;
-}
+  options['grpc.primary_user_agent'] += 'grpc-node/' + version;
+  /* Private fields use $ as a prefix instead of _ because it is an invalid
+   * prefix of a method name */
+  this.$channel = new grpc.Channel(address, credentials, options);
+};
 
 /**
- * Get a function that can make client stream requests to the specified method.
+ * @typedef {Error} module:src/client.Client~ServiceError
+ * @property {number} code The error code, a key of
+ *     [grpc.status]{@link module:src/client.status}
+ * @property {module:metadata.Metadata} metadata Metadata sent with the status
+ *     by the server, if any
+ */
+
+/**
+ * @callback module:src/client.Client~requestCallback
+ * @param {?module:src/client.Client~ServiceError} error The error, if the call
+ *     failed
+ * @param {*} value The response value, if the call succeeded
+ */
+
+/**
+ * Make a unary request to the given method, using the given serialize
+ * and deserialize functions, with the given argument.
  * @param {string} method The name of the method to request
- * @param {function(*):Buffer} serialize The serialization function for inputs
- * @param {function(Buffer)} deserialize The deserialization function for
- *     outputs
- * @return {Function} makeClientStreamRequest
+ * @param {module:src/common~serialize} serialize The serialization function for
+ *     inputs
+ * @param {module:src/common~deserialize} deserialize The deserialization
+ *     function for outputs
+ * @param {*} argument The argument to the call. Should be serializable with
+ *     serialize
+ * @param {module:src/metadata.Metadata=} metadata Metadata to add to the call
+ * @param {module:src/client~CallOptions=} options Options map
+ * @param {module:src/client.Client~requestCallback} callback The callback to
+ *     call when the response is received
+ * @return {EventEmitter} An event emitter for stream related events
  */
-function makeClientStreamRequestFunction(method, serialize, deserialize) {
-  /**
-   * Make a client stream request with this method on the given channel with the
-   * given callback, etc.
-   * @this {Client} Client object. Must have a channel member.
-   * @param {Metadata=} metadata Array of metadata key/value pairs to add to the
-   *     call
-   * @param {Object=} options Options map
-   * @param {function(?Error, value=)} callback The callback to for when the
-   *     response is received
-   * @return {EventEmitter} An event emitter for stream related events
-   */
-  function makeClientStreamRequest(metadata, options, callback) {
-    /* jshint validthis: true */
-    /* While the arguments are listed in the function signature, those variables
-     * are not used directly. Instead, ArgueJS processes the arguments
-     * object. This allows for simple handling of optional arguments in the
-     * middle of the argument list, and also provides type checking. */
-    var args = arguejs({metadata: [Metadata, new Metadata()],
-                        options: [Object], callback: Function}, arguments);
-    var call = getCall(this.$channel, method, args.options);
-    metadata = args.metadata.clone();
-    var stream = new ClientWritableStream(call, serialize);
-    var metadata_batch = {};
-    metadata_batch[grpc.opType.SEND_INITIAL_METADATA] =
-        metadata._getCoreRepresentation();
-    metadata_batch[grpc.opType.RECV_INITIAL_METADATA] = true;
-    call.startBatch(metadata_batch, function(err, response) {
+Client.prototype.makeUnaryRequest = function(method, serialize, deserialize,
+                                             argument, metadata, options,
+                                             callback) {
+  /* While the arguments are listed in the function signature, those variables
+   * are not used directly. Instead, ArgueJS processes the arguments
+   * object. This allows for simple handling of optional arguments in the
+   * middle of the argument list, and also provides type checking. */
+  var args = arguejs({method: String, serialize: Function,
+                      deserialize: Function,
+                      argument: null, metadata: [Metadata, new Metadata()],
+                      options: [Object], callback: Function}, arguments);
+  var call = getCall(this.$channel, method, args.options);
+  var emitter = new ClientUnaryCall(call);
+  metadata = args.metadata.clone();
+  var client_batch = {};
+  var message = serialize(args.argument);
+  if (args.options) {
+    message.grpcWriteFlags = args.options.flags;
+  }
+
+  client_batch[grpc.opType.SEND_INITIAL_METADATA] =
+      metadata._getCoreRepresentation();
+  client_batch[grpc.opType.SEND_MESSAGE] = message;
+  client_batch[grpc.opType.SEND_CLOSE_FROM_CLIENT] = true;
+  client_batch[grpc.opType.RECV_INITIAL_METADATA] = true;
+  client_batch[grpc.opType.RECV_MESSAGE] = true;
+  client_batch[grpc.opType.RECV_STATUS_ON_CLIENT] = true;
+  call.startBatch(client_batch, function(err, response) {
+    response.status.metadata = Metadata._fromCoreRepresentation(
+        response.status.metadata);
+    var status = response.status;
+    var error;
+    var deserialized;
+    emitter.emit('metadata', Metadata._fromCoreRepresentation(
+        response.metadata));
+    if (status.code === grpc.status.OK) {
       if (err) {
-        // The call has stopped for some reason. A non-OK status will arrive
-        // in the other batch.
+        // Got a batch error, but OK status. Something went wrong
+        args.callback(err);
         return;
-      }
-      stream.emit('metadata', Metadata._fromCoreRepresentation(
-          response.metadata));
-    });
-    var client_batch = {};
-    client_batch[grpc.opType.RECV_MESSAGE] = true;
-    client_batch[grpc.opType.RECV_STATUS_ON_CLIENT] = true;
-    call.startBatch(client_batch, function(err, response) {
-      response.status.metadata = Metadata._fromCoreRepresentation(
-          response.status.metadata);
-      var status = response.status;
-      var error;
-      var deserialized;
-      if (status.code === grpc.status.OK) {
-        if (err) {
-          // Got a batch error, but OK status. Something went wrong
-          args.callback(err);
-          return;
-        } else {
-          try {
-            deserialized = deserialize(response.read);
-          } catch (e) {
-            /* Change status to indicate bad server response. This will result
-             * in passing an error to the callback */
-            status = {
-              code: grpc.status.INTERNAL,
-              details: 'Failed to parse server response'
-            };
-          }
-        }
-      }
-      if (status.code !== grpc.status.OK) {
-        error = new Error(response.status.details);
-        error.code = status.code;
-        error.metadata = status.metadata;
-        args.callback(error);
       } else {
-        args.callback(null, deserialized);
+        try {
+          deserialized = deserialize(response.read);
+        } catch (e) {
+          /* Change status to indicate bad server response. This will result
+           * in passing an error to the callback */
+          status = {
+            code: grpc.status.INTERNAL,
+            details: 'Failed to parse server response'
+          };
+        }
       }
-      stream.emit('status', status);
-    });
-    return stream;
-  }
-  return makeClientStreamRequest;
-}
+    }
+    if (status.code !== grpc.status.OK) {
+      error = new Error(status.details);
+      error.code = status.code;
+      error.metadata = status.metadata;
+      args.callback(error);
+    } else {
+      args.callback(null, deserialized);
+    }
+    emitter.emit('status', status);
+  });
+  return emitter;
+};
 
 /**
- * Get a function that can make server stream requests to the specified method.
+ * Make a client stream request to the given method, using the given serialize
+ * and deserialize functions.
  * @param {string} method The name of the method to request
- * @param {function(*):Buffer} serialize The serialization function for inputs
- * @param {function(Buffer)} deserialize The deserialization function for
- *     outputs
- * @return {Function} makeServerStreamRequest
+ * @param {module:src/common~serialize} serialize The serialization function for
+ *     inputs
+ * @param {module:src/common~deserialize} deserialize The deserialization
+ *     function for outputs
+ * @param {module:src/metadata.Metadata=} metadata Array of metadata key/value
+ *     pairs to add to the call
+ * @param {module:src/client~CallOptions=} options Options map
+ * @param {module:src/client.Client~requestCallback} callback The callback to
+ *     call when the response is received
+ * @return {module:src/client~ClientWritableStream} An event emitter for stream
+ *     related events
  */
-function makeServerStreamRequestFunction(method, serialize, deserialize) {
-  /**
-   * Make a server stream request with this method on the given channel with the
-   * given argument, etc.
-   * @this {SurfaceClient} Client object. Must have a channel member.
-   * @param {*} argument The argument to the call. Should be serializable with
-   *     serialize
-   * @param {Metadata=} metadata Array of metadata key/value pairs to add to the
-   *     call
-   * @param {Object} options Options map
-   * @return {EventEmitter} An event emitter for stream related events
-   */
-  function makeServerStreamRequest(argument, metadata, options) {
-    /* jshint validthis: true */
-    /* While the arguments are listed in the function signature, those variables
-     * are not used directly. Instead, ArgueJS processes the arguments
-     * object. */
-    var args = arguejs({argument: null, metadata: [Metadata, new Metadata()],
-                        options: [Object]}, arguments);
-    var call = getCall(this.$channel, method, args.options);
-    metadata = args.metadata.clone();
-    var stream = new ClientReadableStream(call, deserialize);
-    var start_batch = {};
-    var message = serialize(args.argument);
-    if (args.options) {
-      message.grpcWriteFlags = args.options.flags;
+Client.prototype.makeClientStreamRequest = function(method, serialize,
+                                                      deserialize, metadata,
+                                                      options, callback) {
+  /* While the arguments are listed in the function signature, those variables
+   * are not used directly. Instead, ArgueJS processes the arguments
+   * object. This allows for simple handling of optional arguments in the
+   * middle of the argument list, and also provides type checking. */
+  var args = arguejs({method: String, serialize: Function,
+                      deserialize: Function,
+                      metadata: [Metadata, new Metadata()],
+                      options: [Object], callback: Function}, arguments);
+  var call = getCall(this.$channel, method, args.options);
+  metadata = args.metadata.clone();
+  var stream = new ClientWritableStream(call, serialize);
+  var metadata_batch = {};
+  metadata_batch[grpc.opType.SEND_INITIAL_METADATA] =
+      metadata._getCoreRepresentation();
+  metadata_batch[grpc.opType.RECV_INITIAL_METADATA] = true;
+  call.startBatch(metadata_batch, function(err, response) {
+    if (err) {
+      // The call has stopped for some reason. A non-OK status will arrive
+      // in the other batch.
+      return;
     }
-    start_batch[grpc.opType.SEND_INITIAL_METADATA] =
-        metadata._getCoreRepresentation();
-    start_batch[grpc.opType.RECV_INITIAL_METADATA] = true;
-    start_batch[grpc.opType.SEND_MESSAGE] = message;
-    start_batch[grpc.opType.SEND_CLOSE_FROM_CLIENT] = true;
-    call.startBatch(start_batch, function(err, response) {
-      if (err) {
-        // The call has stopped for some reason. A non-OK status will arrive
-        // in the other batch.
-        return;
-      }
-      stream.emit('metadata', Metadata._fromCoreRepresentation(
-          response.metadata));
-    });
-    var status_batch = {};
-    status_batch[grpc.opType.RECV_STATUS_ON_CLIENT] = true;
-    call.startBatch(status_batch, function(err, response) {
+    stream.emit('metadata', Metadata._fromCoreRepresentation(
+        response.metadata));
+  });
+  var client_batch = {};
+  client_batch[grpc.opType.RECV_MESSAGE] = true;
+  client_batch[grpc.opType.RECV_STATUS_ON_CLIENT] = true;
+  call.startBatch(client_batch, function(err, response) {
+    response.status.metadata = Metadata._fromCoreRepresentation(
+        response.status.metadata);
+    var status = response.status;
+    var error;
+    var deserialized;
+    if (status.code === grpc.status.OK) {
       if (err) {
-        stream.emit('error', err);
+        // Got a batch error, but OK status. Something went wrong
+        args.callback(err);
         return;
+      } else {
+        try {
+          deserialized = deserialize(response.read);
+        } catch (e) {
+          /* Change status to indicate bad server response. This will result
+           * in passing an error to the callback */
+          status = {
+            code: grpc.status.INTERNAL,
+            details: 'Failed to parse server response'
+          };
+        }
       }
-      response.status.metadata = Metadata._fromCoreRepresentation(
-          response.status.metadata);
-      stream._receiveStatus(response.status);
-    });
-    return stream;
-  }
-  return makeServerStreamRequest;
-}
+    }
+    if (status.code !== grpc.status.OK) {
+      error = new Error(response.status.details);
+      error.code = status.code;
+      error.metadata = status.metadata;
+      args.callback(error);
+    } else {
+      args.callback(null, deserialized);
+    }
+    stream.emit('status', status);
+  });
+  return stream;
+};
 
 /**
- * Get a function that can make bidirectional stream requests to the specified
- * method.
+ * Make a server stream request to the given method, with the given serialize
+ * and deserialize functions, using the given argument.
  * @param {string} method The name of the method to request
- * @param {function(*):Buffer} serialize The serialization function for inputs
- * @param {function(Buffer)} deserialize The deserialization function for
- *     outputs
- * @return {Function} makeBidiStreamRequest
+ * @param {module:src/common~serialize} serialize The serialization function for
+ *     inputs
+ * @param {module:src/common~deserialize} deserialize The deserialization
+ *     function for outputs
+ * @param {*} argument The argument to the call. Should be serializable with
+ *     serialize
+ * @param {module:src/metadata.Metadata=} metadata Array of metadata key/value
+ *     pairs to add to the call
+ * @param {module:src/client~CallOptions=} options Options map
+ * @return {module:src/client~ClientReadableStream} An event emitter for stream
+ *     related events
  */
-function makeBidiStreamRequestFunction(method, serialize, deserialize) {
-  /**
-   * Make a bidirectional stream request with this method on the given channel.
-   * @this {SurfaceClient} Client object. Must have a channel member.
-   * @param {Metadata=} metadata Array of metadata key/value pairs to add to the
-   *     call
-   * @param {Options} options Options map
-   * @return {EventEmitter} An event emitter for stream related events
-   */
-  function makeBidiStreamRequest(metadata, options) {
-    /* jshint validthis: true */
-    /* While the arguments are listed in the function signature, those variables
-     * are not used directly. Instead, ArgueJS processes the arguments
-     * object. */
-    var args = arguejs({metadata: [Metadata, new Metadata()],
-                        options: [Object]}, arguments);
-    var call = getCall(this.$channel, method, args.options);
-    metadata = args.metadata.clone();
-    var stream = new ClientDuplexStream(call, serialize, deserialize);
-    var start_batch = {};
-    start_batch[grpc.opType.SEND_INITIAL_METADATA] =
-        metadata._getCoreRepresentation();
-    start_batch[grpc.opType.RECV_INITIAL_METADATA] = true;
-    call.startBatch(start_batch, function(err, response) {
-      if (err) {
-        // The call has stopped for some reason. A non-OK status will arrive
-        // in the other batch.
-        return;
-      }
-      stream.emit('metadata', Metadata._fromCoreRepresentation(
-          response.metadata));
-    });
-    var status_batch = {};
-    status_batch[grpc.opType.RECV_STATUS_ON_CLIENT] = true;
-    call.startBatch(status_batch, function(err, response) {
-      if (err) {
-        stream.emit('error', err);
-        return;
-      }
-      response.status.metadata = Metadata._fromCoreRepresentation(
-          response.status.metadata);
-      stream._receiveStatus(response.status);
-    });
-    return stream;
+Client.prototype.makeServerStreamRequest = function(method, serialize,
+                                                    deserialize, argument,
+                                                    metadata, options) {
+  /* While the arguments are listed in the function signature, those variables
+   * are not used directly. Instead, ArgueJS processes the arguments
+   * object. */
+  var args = arguejs({method: String, serialize: Function,
+                      deserialize: Function,
+                      argument: null, metadata: [Metadata, new Metadata()],
+                      options: [Object]}, arguments);
+  var call = getCall(this.$channel, method, args.options);
+  metadata = args.metadata.clone();
+  var stream = new ClientReadableStream(call, deserialize);
+  var start_batch = {};
+  var message = serialize(args.argument);
+  if (args.options) {
+    message.grpcWriteFlags = args.options.flags;
   }
-  return makeBidiStreamRequest;
-}
+  start_batch[grpc.opType.SEND_INITIAL_METADATA] =
+      metadata._getCoreRepresentation();
+  start_batch[grpc.opType.RECV_INITIAL_METADATA] = true;
+  start_batch[grpc.opType.SEND_MESSAGE] = message;
+  start_batch[grpc.opType.SEND_CLOSE_FROM_CLIENT] = true;
+  call.startBatch(start_batch, function(err, response) {
+    if (err) {
+      // The call has stopped for some reason. A non-OK status will arrive
+      // in the other batch.
+      return;
+    }
+    stream.emit('metadata', Metadata._fromCoreRepresentation(
+        response.metadata));
+  });
+  var status_batch = {};
+  status_batch[grpc.opType.RECV_STATUS_ON_CLIENT] = true;
+  call.startBatch(status_batch, function(err, response) {
+    if (err) {
+      stream.emit('error', err);
+      return;
+    }
+    response.status.metadata = Metadata._fromCoreRepresentation(
+        response.status.metadata);
+    stream._receiveStatus(response.status);
+  });
+  return stream;
+};
+
+
+/**
+ * Make a bidirectional stream request to the given method, using the given
+ * serialize and deserialize functions.
+ * @param {string} method The name of the method to request
+ * @param {module:src/common~serialize} serialize The serialization function for
+ *     inputs
+ * @param {module:src/common~deserialize} deserialize The deserialization
+ *     function for outputs
+ * @param {module:src/metadata.Metadata=} metadata Array of metadata key/value
+ *     pairs to add to the call
+ * @param {module:src/client~CallOptions=} options Options map
+ * @return {module:src/client~ClientDuplexStream} An event emitter for stream
+ *     related events
+ */
+Client.prototype.makeBidiStreamRequest = function(method, serialize,
+                                                  deserialize, metadata,
+                                                  options) {
+  /* While the arguments are listed in the function signature, those variables
+   * are not used directly. Instead, ArgueJS processes the arguments
+   * object. */
+  var args = arguejs({method: String, serialize: Function,
+                      deserialize: Function,
+                      metadata: [Metadata, new Metadata()],
+                      options: [Object]}, arguments);
+  var call = getCall(this.$channel, method, args.options);
+  metadata = args.metadata.clone();
+  var stream = new ClientDuplexStream(call, serialize, deserialize);
+  var start_batch = {};
+  start_batch[grpc.opType.SEND_INITIAL_METADATA] =
+      metadata._getCoreRepresentation();
+  start_batch[grpc.opType.RECV_INITIAL_METADATA] = true;
+  call.startBatch(start_batch, function(err, response) {
+    if (err) {
+      // The call has stopped for some reason. A non-OK status will arrive
+      // in the other batch.
+      return;
+    }
+    stream.emit('metadata', Metadata._fromCoreRepresentation(
+        response.metadata));
+  });
+  var status_batch = {};
+  status_batch[grpc.opType.RECV_STATUS_ON_CLIENT] = true;
+  call.startBatch(status_batch, function(err, response) {
+    if (err) {
+      stream.emit('error', err);
+      return;
+    }
+    response.status.metadata = Metadata._fromCoreRepresentation(
+        response.status.metadata);
+    stream._receiveStatus(response.status);
+  });
+  return stream;
+};
 
+Client.prototype.close = function() {
+  this.$channel.close();
+};
+
+/**
+ * Return the underlying channel object for the specified client
+ * @return {Channel} The channel
+ */
+Client.prototype.getChannel = function() {
+  return this.$channel;
+};
+
+/**
+ * Wait for the client to be ready. The callback will be called when the
+ * client has successfully connected to the server, and it will be called
+ * with an error if the attempt to connect to the server has unrecoverably
+ * failed or if the deadline expires. This function will make the channel
+ * start connecting if it has not already done so.
+ * @param {(Date|Number)} deadline When to stop waiting for a connection. Pass
+ *     Infinity to wait forever.
+ * @param {function(Error)} callback The callback to call when done attempting
+ *     to connect.
+ */
+Client.prototype.waitForReady = function(deadline, callback) {
+  var self = this;
+  var checkState = function(err) {
+    if (err) {
+      callback(new Error('Failed to connect before the deadline'));
+      return;
+    }
+    var new_state = self.$channel.getConnectivityState(true);
+    if (new_state === grpc.connectivityState.READY) {
+      callback();
+    } else if (new_state === grpc.connectivityState.FATAL_FAILURE) {
+      callback(new Error('Failed to connect to server'));
+    } else {
+      self.$channel.watchConnectivityState(new_state, deadline, checkState);
+    }
+  };
+  checkState();
+};
 
 /**
  * Map with short names for each of the requester maker functions. Used in
  * makeClientConstructor
+ * @access private
  */
-var requester_makers = {
-  unary: makeUnaryRequestFunction,
-  server_stream: makeServerStreamRequestFunction,
-  client_stream: makeClientStreamRequestFunction,
-  bidi: makeBidiStreamRequestFunction
+var requester_funcs = {
+  unary: Client.prototype.makeUnaryRequest,
+  server_stream: Client.prototype.makeServerStreamRequest,
+  client_stream: Client.prototype.makeClientStreamRequest,
+  bidi: Client.prototype.makeBidiStreamRequest
 };
 
 function getDefaultValues(metadata, options) {
@@ -675,6 +804,7 @@ function getDefaultValues(metadata, options) {
 /**
  * Map with wrappers for each type of requester function to make it use the old
  * argument order with optional arguments after the callback.
+ * @access private
  */
 var deprecated_request_wrap = {
   unary: function(makeUnaryRequest) {
@@ -700,55 +830,33 @@ var deprecated_request_wrap = {
 };
 
 /**
- * Creates a constructor for a client with the given methods. The methods object
- * maps method name to an object with the following keys:
- * path: The path on the server for accessing the method. For example, for
- *     protocol buffers, we use "/service_name/method_name"
- * requestStream: bool indicating whether the client sends a stream
- * resonseStream: bool indicating whether the server sends a stream
- * requestSerialize: function to serialize request objects
- * responseDeserialize: function to deserialize response objects
- * @param {Object} methods An object mapping method names to method attributes
+ * Creates a constructor for a client with the methods defined in the
+ * methods argument.
+ * @param {module:src/common~ServiceDefinition} methods An object mapping
+ *     method names to method attributes
  * @param {string} serviceName The fully qualified name of the service
- * @param {Object} class_options An options object. Currently only uses the key
- *     deprecatedArgumentOrder, a boolean that Indicates that the old argument
- *     order should be used for methods, with optional arguments at the end
- *     instead of the callback at the end. Defaults to false. This option is
- *     only a temporary stopgap measure to smooth an API breakage.
+ * @param {Object} class_options An options object.
+ * @param {boolean=} [class_options.deprecatedArgumentOrder=false] Indicates
+ *     that the old argument order should be used for methods, with optional
+ *     arguments at the end instead of the callback at the end. This option
+ *     is only a temporary stopgap measure to smooth an API breakage.
  *     It is deprecated, and new code should not use it.
- * @return {function(string, Object)} New client constructor
+ * @return {function(string, Object)} New client constructor, which is a
+ *     subclass of [grpc.Client]{@link module:src/client.Client}, and has the
+ *     same arguments as that constructor.
  */
 exports.makeClientConstructor = function(methods, serviceName,
                                          class_options) {
   if (!class_options) {
     class_options = {};
   }
-  /**
-   * Create a client with the given methods
-   * @constructor
-   * @param {string} address The address of the server to connect to
-   * @param {grpc.Credentials} credentials Credentials to use to connect
-   *     to the server
-   * @param {Object} options Options to pass to the underlying channel
-   */
-  function Client(address, credentials, options) {
-    if (!options) {
-      options = {};
-    }
-    /* Append the grpc-node user agent string after the application user agent
-     * string, and put the combination at the beginning of the user agent string
-     */
-    if (options['grpc.primary_user_agent']) {
-      options['grpc.primary_user_agent'] += ' ';
-    } else {
-      options['grpc.primary_user_agent'] = '';
-    }
-    options['grpc.primary_user_agent'] += 'grpc-node/' + version;
-    /* Private fields use $ as a prefix instead of _ because it is an invalid
-     * prefix of a method name */
-    this.$channel = new grpc.Channel(address, credentials, options);
+
+  function ServiceClient(address, credentials, options) {
+    Client.call(this, address, credentials, options);
   }
 
+  util.inherits(ServiceClient, Client);
+
   _.each(methods, function(attrs, name) {
     var method_type;
     if (_.startsWith(name, '$')) {
@@ -769,20 +877,20 @@ exports.makeClientConstructor = function(methods, serviceName,
     }
     var serialize = attrs.requestSerialize;
     var deserialize = attrs.responseDeserialize;
-    var method_func = requester_makers[method_type](
-        attrs.path, serialize, deserialize);
+    var method_func = _.partial(requester_funcs[method_type], attrs.path,
+                                serialize, deserialize);
     if (class_options.deprecatedArgumentOrder) {
-      Client.prototype[name] = deprecated_request_wrap(method_func);
+      ServiceClient.prototype[name] = deprecated_request_wrap(method_func);
     } else {
-      Client.prototype[name] = method_func;
+      ServiceClient.prototype[name] = method_func;
     }
     // Associate all provided attributes with the method
-    _.assign(Client.prototype[name], attrs);
+    _.assign(ServiceClient.prototype[name], attrs);
   });
 
-  Client.service = methods;
+  ServiceClient.service = methods;
 
-  return Client;
+  return ServiceClient;
 };
 
 /**
@@ -791,7 +899,7 @@ exports.makeClientConstructor = function(methods, serviceName,
  * @return {Channel} The channel
  */
 exports.getClientChannel = function(client) {
-  return client.$channel;
+  return Client.prototype.getChannel.call(client);
 };
 
 /**
@@ -807,21 +915,7 @@ exports.getClientChannel = function(client) {
  *     to connect.
  */
 exports.waitForClientReady = function(client, deadline, callback) {
-  var checkState = function(err) {
-    if (err) {
-      callback(new Error('Failed to connect before the deadline'));
-      return;
-    }
-    var new_state = client.$channel.getConnectivityState(true);
-    if (new_state === grpc.connectivityState.READY) {
-      callback();
-    } else if (new_state === grpc.connectivityState.FATAL_FAILURE) {
-      callback(new Error('Failed to connect to server'));
-    } else {
-      client.$channel.watchConnectivityState(new_state, deadline, checkState);
-    }
-  };
-  checkState();
+  Client.prototype.waitForReady.call(client, deadline, callback);
 };
 
 /**
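
The refactor above moves the per-method request functions onto a shared `Client` prototype, so generated service clients are now subclasses of `grpc.Client` with `waitForReady`, `getChannel`, and `close` available directly. The following is a minimal usage sketch, assuming the public `grpc` package re-exports `makeClientConstructor` as `grpc.makeGenericClientConstructor`; the Echo service, its JSON codecs, and the server address are hypothetical stand-ins:

```js
// Sketch only: the service definition and address below are hypothetical.
var grpc = require('grpc');

// Trivial JSON-over-Buffer (de)serializers for illustration.
var serialize = function(value) { return new Buffer(JSON.stringify(value)); };
var deserialize = function(data) { return JSON.parse(data.toString()); };

var EchoClient = grpc.makeGenericClientConstructor({
  echo: {
    path: '/EchoService/Echo',
    requestStream: false,
    responseStream: false,
    requestSerialize: serialize,
    responseDeserialize: deserialize
  }
}, 'EchoService');

var client = new EchoClient('localhost:50051',
                            grpc.credentials.createInsecure());

// waitForReady starts connecting and calls back once the channel is READY,
// or with an Error if the deadline expires first.
client.waitForReady(Date.now() + 5000, function(err) {
  if (err) {
    console.error(err.message);
    return;
  }
  // Generated methods live on the ServiceClient prototype, so plain unary
  // calls work as before.
  client.echo({message: 'hello'}, function(err, response) {
    console.log(err ? err.message : response);
    client.close();  // new in this patch: releases the underlying channel
  });
});
```
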
diff --git a/src/node/src/common.js b/src/node/src/common.js
index 757969dbddb147a162738d6e61977cd6904d7229..4dad60e630f7476fd4e22174c9865425dfc4b2ab 100644
--- a/src/node/src/common.js
+++ b/src/node/src/common.js
@@ -43,7 +43,8 @@ var _ = require('lodash');
 
 /**
  * Wrap a function to pass null-like values through without calling it. If no
- * function is given, just uses the identity;
+ * function is given, just uses the identity.
+ * @private
  * @param {?function} func The function to wrap
  * @return {function} The wrapped function
  */
@@ -90,3 +91,67 @@ exports.defaultGrpcOptions = {
   enumsAsStrings: true,
   deprecatedArgumentOrder: false
 };
+
+// JSDoc definitions that are used in multiple other modules
+
+/**
+ * The EventEmitter class in the event standard module
+ * @external EventEmitter
+ * @see https://nodejs.org/api/events.html#events_class_eventemitter
+ */
+
+/**
+ * The Readable class in the stream standard module
+ * @external Readable
+ * @see https://nodejs.org/api/stream.html#stream_readable_streams
+ */
+
+/**
+ * The Writable class in the stream standard module
+ * @external Writable
+ * @see https://nodejs.org/api/stream.html#stream_writable_streams
+ */
+
+/**
+ * The Duplex class in the stream standard module
+ * @external Duplex
+ * @see https://nodejs.org/api/stream.html#stream_class_stream_duplex
+ */
+
+/**
+ * A serialization function
+ * @callback module:src/common~serialize
+ * @param {*} value The value to serialize
+ * @return {Buffer} The value serialized as a byte sequence
+ */
+
+/**
+ * A deserialization function
+ * @callback module:src/common~deserialize
+ * @param {Buffer} data The byte sequence to deserialize
+ * @return {*} The data deserialized as a value
+ */
+
+/**
+ * An object that completely defines a service method signature.
+ * @typedef {Object} module:src/common~MethodDefinition
+ * @property {string} path The method's URL path
+ * @property {boolean} requestStream Indicates whether the method accepts
+ *     a stream of requests
+ * @property {boolean} responseStream Indicates whether the method returns
+ *     a stream of responses
+ * @property {module:src/common~serialize} requestSerialize Serialization
+ *     function for request values
+ * @property {module:src/common~serialize} responseSerialize Serialization
+ *     function for response values
+ * @property {module:src/common~deserialize} requestDeserialize Deserialization
+ *     function for request data
+ * @property {module:src/common~deserialize} responseDeserialize Deserialization
+ *     function for response data
+ */
+
+/**
+ * An object that completely defines a service.
+ * @typedef {Object.<string, module:src/common~MethodDefinition>}
+ *     module:src/common~ServiceDefinition
+ */
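
To make the new typedefs concrete, here is a hedged sketch of an object satisfying the `ServiceDefinition` and `MethodDefinition` shapes documented above; the service name, paths, and JSON codecs are hypothetical stand-ins for generated protobuf code:

```js
// Hypothetical codecs standing in for real protobuf serializers.
var jsonSerialize = function(value) {
  return new Buffer(JSON.stringify(value));
};
var jsonDeserialize = function(data) {
  return JSON.parse(data.toString());
};

// Satisfies module:src/common~ServiceDefinition: a map from method names
// to module:src/common~MethodDefinition objects.
var chatServiceDefinition = {
  send: {
    path: '/chat.Chat/Send',
    requestStream: false,   // unary request
    responseStream: false,  // unary response
    requestSerialize: jsonSerialize,
    requestDeserialize: jsonDeserialize,
    responseSerialize: jsonSerialize,
    responseDeserialize: jsonDeserialize
  },
  chat: {
    path: '/chat.Chat/Chat',
    requestStream: true,    // client sends a stream
    responseStream: true,   // server sends a stream
    requestSerialize: jsonSerialize,
    requestDeserialize: jsonDeserialize,
    responseSerialize: jsonSerialize,
    responseDeserialize: jsonDeserialize
  }
};
```
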
diff --git a/src/node/src/metadata.js b/src/node/src/metadata.js
index 612361b0eaf52bc82fdcdc48347824b6cddd9464..92cf23998b38b8a4052746a0fd422e69c4276be5 100644
--- a/src/node/src/metadata.js
+++ b/src/node/src/metadata.js
@@ -35,12 +35,7 @@
  * Metadata module
  *
  * This module defines the Metadata class, which represents header and trailer
- * metadata for gRPC calls. Here is an example of how to use it:
- *
- * var metadata = new metadata_module.Metadata();
- * metadata.set('key1', 'value1');
- * metadata.add('key1', 'value2');
- * metadata.get('key1') // returns ['value1', 'value2']
+ * metadata for gRPC calls.
  *
  * @module
  */
@@ -54,6 +49,12 @@ var grpc = require('./grpc_extension');
 /**
  * Class for storing metadata. Keys are normalized to lowercase ASCII.
  * @constructor
+ * @alias module:src/metadata.Metadata
+ * @example
+ * var metadata = new metadata_module.Metadata();
+ * metadata.set('key1', 'value1');
+ * metadata.add('key1', 'value2');
+ * metadata.get('key1') // returns ['value1', 'value2']
  */
 function Metadata() {
   this._internal_repr = {};
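
A short sketch of the semantics the relocated example documents, plus the `clone()` the client refactor above relies on; it assumes the public `grpc` package exports the `Metadata` class:

```js
var grpc = require('grpc');

var metadata = new grpc.Metadata();
metadata.set('Key1', 'value1');     // keys are normalized to lowercase ASCII
metadata.add('key1', 'value2');     // add appends; set replaces
console.log(metadata.get('key1'));  // ['value1', 'value2']

var copy = metadata.clone();        // independent copy, as taken by the
copy.set('key1', 'value3');         // client methods in this patch
console.log(metadata.get('key1'));  // still ['value1', 'value2']
```
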
diff --git a/src/php/composer.json b/src/php/composer.json
index 24c17c3b572dacf45772a44005d11a71a09cda1e..a4fba7e4f6a888a963e4ccbc689c02057022fc85 100644
--- a/src/php/composer.json
+++ b/src/php/composer.json
@@ -5,7 +5,7 @@
   "version": "1.4.0",
   "require": {
     "php": ">=5.5.0",
-    "google/protobuf": "^v3.1.0"
+    "google/protobuf": "^v3.3.0"
   },
   "require-dev": {
     "google/auth": "v0.9"
diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py
index 4f072809c4741a35599f12d90a069f2866f26ad9..f4ccb1ab94c6a529b694062fdcd0bc100552a06f 100644
--- a/src/python/grpcio/commands.py
+++ b/src/python/grpcio/commands.py
@@ -284,9 +284,9 @@ class BuildExt(build_ext.build_ext):
                 stderr=subprocess.PIPE)
             make_out, make_err = make_process.communicate()
             if make_out and make_process.returncode != 0:
-                sys.stdout.write(make_out + '\n')
+                sys.stdout.write(str(make_out) + '\n')
             if make_err:
-                sys.stderr.write(make_err + '\n')
+                sys.stderr.write(str(make_err) + '\n')
             if make_process.returncode != 0:
                 raise Exception("make command failed!")
 
diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py
index 64c51ee6a0e6b5ddce64721d01958b2ad1baef26..2952dcd36a98577b6b1d93cba4564d37e4a5fd91 100644
--- a/src/python/grpcio/grpc/__init__.py
+++ b/src/python/grpcio/grpc/__init__.py
@@ -66,7 +66,8 @@ class Future(six.with_metaclass(abc.ABCMeta)):
         Returns False under all other circumstances, for example:
         1. computation has begun and could not be canceled.
         2. computation has finished
-        3. computation is scheduled for execution and it is impossible to determine its state without blocking.
+        3. computation is scheduled for execution and it is impossible to
+           determine its state without blocking.
     """
         raise NotImplementedError()
 
@@ -123,8 +124,8 @@ class Future(six.with_metaclass(abc.ABCMeta)):
 
     Args:
       timeout: The length of time in seconds to wait for the computation to
-        finish or be cancelled. If None, the call will block until the computations's
-        termination.
+        finish or be cancelled. If None, the call will block until the
+        computation's termination.
 
     Returns:
       The return value of the computation.
@@ -146,8 +147,8 @@ class Future(six.with_metaclass(abc.ABCMeta)):
 
     Args:
       timeout: The length of time in seconds to wait for the computation to
-        terminate or be cancelled. If None, the call will block until the computations's
-        termination.
+        terminate or be cancelled. If None, the call will block until the
+        computation's termination.
 
     Returns:
         The exception raised by the computation, or None if the computation did
@@ -363,9 +364,9 @@ class ChannelCredentials(object):
     """An encapsulation of the data required to create a secure Channel.
 
   This class has no supported interface - it exists to define the type of its
-  instances and its instances exist to be passed to other functions. For example,
-  ssl_channel_credentials returns an instance, and secure_channel consumes an
-  instance of this class.
+  instances and its instances exist to be passed to other functions. For
+  example, ssl_channel_credentials returns an instance, and secure_channel
+  consumes an instance of this class.
   """
 
     def __init__(self, credentials):
@@ -373,7 +374,8 @@ class ChannelCredentials(object):
 
 
 class CallCredentials(object):
-    """An encapsulation of the data required to assert an identity over a channel.
+    """An encapsulation of the data required to assert an identity over a
+       channel.
 
   A CallCredentials may be composed with ChannelCredentials to always assert
   identity for every call over that Channel.
@@ -399,7 +401,8 @@ class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)):
     """Callback object received by a metadata plugin."""
 
     def __call__(self, metadata, error):
-        """Inform the gRPC runtime of the metadata to construct a CallCredentials.
+        """Inform the gRPC runtime of the metadata to construct a
+           CallCredentials.
 
     Args:
       metadata: The :term:`metadata` used to construct the CallCredentials.
@@ -879,8 +882,8 @@ class GenericRpcHandler(six.with_metaclass(abc.ABCMeta)):
       handler_call_details: A HandlerCallDetails describing the RPC.
 
     Returns:
-      An RpcMethodHandler with which the RPC may be serviced if the implementation
-      chooses to service this RPC, or None otherwise.
+      An RpcMethodHandler with which the RPC may be serviced if the
+      implementation chooses to service this RPC, or None otherwise.
     """
         raise NotImplementedError()
 
diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py
index 1806d311b47658b9a6a3496f78741b00e7c08d6b..f1c09de2eb1f0d6ca474a256e603bec91c922ce6 100644
--- a/src/python/grpcio_health_checking/setup.py
+++ b/src/python/grpcio_health_checking/setup.py
@@ -46,7 +46,7 @@ PACKAGE_DIRECTORIES = {
 SETUP_REQUIRES = (
     'grpcio-tools>={version}'.format(version=grpc_version.VERSION),)
 
-INSTALL_REQUIRES = ('protobuf>=3.2.0',
+INSTALL_REQUIRES = ('protobuf>=3.3.0',
                     'grpcio>={version}'.format(version=grpc_version.VERSION),)
 
 COMMAND_CLASS = {
diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py
index e6cf54745e2d945f492bad31c25f55e215577029..cb49c3d57e7b608542e36f6a4f5950dfae69db78 100644
--- a/src/python/grpcio_reflection/setup.py
+++ b/src/python/grpcio_reflection/setup.py
@@ -47,7 +47,7 @@ PACKAGE_DIRECTORIES = {
 SETUP_REQUIRES = (
     'grpcio-tools>={version}'.format(version=grpc_version.VERSION),)
 
-INSTALL_REQUIRES = ('protobuf>=3.2.0',
+INSTALL_REQUIRES = ('protobuf>=3.3.0',
                     'grpcio>={version}'.format(version=grpc_version.VERSION),)
 
 COMMAND_CLASS = {
diff --git a/src/python/grpcio_tests/setup.py b/src/python/grpcio_tests/setup.py
index b9f0264dae3a29eb025a6bc4ba301b0156e3ef8c..7ee5336a7d6cd07182d2bb8fffecaa3417c72c02 100644
--- a/src/python/grpcio_tests/setup.py
+++ b/src/python/grpcio_tests/setup.py
@@ -56,7 +56,7 @@ INSTALL_REQUIRES = (
     'grpcio>={version}'.format(version=grpc_version.VERSION),
     'grpcio-tools>={version}'.format(version=grpc_version.VERSION),
     'grpcio-health-checking>={version}'.format(version=grpc_version.VERSION),
-    'oauth2client>=1.4.7', 'protobuf>=3.2.0', 'six>=1.10',)
+    'oauth2client>=1.4.7', 'protobuf>=3.3.0', 'six>=1.10',)
 
 COMMAND_CLASS = {
     # Run `preprocess` *before* doing any packaging!
diff --git a/templates/composer.json.template b/templates/composer.json.template
index 94f0c236a9606d43dc5a5b2d02a762fb99215055..2d4cb119195dbb4555cef7d35a67fcf086270739 100644
--- a/templates/composer.json.template
+++ b/templates/composer.json.template
@@ -9,7 +9,7 @@
     "license": "BSD-3-Clause",
     "require": {
       "php": ">=5.5.0",
-      "google/protobuf": "^v3.1.0"
+      "google/protobuf": "^v3.3.0"
     },
     "require-dev": {
       "google/auth": "v0.9"
diff --git a/templates/src/php/composer.json.template b/templates/src/php/composer.json.template
index 1887ee3c822afd08725295d1749b9d5db2257f15..36932cf8f61189404d0b63acd92d6443ce8b8f0e 100644
--- a/templates/src/php/composer.json.template
+++ b/templates/src/php/composer.json.template
@@ -7,7 +7,7 @@
     "version": "${settings.php_version.php_composer()}",
     "require": {
       "php": ">=5.5.0",
-      "google/protobuf": "^v3.1.0"
+      "google/protobuf": "^v3.3.0"
     },
     "require-dev": {
       "google/auth": "v0.9"
diff --git a/templates/tools/dockerfile/gcp_api_libraries.include b/templates/tools/dockerfile/gcp_api_libraries.include
index 669b0f887c820a41afe4377669aa39516bcec913..adecb92c150e3cf1427155c1f8ca7ed91b6411d5 100644
--- a/templates/tools/dockerfile/gcp_api_libraries.include
+++ b/templates/tools/dockerfile/gcp_api_libraries.include
@@ -1,4 +1,3 @@
 # Google Cloud platform API libraries
 RUN apt-get update && apt-get install -y python-pip && apt-get clean
 RUN pip install --upgrade google-api-python-client
-
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile.template
similarity index 94%
rename from templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template
rename to templates/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile.template
index ad8ad71b5fab2ea955b43efcc4fccd0da08b3b1e..b4e618daeb73b7e37f222ac9c46d821d86b95550 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile.template
@@ -1,6 +1,6 @@
 %YAML 1.2
 --- |
-  # Copyright 2016, Google Inc.
+  # Copyright 2017, Google Inc.
   # All rights reserved.
   #
   # Redistribution and use in source and binary forms, with or without
@@ -29,10 +29,10 @@
   # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   
-  FROM golang:latest
+  FROM golang:1.7
   
-  <%include file="../../gcp_api_libraries.include"/>
-  <%include file="../../python_deps.include"/>
   <%include file="../../go_path.include"/>
+  <%include file="../../python_deps.include"/>
   # Define the default command.
   CMD ["bash"]
+  
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile.template
similarity index 86%
rename from templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template
rename to templates/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile.template
index d70b751b141ec304786dfb4058e3bce62606561d..437e8261b3a1b3840870534aa23879dec225f3c8 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile.template
@@ -1,6 +1,6 @@
 %YAML 1.2
 --- |
-  # Copyright 2016, Google Inc.
+  # Copyright 2017, Google Inc.
   # All rights reserved.
   #
   # Redistribution and use in source and binary forms, with or without
@@ -29,12 +29,10 @@
   # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   
-  FROM debian:jessie
+  FROM golang:1.8
   
-  <%include file="../../apt_get_basic.include"/>
+  <%include file="../../go_path.include"/>
   <%include file="../../python_deps.include"/>
-  <%include file="../../node_deps.include"/>
-  <%include file="../../gcp_api_libraries.include"/>
-  <%include file="../../run_tests_addons.include"/>
   # Define the default command.
   CMD ["bash"]
+  
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template
deleted file mode 100644
index 5d805bb4b22d1cf69fc38773f4371747f90124aa..0000000000000000000000000000000000000000
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template
+++ /dev/null
@@ -1,42 +0,0 @@
-%YAML 1.2
---- |
-  # Copyright 2015, Google Inc.
-  # All rights reserved.
-  #
-  # Redistribution and use in source and binary forms, with or without
-  # modification, are permitted provided that the following conditions are
-  # met:
-  #
-  #     * Redistributions of source code must retain the above copyright
-  # notice, this list of conditions and the following disclaimer.
-  #     * Redistributions in binary form must reproduce the above
-  # copyright notice, this list of conditions and the following disclaimer
-  # in the documentation and/or other materials provided with the
-  # distribution.
-  #     * Neither the name of Google Inc. nor the names of its
-  # contributors may be used to endorse or promote products derived from
-  # this software without specific prior written permission.
-  #
-  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  
-  FROM debian:jessie
-  
-  <%include file="../../apt_get_basic.include"/>
-  <%include file="../../python_deps.include"/>
-  <%include file="../../ccache_setup.include"/>
-  <%include file="../../cxx_deps.include"/>
-  <%include file="../../gcp_api_libraries.include"/>
-  <%include file="../../csharp_deps.include"/>
-  # Define the default command.
-  CMD ["bash"]
-  
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template
deleted file mode 100644
index 18f06b770c3031a4c569f6035a66b6efa2042baf..0000000000000000000000000000000000000000
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template
+++ /dev/null
@@ -1,41 +0,0 @@
-%YAML 1.2
---- |
-  # Copyright 2015-2016, Google Inc.
-  # All rights reserved.
-  #
-  # Redistribution and use in source and binary forms, with or without
-  # modification, are permitted provided that the following conditions are
-  # met:
-  #
-  #     * Redistributions of source code must retain the above copyright
-  # notice, this list of conditions and the following disclaimer.
-  #     * Redistributions in binary form must reproduce the above
-  # copyright notice, this list of conditions and the following disclaimer
-  # in the documentation and/or other materials provided with the
-  # distribution.
-  #     * Neither the name of Google Inc. nor the names of its
-  # contributors may be used to endorse or promote products derived from
-  # this software without specific prior written permission.
-  #
-  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  
-  FROM debian:jessie
-  
-  <%include file="../../apt_get_basic.include"/>
-  <%include file="../../python_deps.include"/>
-  <%include file="../../ccache_setup.include"/>
-  <%include file="../../cxx_deps.include"/>
-  <%include file="../../gcp_api_libraries.include"/>
-  <%include file="../../clang_update.include"/>
-  # Define the default command.
-  CMD ["bash"]
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template
deleted file mode 100644
index 2bb2f9ba1e6f5e90b1913a6759c1bfb70b76c6de..0000000000000000000000000000000000000000
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template
+++ /dev/null
@@ -1,41 +0,0 @@
-%YAML 1.2
---- |
-  # Copyright 2016, Google Inc.
-  # All rights reserved.
-  #
-  # Redistribution and use in source and binary forms, with or without
-  # modification, are permitted provided that the following conditions are
-  # met:
-  #
-  #     * Redistributions of source code must retain the above copyright
-  # notice, this list of conditions and the following disclaimer.
-  #     * Redistributions in binary form must reproduce the above
-  # copyright notice, this list of conditions and the following disclaimer
-  # in the documentation and/or other materials provided with the
-  # distribution.
-  #     * Neither the name of Google Inc. nor the names of its
-  # contributors may be used to endorse or promote products derived from
-  # this software without specific prior written permission.
-  #
-  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  
-  FROM debian:jessie
-  
-  <%include file="../../apt_get_basic.include"/>
-  <%include file="../../python_deps.include"/>
-  <%include file="../../ccache_setup.include"/>
-  <%include file="../../cxx_deps.include"/>
-  <%include file="../../gcp_api_libraries.include"/>
-  <%include file="../../java_deps.include"/>
-  # Define the default command.
-  CMD ["bash"]
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template
deleted file mode 100644
index 49ba60168da13ff905f6baf3590579cd468e29f7..0000000000000000000000000000000000000000
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template
+++ /dev/null
@@ -1,46 +0,0 @@
-%YAML 1.2
---- |
-  # Copyright 2015, Google Inc.
-  # All rights reserved.
-  #
-  # Redistribution and use in source and binary forms, with or without
-  # modification, are permitted provided that the following conditions are
-  # met:
-  #
-  #     * Redistributions of source code must retain the above copyright
-  # notice, this list of conditions and the following disclaimer.
-  #     * Redistributions in binary form must reproduce the above
-  # copyright notice, this list of conditions and the following disclaimer
-  # in the documentation and/or other materials provided with the
-  # distribution.
-  #     * Neither the name of Google Inc. nor the names of its
-  # contributors may be used to endorse or promote products derived from
-  # this software without specific prior written permission.
-  #
-  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  
-  FROM debian:jessie
-  
-  <%include file="../../apt_get_basic.include"/>
-  <%include file="../../python_deps.include"/>
-  <%include file="../../ruby_deps.include"/>
-  <%include file="../../gcp_api_libraries.include"/>
-  <%include file="../../php_deps.include"/>
-  <%include file="../../run_tests_addons.include"/>
-  # Install composer
-  RUN curl -sS https://getcomposer.org/installer | php
-  RUN mv composer.phar /usr/local/bin/composer
-  
-  # Define the default command.
-  CMD ["bash"]
-  
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile.template
deleted file mode 100644
index 27e9eeec5a838cbdee017b258a33ec42f14985e4..0000000000000000000000000000000000000000
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile.template
+++ /dev/null
@@ -1,45 +0,0 @@
-%YAML 1.2
---- |
-  # Copyright 2016, Google Inc.
-  # All rights reserved.
-  #
-  # Redistribution and use in source and binary forms, with or without
-  # modification, are permitted provided that the following conditions are
-  # met:
-  #
-  #     * Redistributions of source code must retain the above copyright
-  # notice, this list of conditions and the following disclaimer.
-  #     * Redistributions in binary form must reproduce the above
-  # copyright notice, this list of conditions and the following disclaimer
-  # in the documentation and/or other materials provided with the
-  # distribution.
-  #     * Neither the name of Google Inc. nor the names of its
-  # contributors may be used to endorse or promote products derived from
-  # this software without specific prior written permission.
-  #
-  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  
-  FROM debian:jessie
-  
-  <%include file="../../apt_get_basic.include"/>
-  <%include file="../../ccache_setup.include"/>
-  <%include file="../../cxx_deps.include"/>
-  <%include file="../../gcp_api_libraries.include"/>
-  <%include file="../../python_deps.include"/>
-
-  RUN pip install coverage
-  RUN pip install oauth2client
-
-  # Define the default command.
-  CMD ["bash"]
-  
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template
deleted file mode 100644
index 18199771d7ec0ff281545f7fc4f6940db58cf9ac..0000000000000000000000000000000000000000
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template
+++ /dev/null
@@ -1,42 +0,0 @@
-%YAML 1.2
---- |
-  # Copyright 2015, Google Inc.
-  # All rights reserved.
-  #
-  # Redistribution and use in source and binary forms, with or without
-  # modification, are permitted provided that the following conditions are
-  # met:
-  #
-  #     * Redistributions of source code must retain the above copyright
-  # notice, this list of conditions and the following disclaimer.
-  #     * Redistributions in binary form must reproduce the above
-  # copyright notice, this list of conditions and the following disclaimer
-  # in the documentation and/or other materials provided with the
-  # distribution.
-  #     * Neither the name of Google Inc. nor the names of its
-  # contributors may be used to endorse or promote products derived from
-  # this software without specific prior written permission.
-  #
-  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  
-  FROM debian:jessie
-  
-  <%include file="../../apt_get_basic.include"/>
-  <%include file="../../python_deps.include"/>
-  <%include file="../../ccache_setup.include"/>
-  <%include file="../../cxx_deps.include"/>
-  <%include file="../../gcp_api_libraries.include"/>
-  <%include file="../../ruby_deps.include"/>
-  # Define the default command.
-  CMD ["bash"]
-  
diff --git a/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template
index 092f04dac6c991f902c80847b3064bce65bfcf97..f869cf9106a56a9b46a0a0f11a41f7bdb69147c8 100644
--- a/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../csharp_deps.include"/>
   <%include file="../../csharp_dotnetcli_deps.include"/>
diff --git a/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template
index 35b0e177fb6a44efc12045c8ac1bba4b10f6717a..a42215d257056eb733b80801a27b5fe09db8e954 100644
--- a/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
 
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../clang_update.include"/>
diff --git a/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template b/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template
index 643b5cb65ba3c3a2a5c3b9b2003982f39fd64f92..9df81583828230390bac0cc0905037c07924a1d7 100644
--- a/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM 32bit/debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template
index 8a95cad6492baed88108e3ce4663d66baf5690ea..98854ff1c4f84eab48904bfccb3049f44f509ff5 100644
--- a/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM ubuntu:14.04
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../run_tests_addons_nocache.include"/>
diff --git a/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template
index 42ad6c130da5f69006d46a6f1f5f4fda35c34c20..3ad65e9eeee73cd7288a08190b7008d03c1cd161 100644
--- a/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM ubuntu:16.04
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template
index 5c38e9a0e924770fe2d1f6dde886aa94a288c57d..6d74f4c8d1636e1267b4ea5944383de274dce3c7 100644
--- a/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:wheezy
 
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
 
diff --git a/templates/tools/dockerfile/test/fuzzer/Dockerfile.template b/templates/tools/dockerfile/test/fuzzer/Dockerfile.template
index 6d7cb72f2742f98a9c95d8bf7b09092fbb914b15..32a843a4823e49a54de3d330bb3059d3d5d7c0ed 100644
--- a/templates/tools/dockerfile/test/fuzzer/Dockerfile.template
+++ b/templates/tools/dockerfile/test/fuzzer/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
 
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../clang_update.include"/>
diff --git a/templates/tools/dockerfile/test/multilang_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/multilang_jessie_x64/Dockerfile.template
index 93d26b559453a99c04bf74451654fc3bd5cd0848..bf64ba210a95fe708ca806ad592ca08cc36ea157 100644
--- a/templates/tools/dockerfile/test/multilang_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/multilang_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../csharp_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../node_deps.include"/>
diff --git a/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template
index ceaa9aa5abccbe880db12cfd64a407870f39066b..103b1ef848b2be3c99bf0ae83f94ca0dac5699fb 100644
--- a/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
 
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
 
   # Install Electron apt dependencies
   RUN apt-get update && apt-get install -y ${'\\'}
diff --git a/templates/tools/dockerfile/test/php7_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/php7_jessie_x64/Dockerfile.template
index e6a213d90d30f09517b2ffca0e93251dfad9d72d..158100283026bdcacb774e484fd40f10c0fb29ec 100644
--- a/templates/tools/dockerfile/test/php7_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/php7_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../php7_deps.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
diff --git a/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template
index 0cfa373c9054090269036d0bbc718c58ce96086b..ced93d0406013bc3674714bf584da6e6588664fd 100644
--- a/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../php_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/test/python_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/python_jessie_x64/Dockerfile.template
index 46fb84ba830d6a1d6ae4e065cb3bced49b6208c7..26ff987ac23afbaabef7999775f259d34542c6b5 100644
--- a/templates/tools/dockerfile/test/python_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/python_jessie_x64/Dockerfile.template
@@ -32,8 +32,8 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
   CMD ["bash"]
-  
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template b/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template
index f9a4dcb7b6c30e3c83fed4db68bb7ee224e26f7e..62d9b92400bf60bbaf7c8b27dac9e6556dc1e5f6 100644
--- a/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../apt_get_pyenv.include"/>
   <%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template
index 35838bc11e420dc57e19d93add5399f4487bad8d..d1899853e630ef2e212fef722dba3c40ee3d4308 100644
--- a/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   <%include file="../../ruby_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/test/sanity/Dockerfile.template b/templates/tools/dockerfile/test/sanity/Dockerfile.template
index 8617666b21eb2f36395794caf85fa90d4d330f5f..6943134b6a1994ccf3d64c1551895887f10aa1df 100644
--- a/templates/tools/dockerfile/test/sanity/Dockerfile.template
+++ b/templates/tools/dockerfile/test/sanity/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM ubuntu:15.10
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../python_deps.include"/>
   #========================
   # Sanity test dependencies
diff --git a/test/core/end2end/cq_verifier.c b/test/core/end2end/cq_verifier.c
index 5eea5d43fe052111af55d4933596df834a725001..0fafb0c8c9979fe4294a11c28222d49f1b1c8512 100644
--- a/test/core/end2end/cq_verifier.c
+++ b/test/core/end2end/cq_verifier.c
@@ -77,7 +77,7 @@ struct cq_verifier {
 };
 
 cq_verifier *cq_verifier_create(grpc_completion_queue *cq) {
-  cq_verifier *v = gpr_malloc(sizeof(cq_verifier));
+  cq_verifier *v = (cq_verifier *)gpr_malloc(sizeof(cq_verifier));
   v->cq = cq;
   v->first_expectation = NULL;
   return v;
@@ -314,7 +314,7 @@ void cq_verify_empty(cq_verifier *v) { cq_verify_empty_timeout(v, 1); }
 
 static void add(cq_verifier *v, const char *file, int line,
                 grpc_completion_type type, void *tag, bool success) {
-  expectation *e = gpr_malloc(sizeof(expectation));
+  expectation *e = (expectation *)gpr_malloc(sizeof(expectation));
   e->type = type;
   e->file = file;
   e->line = line;
diff --git a/test/core/end2end/fake_resolver.c b/test/core/end2end/fake_resolver.c
index df902a24bffa4bb1273681501e9ef15fc71c646e..736b224fd60e2fd68be5f94751c8ca8fe99c5595 100644
--- a/test/core/end2end/fake_resolver.c
+++ b/test/core/end2end/fake_resolver.c
@@ -136,7 +136,7 @@ struct grpc_fake_resolver_response_generator {
 grpc_fake_resolver_response_generator*
 grpc_fake_resolver_response_generator_create() {
   grpc_fake_resolver_response_generator* generator =
-      gpr_zalloc(sizeof(*generator));
+      (grpc_fake_resolver_response_generator*)gpr_zalloc(sizeof(*generator));
   gpr_ref_init(&generator->refcount, 1);
   return generator;
 }
@@ -157,7 +157,8 @@ void grpc_fake_resolver_response_generator_unref(
 
 static void set_response_cb(grpc_exec_ctx* exec_ctx, void* arg,
                             grpc_error* error) {
-  grpc_fake_resolver_response_generator* generator = arg;
+  grpc_fake_resolver_response_generator* generator =
+      (grpc_fake_resolver_response_generator*)arg;
   fake_resolver* r = generator->resolver;
   if (r->next_results != NULL) {
     grpc_channel_args_destroy(exec_ctx, r->next_results);
@@ -180,11 +181,13 @@ void grpc_fake_resolver_response_generator_set_response(
 }
 
 static void* response_generator_arg_copy(void* p) {
-  return grpc_fake_resolver_response_generator_ref(p);
+  return grpc_fake_resolver_response_generator_ref(
+      (grpc_fake_resolver_response_generator*)p);
 }
 
 static void response_generator_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
-  grpc_fake_resolver_response_generator_unref(p);
+  grpc_fake_resolver_response_generator_unref(
+      (grpc_fake_resolver_response_generator*)p);
 }
 
 static int response_generator_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
@@ -208,7 +211,7 @@ grpc_fake_resolver_get_response_generator(const grpc_channel_args* args) {
   const grpc_arg* arg =
       grpc_channel_args_find(args, GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR);
   if (arg == NULL || arg->type != GRPC_ARG_POINTER) return NULL;
-  return arg->value.pointer.p;
+  return (grpc_fake_resolver_response_generator*)arg->value.pointer.p;
 }
 
 //
@@ -222,7 +225,7 @@ static void fake_resolver_factory_unref(grpc_resolver_factory* factory) {}
 static grpc_resolver* fake_resolver_create(grpc_exec_ctx* exec_ctx,
                                            grpc_resolver_factory* factory,
                                            grpc_resolver_args* args) {
-  fake_resolver* r = gpr_zalloc(sizeof(*r));
+  fake_resolver* r = (fake_resolver*)gpr_zalloc(sizeof(*r));
   r->channel_args = grpc_channel_args_copy(args->args);
   grpc_resolver_init(&r->base, &fake_resolver_vtable, args->combiner);
   grpc_fake_resolver_response_generator* response_generator =
diff --git a/test/core/end2end/fixtures/http_proxy_fixture.c b/test/core/end2end/fixtures/http_proxy_fixture.c
index f0d09487c62d215db91fb78748b917e2b0685e06..5df43b93847645f743827e1a41098b6c65ecb2d6 100644
--- a/test/core/end2end/fixtures/http_proxy_fixture.c
+++ b/test/core/end2end/fixtures/http_proxy_fixture.c
@@ -156,7 +156,7 @@ static void proxy_connection_failed(grpc_exec_ctx* exec_ctx,
 // Callback for writing proxy data to the client.
 static void on_client_write_done(grpc_exec_ctx* exec_ctx, void* arg,
                                  grpc_error* error) {
-  proxy_connection* conn = arg;
+  proxy_connection* conn = (proxy_connection*)arg;
   if (error != GRPC_ERROR_NONE) {
     proxy_connection_failed(exec_ctx, conn, true /* is_client */,
                             "HTTP proxy client write", error);
@@ -181,7 +181,7 @@ static void on_client_write_done(grpc_exec_ctx* exec_ctx, void* arg,
 // Callback for writing proxy data to the backend server.
 static void on_server_write_done(grpc_exec_ctx* exec_ctx, void* arg,
                                  grpc_error* error) {
-  proxy_connection* conn = arg;
+  proxy_connection* conn = (proxy_connection*)arg;
   if (error != GRPC_ERROR_NONE) {
     proxy_connection_failed(exec_ctx, conn, false /* is_client */,
                             "HTTP proxy server write", error);
@@ -207,7 +207,7 @@ static void on_server_write_done(grpc_exec_ctx* exec_ctx, void* arg,
 // the backend server.
 static void on_client_read_done(grpc_exec_ctx* exec_ctx, void* arg,
                                 grpc_error* error) {
-  proxy_connection* conn = arg;
+  proxy_connection* conn = (proxy_connection*)arg;
   if (error != GRPC_ERROR_NONE) {
     proxy_connection_failed(exec_ctx, conn, true /* is_client */,
                             "HTTP proxy client read", error);
@@ -239,7 +239,7 @@ static void on_client_read_done(grpc_exec_ctx* exec_ctx, void* arg,
 // proxied to the client.
 static void on_server_read_done(grpc_exec_ctx* exec_ctx, void* arg,
                                 grpc_error* error) {
-  proxy_connection* conn = arg;
+  proxy_connection* conn = (proxy_connection*)arg;
   if (error != GRPC_ERROR_NONE) {
     proxy_connection_failed(exec_ctx, conn, false /* is_client */,
                             "HTTP proxy server read", error);
@@ -270,7 +270,7 @@ static void on_server_read_done(grpc_exec_ctx* exec_ctx, void* arg,
 // Callback to write the HTTP response for the CONNECT request.
 static void on_write_response_done(grpc_exec_ctx* exec_ctx, void* arg,
                                    grpc_error* error) {
-  proxy_connection* conn = arg;
+  proxy_connection* conn = (proxy_connection*)arg;
   if (error != GRPC_ERROR_NONE) {
     proxy_connection_failed(exec_ctx, conn, true /* is_client */,
                             "HTTP proxy write response", error);
@@ -294,7 +294,7 @@ static void on_write_response_done(grpc_exec_ctx* exec_ctx, void* arg,
 // CONNECT request.
 static void on_server_connect_done(grpc_exec_ctx* exec_ctx, void* arg,
                                    grpc_error* error) {
-  proxy_connection* conn = arg;
+  proxy_connection* conn = (proxy_connection*)arg;
   if (error != GRPC_ERROR_NONE) {
     // TODO(roth): Technically, in this case, we should handle the error
     // by returning an HTTP response to the client indicating that the
@@ -324,7 +324,7 @@ static void on_server_connect_done(grpc_exec_ctx* exec_ctx, void* arg,
 // which will cause the client connection to be dropped.
 static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg,
                                  grpc_error* error) {
-  proxy_connection* conn = arg;
+  proxy_connection* conn = (proxy_connection*)arg;
   gpr_log(GPR_DEBUG, "on_read_request_done: %p %s", conn,
           grpc_error_string(error));
   if (error != GRPC_ERROR_NONE) {
@@ -389,9 +389,9 @@ static void on_accept(grpc_exec_ctx* exec_ctx, void* arg,
                       grpc_endpoint* endpoint, grpc_pollset* accepting_pollset,
                       grpc_tcp_server_acceptor* acceptor) {
   gpr_free(acceptor);
-  grpc_end2end_http_proxy* proxy = arg;
+  grpc_end2end_http_proxy* proxy = (grpc_end2end_http_proxy*)arg;
   // Instantiate proxy_connection.
-  proxy_connection* conn = gpr_zalloc(sizeof(*conn));
+  proxy_connection* conn = (proxy_connection*)gpr_zalloc(sizeof(*conn));
   gpr_ref(&proxy->users);
   conn->client_endpoint = endpoint;
   conn->proxy = proxy;
@@ -430,7 +430,7 @@ static void on_accept(grpc_exec_ctx* exec_ctx, void* arg,
 //
 
 static void thread_main(void* arg) {
-  grpc_end2end_http_proxy* proxy = arg;
+  grpc_end2end_http_proxy* proxy = (grpc_end2end_http_proxy*)arg;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   do {
     gpr_ref(&proxy->users);
@@ -450,7 +450,8 @@ static void thread_main(void* arg) {
 
 grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_end2end_http_proxy* proxy = gpr_malloc(sizeof(*proxy));
+  grpc_end2end_http_proxy* proxy =
+      (grpc_end2end_http_proxy*)gpr_malloc(sizeof(*proxy));
   memset(proxy, 0, sizeof(*proxy));
   gpr_ref_init(&proxy->users, 1);
   // Construct proxy address.
@@ -473,7 +474,7 @@ grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) {
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   GPR_ASSERT(port == proxy_port);
   // Start server.
-  proxy->pollset = gpr_zalloc(grpc_pollset_size());
+  proxy->pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
   grpc_pollset_init(proxy->pollset, &proxy->mu);
   grpc_tcp_server_start(&exec_ctx, proxy->server, &proxy->pollset, 1, on_accept,
                         proxy);
@@ -487,7 +488,7 @@ grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) {
 
 static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* arg,
                             grpc_error* error) {
-  grpc_pollset* pollset = arg;
+  grpc_pollset* pollset = (grpc_pollset*)arg;
   grpc_pollset_destroy(pollset);
   gpr_free(pollset);
 }
diff --git a/test/core/security/oauth2_utils.c b/test/core/security/oauth2_utils.c
index f0550db1d06b2c358e0941186381e2358e4dfcea..838625705dda694b46ed608a8ffe66988ccc366f 100644
--- a/test/core/security/oauth2_utils.c
+++ b/test/core/security/oauth2_utils.c
@@ -55,7 +55,7 @@ static void on_oauth2_response(grpc_exec_ctx *exec_ctx, void *user_data,
                                grpc_credentials_md *md_elems, size_t num_md,
                                grpc_credentials_status status,
                                const char *error_details) {
-  oauth2_request *request = user_data;
+  oauth2_request *request = (oauth2_request *)user_data;
   char *token = NULL;
   grpc_slice token_slice;
   if (status == GRPC_CREDENTIALS_ERROR) {
@@ -63,7 +63,7 @@ static void on_oauth2_response(grpc_exec_ctx *exec_ctx, void *user_data,
   } else {
     GPR_ASSERT(num_md == 1);
     token_slice = md_elems[0].value;
-    token = gpr_malloc(GRPC_SLICE_LENGTH(token_slice) + 1);
+    token = (char *)gpr_malloc(GRPC_SLICE_LENGTH(token_slice) + 1);
     memcpy(token, GRPC_SLICE_START_PTR(token_slice),
            GRPC_SLICE_LENGTH(token_slice));
     token[GRPC_SLICE_LENGTH(token_slice)] = '\0';
@@ -87,7 +87,7 @@ char *grpc_test_fetch_oauth2_token_with_credentials(
   grpc_closure do_nothing_closure;
   grpc_auth_metadata_context null_ctx = {"", "", NULL, NULL};
 
-  grpc_pollset *pollset = gpr_zalloc(grpc_pollset_size());
+  grpc_pollset *pollset = (grpc_pollset *)gpr_zalloc(grpc_pollset_size());
   grpc_pollset_init(pollset, &request.mu);
   request.pops = grpc_polling_entity_create_from_pollset(pollset);
   request.is_done = 0;
diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc
index c91219e98c8ce3467f9e7d5429f7201feb8843f5..67e7c025351dfd0c9749e4cf65c5333fc3518337 100644
--- a/test/cpp/microbenchmarks/bm_call_create.cc
+++ b/test/cpp/microbenchmarks/bm_call_create.cc
@@ -563,7 +563,8 @@ static void BM_IsolatedFilter(benchmark::State &state) {
   }
 
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
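+  // Taking &filters[0] on an empty vector is undefined behavior, so pass
+  // NULL for the zero-filter benchmark case.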
-  size_t channel_size = grpc_channel_stack_size(&filters[0], filters.size());
+  size_t channel_size = grpc_channel_stack_size(
+      filters.size() == 0 ? NULL : &filters[0], filters.size());
   grpc_channel_stack *channel_stack =
       static_cast<grpc_channel_stack *>(gpr_zalloc(channel_size));
   GPR_ASSERT(GRPC_LOG_IF_ERROR(
diff --git a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
index 01d2023fa63042ef5587641fd413b9f6b0f687b1..d7e3a9cf47d840c932908bb8069944d81256ab6c 100644
--- a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
+++ b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
@@ -53,7 +53,7 @@ DEFINE_int32(
     "Number of megabytes to pump before collecting flow control stats");
 DEFINE_int32(
     warmup_iterations, 100,
-    "Number of megabytes to pump before collecting flow control stats");
+    "Number of iterations to run before collecting flow control stats");
 DEFINE_int32(warmup_max_time_seconds, 10,
              "Maximum number of seconds to run warmup loop");
 
@@ -77,13 +77,14 @@ static void write_csv(std::ostream* out, A0&& a0, Arg&&... arg) {
 
 class TrickledCHTTP2 : public EndpointPairFixture {
  public:
-  TrickledCHTTP2(Service* service, size_t message_size,
-                 size_t kilobits_per_second)
+  TrickledCHTTP2(Service* service, bool streaming, size_t req_size,
+                 size_t resp_size, size_t kilobits_per_second)
       : EndpointPairFixture(service, MakeEndpoints(kilobits_per_second),
                             FixtureConfiguration()) {
     if (FLAGS_log) {
       std::ostringstream fn;
-      fn << "trickle." << message_size << "." << kilobits_per_second << ".csv";
+      fn << "trickle." << (streaming ? "streaming" : "unary") << "." << req_size
+         << "." << resp_size << "." << kilobits_per_second << ".csv";
       log_.reset(new std::ofstream(fn.str().c_str()));
       write_csv(log_.get(), "t", "iteration", "client_backlog",
                 "server_backlog", "client_t_stall", "client_s_stall",
@@ -245,8 +246,9 @@ static void TrickleCQNext(TrickledCHTTP2* fixture, void** t, bool* ok,
 
 static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) {
   EchoTestService::AsyncService service;
-  std::unique_ptr<TrickledCHTTP2> fixture(
-      new TrickledCHTTP2(&service, state.range(0), state.range(1)));
+  std::unique_ptr<TrickledCHTTP2> fixture(new TrickledCHTTP2(
+      &service, true, state.range(0) /* req_size */,
+      state.range(0) /* resp_size */, state.range(1) /* bw in kbit/s */));
   {
     EchoResponse send_response;
     EchoResponse recv_response;
@@ -317,11 +319,7 @@ static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) {
   state.SetBytesProcessed(state.range(0) * state.iterations());
 }
 
-/*******************************************************************************
- * CONFIGURATIONS
- */
-
-static void TrickleArgs(benchmark::internal::Benchmark* b) {
+static void StreamingTrickleArgs(benchmark::internal::Benchmark* b) {
   for (int i = 1; i <= 128 * 1024 * 1024; i *= 8) {
     for (int j = 64; j <= 128 * 1024 * 1024; j *= 8) {
       double expected_time =
@@ -331,8 +329,111 @@ static void TrickleArgs(benchmark::internal::Benchmark* b) {
     }
   }
 }
+BENCHMARK(BM_PumpStreamServerToClient_Trickle)->Apply(StreamingTrickleArgs);
 
-BENCHMARK(BM_PumpStreamServerToClient_Trickle)->Apply(TrickleArgs);
+static void BM_PumpUnbalancedUnary_Trickle(benchmark::State& state) {
+  EchoTestService::AsyncService service;
+  std::unique_ptr<TrickledCHTTP2> fixture(new TrickledCHTTP2(
+      &service, false /* streaming */, state.range(0) /* req_size */,
+      state.range(1) /* resp_size */, state.range(2) /* bw in kbit/s */));
+  EchoRequest send_request;
+  EchoResponse send_response;
+  EchoResponse recv_response;
+  if (state.range(0) > 0) {
+    send_request.set_message(std::string(state.range(0), 'a'));
+  }
+  if (state.range(1) > 0) {
+    send_response.set_message(std::string(state.range(1), 'a'));
+  }
+  Status recv_status;
+  struct ServerEnv {
+    ServerContext ctx;
+    EchoRequest recv_request;
+    grpc::ServerAsyncResponseWriter<EchoResponse> response_writer;
+    ServerEnv() : response_writer(&ctx) {}
+  };
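+  // The two ServerEnv instances live in a raw buffer so that each slot can be
+  // destroyed and re-constructed in place as it is recycled between calls.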
+  uint8_t server_env_buffer[2 * sizeof(ServerEnv)];
+  ServerEnv* server_env[2] = {
+      reinterpret_cast<ServerEnv*>(server_env_buffer),
+      reinterpret_cast<ServerEnv*>(server_env_buffer + sizeof(ServerEnv))};
+  new (server_env[0]) ServerEnv;
+  new (server_env[1]) ServerEnv;
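+  // Pre-post two Echo calls (tags 0 and 1) so the server always has a
+  // request slot outstanding while the other completes.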
+  service.RequestEcho(&server_env[0]->ctx, &server_env[0]->recv_request,
+                      &server_env[0]->response_writer, fixture->cq(),
+                      fixture->cq(), tag(0));
+  service.RequestEcho(&server_env[1]->ctx, &server_env[1]->recv_request,
+                      &server_env[1]->response_writer, fixture->cq(),
+                      fixture->cq(), tag(1));
+  std::unique_ptr<EchoTestService::Stub> stub(
+      EchoTestService::NewStub(fixture->channel()));
+  auto inner_loop = [&](bool in_warmup) {
+    GPR_TIMER_SCOPE("BenchmarkCycle", 0);
+    recv_response.Clear();
+    ClientContext cli_ctx;
+    std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+        stub->AsyncEcho(&cli_ctx, send_request, fixture->cq()));
+    void* t;
+    bool ok;
+    TrickleCQNext(fixture.get(), &t, &ok, state.iterations());
+    GPR_ASSERT(ok);
+    GPR_ASSERT(t == tag(0) || t == tag(1));
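+    // The completion tag doubles as the index of the server slot that fired.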
+    intptr_t slot = reinterpret_cast<intptr_t>(t);
+    ServerEnv* senv = server_env[slot];
+    senv->response_writer.Finish(send_response, Status::OK, tag(3));
+    response_reader->Finish(&recv_response, &recv_status, tag(4));
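+    // Wait for the server-side Finish (tag 3) and the client-side Finish
+    // (tag 4), which may complete in either order; track them as a bitmask.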
+    for (int i = (1 << 3) | (1 << 4); i != 0;) {
+      TrickleCQNext(fixture.get(), &t, &ok, state.iterations());
+      GPR_ASSERT(ok);
+      int tagnum = static_cast<int>(reinterpret_cast<intptr_t>(t));
+      GPR_ASSERT(i & (1 << tagnum));
+      i -= 1 << tagnum;
+    }
+    GPR_ASSERT(recv_status.ok());
+
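+    // Recycle the slot: destroy the environment, re-construct it in place,
+    // and re-post the request for the next iteration.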
+    senv->~ServerEnv();
+    senv = new (senv) ServerEnv();
+    service.RequestEcho(&senv->ctx, &senv->recv_request, &senv->response_writer,
+                        fixture->cq(), fixture->cq(), tag(slot));
+  };
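+  // Warm up for the larger of the configured iteration count and the byte
+  // target (14 likely approximates per-message overhead: the 9-byte HTTP/2
+  // frame header plus the 5-byte gRPC length prefix), capped by wall-clock
+  // time below.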
+  gpr_timespec warmup_start = gpr_now(GPR_CLOCK_MONOTONIC);
+  for (int i = 0;
+       i < GPR_MAX(FLAGS_warmup_iterations, FLAGS_warmup_megabytes * 1024 *
+                                                1024 / (14 + state.range(0)));
+       i++) {
+    inner_loop(true);
+    if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), warmup_start),
+                     gpr_time_from_seconds(FLAGS_warmup_max_time_seconds,
+                                           GPR_TIMESPAN)) > 0) {
+      break;
+    }
+  }
+  while (state.KeepRunning()) {
+    inner_loop(false);
+  }
+  fixture->Finish(state);
+  fixture.reset();
+  server_env[0]->~ServerEnv();
+  server_env[1]->~ServerEnv();
+  state.SetBytesProcessed(state.range(0) * state.iterations() +
+                          state.range(1) * state.iterations());
+}
+
+static void UnaryTrickleArgs(benchmark::internal::Benchmark* b) {
+  const int cli_1024k = 1024 * 1024;
+  const int cli_32M = 32 * 1024 * 1024;
+  const int svr_256k = 256 * 1024;
+  const int svr_4M = 4 * 1024 * 1024;
+  const int svr_64M = 64 * 1024 * 1024;
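+  // The argument order must match the reads in the benchmark above:
+  // {req_size, resp_size, bw in kbit/s}.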
+  for (int bw = 64; bw <= 128 * 1024 * 1024; bw *= 16) {
+    b->Args({cli_1024k, svr_256k, bw});
+    b->Args({cli_1024k, svr_4M, bw});
+    b->Args({cli_1024k, svr_64M, bw});
+    b->Args({cli_32M, svr_256k, bw});
+    b->Args({cli_32M, svr_4M, bw});
+    b->Args({cli_32M, svr_64M, bw});
+  }
+}
+BENCHMARK(BM_PumpUnbalancedUnary_Trickle)->Apply(UnaryTrickleArgs);
 }
 }
 
diff --git a/test/cpp/performance/writes_per_rpc_test.cc b/test/cpp/performance/writes_per_rpc_test.cc
index 7a914c15474d6b6bef0e6c71869518648932df64..12d8268330c26de906be7b5c3882e0caa5c470d3 100644
--- a/test/cpp/performance/writes_per_rpc_test.cc
+++ b/test/cpp/performance/writes_per_rpc_test.cc
@@ -254,8 +254,8 @@ TEST(WritesPerRpcTest, UnaryPingPong) {
   EXPECT_LT(UnaryPingPong(0, 0), 2.05);
   EXPECT_LT(UnaryPingPong(1, 0), 2.05);
   EXPECT_LT(UnaryPingPong(0, 1), 2.05);
-  EXPECT_LT(UnaryPingPong(4096, 0), 2.2);
-  EXPECT_LT(UnaryPingPong(0, 4096), 2.2);
+  EXPECT_LT(UnaryPingPong(4096, 0), 2.5);
+  EXPECT_LT(UnaryPingPong(0, 4096), 2.5);
 }
 
 }  // namespace testing
diff --git a/third_party/protobuf b/third_party/protobuf
index 593e917c176b5bc5aafa57bf9f6030d749d91cd5..a6189acd18b00611c1dc7042299ad75486f08a1a 160000
--- a/third_party/protobuf
+++ b/third_party/protobuf
@@ -1 +1 @@
-Subproject commit 593e917c176b5bc5aafa57bf9f6030d749d91cd5
+Subproject commit a6189acd18b00611c1dc7042299ad75486f08a1a
diff --git a/tools/distrib/python/grpcio_tools/protoc_lib_deps.py b/tools/distrib/python/grpcio_tools/protoc_lib_deps.py
index c2aa6198b3d7c442548f2f1c4fa9dd5c18a76e63..8a251f876ab9a609023713afef8ae494d016f501 100644
--- a/tools/distrib/python/grpcio_tools/protoc_lib_deps.py
+++ b/tools/distrib/python/grpcio_tools/protoc_lib_deps.py
@@ -29,8 +29,8 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 # AUTO-GENERATED BY make_grpcio_tools.py!
-CC_FILES=['google/protobuf/compiler/zip_writer.cc', 'google/protobuf/compiler/subprocess.cc', 'google/protobuf/compiler/ruby/ruby_generator.cc', 'google/protobuf/compiler/python/python_generator.cc', 'google/protobuf/compiler/plugin.pb.cc', 'google/protobuf/compiler/plugin.cc', 'google/protobuf/compiler/php/php_generator.cc', 'google/protobuf/compiler/objectivec/objectivec_primitive_field.cc', 'google/protobuf/compiler/objectivec/objectivec_oneof.cc', 'google/protobuf/compiler/objectivec/objectivec_message_field.cc', 'google/protobuf/compiler/objectivec/objectivec_message.cc', 'google/protobuf/compiler/objectivec/objectivec_map_field.cc', 'google/protobuf/compiler/objectivec/objectivec_helpers.cc', 'google/protobuf/compiler/objectivec/objectivec_generator.cc', 'google/protobuf/compiler/objectivec/objectivec_file.cc', 'google/protobuf/compiler/objectivec/objectivec_field.cc', 'google/protobuf/compiler/objectivec/objectivec_extension.cc', 'google/protobuf/compiler/objectivec/objectivec_enum_field.cc', 'google/protobuf/compiler/objectivec/objectivec_enum.cc', 'google/protobuf/compiler/js/well_known_types_embed.cc', 'google/protobuf/compiler/js/js_generator.cc', 'google/protobuf/compiler/javanano/javanano_primitive_field.cc', 'google/protobuf/compiler/javanano/javanano_message_field.cc', 'google/protobuf/compiler/javanano/javanano_message.cc', 'google/protobuf/compiler/javanano/javanano_map_field.cc', 'google/protobuf/compiler/javanano/javanano_helpers.cc', 'google/protobuf/compiler/javanano/javanano_generator.cc', 'google/protobuf/compiler/javanano/javanano_file.cc', 'google/protobuf/compiler/javanano/javanano_field.cc', 'google/protobuf/compiler/javanano/javanano_extension.cc', 'google/protobuf/compiler/javanano/javanano_enum_field.cc', 'google/protobuf/compiler/javanano/javanano_enum.cc', 'google/protobuf/compiler/java/java_string_field_lite.cc', 'google/protobuf/compiler/java/java_string_field.cc', 'google/protobuf/compiler/java/java_shared_code_generator.cc', 'google/protobuf/compiler/java/java_service.cc', 'google/protobuf/compiler/java/java_primitive_field_lite.cc', 'google/protobuf/compiler/java/java_primitive_field.cc', 'google/protobuf/compiler/java/java_name_resolver.cc', 'google/protobuf/compiler/java/java_message_lite.cc', 'google/protobuf/compiler/java/java_message_field_lite.cc', 'google/protobuf/compiler/java/java_message_field.cc', 'google/protobuf/compiler/java/java_message_builder_lite.cc', 'google/protobuf/compiler/java/java_message_builder.cc', 'google/protobuf/compiler/java/java_message.cc', 'google/protobuf/compiler/java/java_map_field_lite.cc', 'google/protobuf/compiler/java/java_map_field.cc', 'google/protobuf/compiler/java/java_lazy_message_field_lite.cc', 'google/protobuf/compiler/java/java_lazy_message_field.cc', 'google/protobuf/compiler/java/java_helpers.cc', 'google/protobuf/compiler/java/java_generator_factory.cc', 'google/protobuf/compiler/java/java_generator.cc', 'google/protobuf/compiler/java/java_file.cc', 'google/protobuf/compiler/java/java_field.cc', 'google/protobuf/compiler/java/java_extension_lite.cc', 'google/protobuf/compiler/java/java_extension.cc', 'google/protobuf/compiler/java/java_enum_lite.cc', 'google/protobuf/compiler/java/java_enum_field_lite.cc', 'google/protobuf/compiler/java/java_enum_field.cc', 'google/protobuf/compiler/java/java_enum.cc', 'google/protobuf/compiler/java/java_doc_comment.cc', 'google/protobuf/compiler/java/java_context.cc', 'google/protobuf/compiler/csharp/csharp_wrapper_field.cc', 'google/protobuf/compiler/csharp/csharp_source_generator_base.cc', 'google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc', 'google/protobuf/compiler/csharp/csharp_repeated_message_field.cc', 'google/protobuf/compiler/csharp/csharp_repeated_enum_field.cc', 'google/protobuf/compiler/csharp/csharp_reflection_class.cc', 'google/protobuf/compiler/csharp/csharp_primitive_field.cc', 'google/protobuf/compiler/csharp/csharp_message_field.cc', 'google/protobuf/compiler/csharp/csharp_message.cc', 'google/protobuf/compiler/csharp/csharp_map_field.cc', 'google/protobuf/compiler/csharp/csharp_helpers.cc', 'google/protobuf/compiler/csharp/csharp_generator.cc', 'google/protobuf/compiler/csharp/csharp_field_base.cc', 'google/protobuf/compiler/csharp/csharp_enum_field.cc', 'google/protobuf/compiler/csharp/csharp_enum.cc', 'google/protobuf/compiler/csharp/csharp_doc_comment.cc', 'google/protobuf/compiler/cpp/cpp_string_field.cc', 'google/protobuf/compiler/cpp/cpp_service.cc', 'google/protobuf/compiler/cpp/cpp_primitive_field.cc', 'google/protobuf/compiler/cpp/cpp_message_field.cc', 'google/protobuf/compiler/cpp/cpp_message.cc', 'google/protobuf/compiler/cpp/cpp_map_field.cc', 'google/protobuf/compiler/cpp/cpp_helpers.cc', 'google/protobuf/compiler/cpp/cpp_generator.cc', 'google/protobuf/compiler/cpp/cpp_file.cc', 'google/protobuf/compiler/cpp/cpp_field.cc', 'google/protobuf/compiler/cpp/cpp_extension.cc', 'google/protobuf/compiler/cpp/cpp_enum_field.cc', 'google/protobuf/compiler/cpp/cpp_enum.cc', 'google/protobuf/compiler/command_line_interface.cc', 'google/protobuf/compiler/code_generator.cc', 'google/protobuf/wrappers.pb.cc', 'google/protobuf/wire_format.cc', 'google/protobuf/util/type_resolver_util.cc', 'google/protobuf/util/time_util.cc', 'google/protobuf/util/message_differencer.cc', 'google/protobuf/util/json_util.cc', 'google/protobuf/util/internal/utility.cc', 'google/protobuf/util/internal/type_info_test_helper.cc', 'google/protobuf/util/internal/type_info.cc', 'google/protobuf/util/internal/protostream_objectwriter.cc', 'google/protobuf/util/internal/protostream_objectsource.cc', 'google/protobuf/util/internal/proto_writer.cc', 'google/protobuf/util/internal/object_writer.cc', 'google/protobuf/util/internal/json_stream_parser.cc', 'google/protobuf/util/internal/json_objectwriter.cc', 'google/protobuf/util/internal/json_escaping.cc', 'google/protobuf/util/internal/field_mask_utility.cc', 'google/protobuf/util/internal/error_listener.cc', 'google/protobuf/util/internal/default_value_objectwriter.cc', 'google/protobuf/util/internal/datapiece.cc', 'google/protobuf/util/field_mask_util.cc', 'google/protobuf/util/field_comparator.cc', 'google/protobuf/unknown_field_set.cc', 'google/protobuf/type.pb.cc', 'google/protobuf/timestamp.pb.cc', 'google/protobuf/text_format.cc', 'google/protobuf/stubs/substitute.cc', 'google/protobuf/stubs/mathlimits.cc', 'google/protobuf/struct.pb.cc', 'google/protobuf/source_context.pb.cc', 'google/protobuf/service.cc', 'google/protobuf/reflection_ops.cc', 'google/protobuf/message.cc', 'google/protobuf/map_field.cc', 'google/protobuf/io/zero_copy_stream_impl.cc', 'google/protobuf/io/tokenizer.cc', 'google/protobuf/io/strtod.cc', 'google/protobuf/io/printer.cc', 'google/protobuf/io/gzip_stream.cc', 'google/protobuf/generated_message_reflection.cc', 'google/protobuf/field_mask.pb.cc', 'google/protobuf/extension_set_heavy.cc', 'google/protobuf/empty.pb.cc', 'google/protobuf/dynamic_message.cc', 'google/protobuf/duration.pb.cc', 'google/protobuf/descriptor_database.cc', 'google/protobuf/descriptor.pb.cc', 'google/protobuf/descriptor.cc', 'google/protobuf/compiler/parser.cc', 'google/protobuf/compiler/importer.cc', 'google/protobuf/api.pb.cc', 'google/protobuf/any.pb.cc', 'google/protobuf/any.cc', 'google/protobuf/wire_format_lite.cc', 'google/protobuf/stubs/time.cc', 'google/protobuf/stubs/strutil.cc', 'google/protobuf/stubs/structurally_valid.cc', 'google/protobuf/stubs/stringprintf.cc', 'google/protobuf/stubs/stringpiece.cc', 'google/protobuf/stubs/statusor.cc', 'google/protobuf/stubs/status.cc', 'google/protobuf/stubs/once.cc', 'google/protobuf/stubs/int128.cc', 'google/protobuf/stubs/common.cc', 'google/protobuf/stubs/bytestream.cc', 'google/protobuf/stubs/atomicops_internals_x86_msvc.cc', 'google/protobuf/stubs/atomicops_internals_x86_gcc.cc', 'google/protobuf/repeated_field.cc', 'google/protobuf/message_lite.cc', 'google/protobuf/io/zero_copy_stream_impl_lite.cc', 'google/protobuf/io/zero_copy_stream.cc', 'google/protobuf/io/coded_stream.cc', 'google/protobuf/generated_message_util.cc', 'google/protobuf/extension_set.cc', 'google/protobuf/arenastring.cc', 'google/protobuf/arena.cc', 'google/protobuf/compiler/js/embed.cc']
-PROTO_FILES=['google/protobuf/wrappers.proto', 'google/protobuf/type.proto', 'google/protobuf/timestamp.proto', 'google/protobuf/struct.proto', 'google/protobuf/source_context.proto', 'google/protobuf/field_mask.proto', 'google/protobuf/empty.proto', 'google/protobuf/duration.proto', 'google/protobuf/descriptor.proto', 'google/protobuf/compiler/plugin.proto', 'google/protobuf/api.proto', 'google/protobuf/any.proto']
+CC_FILES=['google/protobuf/compiler/zip_writer.cc', 'google/protobuf/compiler/subprocess.cc', 'google/protobuf/compiler/ruby/ruby_generator.cc', 'google/protobuf/compiler/python/python_generator.cc', 'google/protobuf/compiler/profile.pb.cc', 'google/protobuf/compiler/plugin.pb.cc', 'google/protobuf/compiler/plugin.cc', 'google/protobuf/compiler/php/php_generator.cc', 'google/protobuf/compiler/objectivec/objectivec_primitive_field.cc', 'google/protobuf/compiler/objectivec/objectivec_oneof.cc', 'google/protobuf/compiler/objectivec/objectivec_message_field.cc', 'google/protobuf/compiler/objectivec/objectivec_message.cc', 'google/protobuf/compiler/objectivec/objectivec_map_field.cc', 'google/protobuf/compiler/objectivec/objectivec_helpers.cc', 'google/protobuf/compiler/objectivec/objectivec_generator.cc', 'google/protobuf/compiler/objectivec/objectivec_file.cc', 'google/protobuf/compiler/objectivec/objectivec_field.cc', 'google/protobuf/compiler/objectivec/objectivec_extension.cc', 'google/protobuf/compiler/objectivec/objectivec_enum_field.cc', 'google/protobuf/compiler/objectivec/objectivec_enum.cc', 'google/protobuf/compiler/js/well_known_types_embed.cc', 'google/protobuf/compiler/js/js_generator.cc', 'google/protobuf/compiler/javanano/javanano_primitive_field.cc', 'google/protobuf/compiler/javanano/javanano_message_field.cc', 'google/protobuf/compiler/javanano/javanano_message.cc', 'google/protobuf/compiler/javanano/javanano_map_field.cc', 'google/protobuf/compiler/javanano/javanano_helpers.cc', 'google/protobuf/compiler/javanano/javanano_generator.cc', 'google/protobuf/compiler/javanano/javanano_file.cc', 'google/protobuf/compiler/javanano/javanano_field.cc', 'google/protobuf/compiler/javanano/javanano_extension.cc', 'google/protobuf/compiler/javanano/javanano_enum_field.cc', 'google/protobuf/compiler/javanano/javanano_enum.cc', 'google/protobuf/compiler/java/java_string_field_lite.cc', 'google/protobuf/compiler/java/java_string_field.cc', 'google/protobuf/compiler/java/java_shared_code_generator.cc', 'google/protobuf/compiler/java/java_service.cc', 'google/protobuf/compiler/java/java_primitive_field_lite.cc', 'google/protobuf/compiler/java/java_primitive_field.cc', 'google/protobuf/compiler/java/java_name_resolver.cc', 'google/protobuf/compiler/java/java_message_lite.cc', 'google/protobuf/compiler/java/java_message_field_lite.cc', 'google/protobuf/compiler/java/java_message_field.cc', 'google/protobuf/compiler/java/java_message_builder_lite.cc', 'google/protobuf/compiler/java/java_message_builder.cc', 'google/protobuf/compiler/java/java_message.cc', 'google/protobuf/compiler/java/java_map_field_lite.cc', 'google/protobuf/compiler/java/java_map_field.cc', 'google/protobuf/compiler/java/java_lazy_message_field_lite.cc', 'google/protobuf/compiler/java/java_lazy_message_field.cc', 'google/protobuf/compiler/java/java_helpers.cc', 'google/protobuf/compiler/java/java_generator_factory.cc', 'google/protobuf/compiler/java/java_generator.cc', 'google/protobuf/compiler/java/java_file.cc', 'google/protobuf/compiler/java/java_field.cc', 'google/protobuf/compiler/java/java_extension_lite.cc', 'google/protobuf/compiler/java/java_extension.cc', 'google/protobuf/compiler/java/java_enum_lite.cc', 'google/protobuf/compiler/java/java_enum_field_lite.cc', 'google/protobuf/compiler/java/java_enum_field.cc', 'google/protobuf/compiler/java/java_enum.cc', 'google/protobuf/compiler/java/java_doc_comment.cc', 'google/protobuf/compiler/java/java_context.cc', 'google/protobuf/compiler/csharp/csharp_wrapper_field.cc', 'google/protobuf/compiler/csharp/csharp_source_generator_base.cc', 'google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc', 'google/protobuf/compiler/csharp/csharp_repeated_message_field.cc', 'google/protobuf/compiler/csharp/csharp_repeated_enum_field.cc', 'google/protobuf/compiler/csharp/csharp_reflection_class.cc', 'google/protobuf/compiler/csharp/csharp_primitive_field.cc', 'google/protobuf/compiler/csharp/csharp_message_field.cc', 'google/protobuf/compiler/csharp/csharp_message.cc', 'google/protobuf/compiler/csharp/csharp_map_field.cc', 'google/protobuf/compiler/csharp/csharp_helpers.cc', 'google/protobuf/compiler/csharp/csharp_generator.cc', 'google/protobuf/compiler/csharp/csharp_field_base.cc', 'google/protobuf/compiler/csharp/csharp_enum_field.cc', 'google/protobuf/compiler/csharp/csharp_enum.cc', 'google/protobuf/compiler/csharp/csharp_doc_comment.cc', 'google/protobuf/compiler/cpp/cpp_string_field.cc', 'google/protobuf/compiler/cpp/cpp_service.cc', 'google/protobuf/compiler/cpp/cpp_primitive_field.cc', 'google/protobuf/compiler/cpp/cpp_message_field.cc', 'google/protobuf/compiler/cpp/cpp_message.cc', 'google/protobuf/compiler/cpp/cpp_map_field.cc', 'google/protobuf/compiler/cpp/cpp_helpers.cc', 'google/protobuf/compiler/cpp/cpp_generator.cc', 'google/protobuf/compiler/cpp/cpp_file.cc', 'google/protobuf/compiler/cpp/cpp_field.cc', 'google/protobuf/compiler/cpp/cpp_extension.cc', 'google/protobuf/compiler/cpp/cpp_enum_field.cc', 'google/protobuf/compiler/cpp/cpp_enum.cc', 'google/protobuf/compiler/command_line_interface.cc', 'google/protobuf/compiler/code_generator.cc', 'google/protobuf/wrappers.pb.cc', 'google/protobuf/wire_format.cc', 'google/protobuf/util/type_resolver_util.cc', 'google/protobuf/util/time_util.cc', 'google/protobuf/util/message_differencer.cc', 'google/protobuf/util/json_util.cc', 'google/protobuf/util/internal/utility.cc', 'google/protobuf/util/internal/type_info_test_helper.cc', 'google/protobuf/util/internal/type_info.cc', 'google/protobuf/util/internal/protostream_objectwriter.cc', 'google/protobuf/util/internal/protostream_objectsource.cc', 'google/protobuf/util/internal/proto_writer.cc', 'google/protobuf/util/internal/object_writer.cc', 'google/protobuf/util/internal/json_stream_parser.cc', 'google/protobuf/util/internal/json_objectwriter.cc', 'google/protobuf/util/internal/json_escaping.cc', 'google/protobuf/util/internal/field_mask_utility.cc', 'google/protobuf/util/internal/error_listener.cc', 'google/protobuf/util/internal/default_value_objectwriter.cc', 'google/protobuf/util/internal/datapiece.cc', 'google/protobuf/util/field_mask_util.cc', 'google/protobuf/util/field_comparator.cc', 'google/protobuf/util/delimited_message_util.cc', 'google/protobuf/unknown_field_set.cc', 'google/protobuf/type.pb.cc', 'google/protobuf/timestamp.pb.cc', 'google/protobuf/text_format.cc', 'google/protobuf/stubs/substitute.cc', 'google/protobuf/stubs/mathlimits.cc', 'google/protobuf/struct.pb.cc', 'google/protobuf/source_context.pb.cc', 'google/protobuf/service.cc', 'google/protobuf/reflection_ops.cc', 'google/protobuf/message.cc', 'google/protobuf/map_field.cc', 'google/protobuf/io/zero_copy_stream_impl.cc', 'google/protobuf/io/tokenizer.cc', 'google/protobuf/io/strtod.cc', 'google/protobuf/io/printer.cc', 'google/protobuf/io/gzip_stream.cc', 'google/protobuf/generated_message_reflection.cc', 'google/protobuf/field_mask.pb.cc', 'google/protobuf/extension_set_heavy.cc', 'google/protobuf/empty.pb.cc', 'google/protobuf/dynamic_message.cc', 'google/protobuf/duration.pb.cc', 'google/protobuf/descriptor_database.cc', 'google/protobuf/descriptor.pb.cc', 'google/protobuf/descriptor.cc', 'google/protobuf/compiler/parser.cc', 'google/protobuf/compiler/importer.cc', 'google/protobuf/api.pb.cc', 'google/protobuf/any.pb.cc', 'google/protobuf/any.cc', 'google/protobuf/wire_format_lite.cc', 'google/protobuf/stubs/time.cc', 'google/protobuf/stubs/strutil.cc', 'google/protobuf/stubs/structurally_valid.cc', 'google/protobuf/stubs/stringprintf.cc', 'google/protobuf/stubs/stringpiece.cc', 'google/protobuf/stubs/statusor.cc', 'google/protobuf/stubs/status.cc', 'google/protobuf/stubs/once.cc', 'google/protobuf/stubs/int128.cc', 'google/protobuf/stubs/common.cc', 'google/protobuf/stubs/bytestream.cc', 'google/protobuf/stubs/atomicops_internals_x86_msvc.cc', 'google/protobuf/stubs/atomicops_internals_x86_gcc.cc', 'google/protobuf/repeated_field.cc', 'google/protobuf/message_lite.cc', 'google/protobuf/io/zero_copy_stream_impl_lite.cc', 'google/protobuf/io/zero_copy_stream.cc', 'google/protobuf/io/coded_stream.cc', 'google/protobuf/generated_message_util.cc', 'google/protobuf/extension_set.cc', 'google/protobuf/arenastring.cc', 'google/protobuf/arena.cc', 'google/protobuf/compiler/js/embed.cc']
+PROTO_FILES=['google/protobuf/wrappers.proto', 'google/protobuf/type.proto', 'google/protobuf/timestamp.proto', 'google/protobuf/struct.proto', 'google/protobuf/source_context.proto', 'google/protobuf/field_mask.proto', 'google/protobuf/empty.proto', 'google/protobuf/duration.proto', 'google/protobuf/descriptor.proto', 'google/protobuf/compiler/profile.proto', 'google/protobuf/compiler/plugin.proto', 'google/protobuf/api.proto', 'google/protobuf/any.proto']
 
 CC_INCLUDE='third_party/protobuf/src'
 PROTO_INCLUDE='third_party/protobuf/src'
diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py
index 211d442f1767462adf0f3c6d616812c2d3a83408..43b60b142f3f747450fd62059413d0726dd9d507 100644
--- a/tools/distrib/python/grpcio_tools/setup.py
+++ b/tools/distrib/python/grpcio_tools/setup.py
@@ -211,7 +211,7 @@ setuptools.setup(
   ext_modules=extension_modules(),
   packages=setuptools.find_packages('.'),
   install_requires=[
-    'protobuf>=3.2.0',
+    'protobuf>=3.3.0',
     'grpcio>={version}'.format(version=grpc_version.VERSION),
   ],
   package_data=package_data(),
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile
similarity index 90%
rename from tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile
rename to tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile
index c099f339aeee5607f02a0718a239690bcede2de2..0a62f1c2c0f8636092ebecd1143f6323e859cef1 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile
@@ -1,4 +1,4 @@
-# Copyright 2016, Google Inc.
+# Copyright 2017, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -27,12 +27,10 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-FROM golang:latest
-
-# Google Cloud platform API libraries
-RUN apt-get update && apt-get install -y python-pip && apt-get clean
-RUN pip install --upgrade google-api-python-client
+FROM golang:1.7
 
+# Using a login shell removes Go from the PATH, so we add it back.
+RUN ln -s /usr/local/go/bin/go /usr/local/bin
 
 #====================
 # Python dependencies
@@ -49,8 +47,5 @@ RUN pip install pip --upgrade
 RUN pip install virtualenv
 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
 
-# Using login shell removes Go from path, so we add it.
-RUN ln -s /usr/local/go/bin/go /usr/local/bin
-
 # Define the default command.
 CMD ["bash"]
diff --git a/tools/run_tests/stress_test/cleanup_docker_images.sh b/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile
old mode 100755
new mode 100644
similarity index 75%
rename from tools/run_tests/stress_test/cleanup_docker_images.sh
rename to tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile
index e424fcfd99b27a0e165d5ad328d39dd86c07ccb9..abf38b817a5ab6ddb501190e1e13c8cc1f2076f5
--- a/tools/run_tests/stress_test/cleanup_docker_images.sh
+++ b/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile
@@ -1,4 +1,3 @@
-#!/bin/bash
 # Copyright 2017, Google Inc.
 # All rights reserved.
 #
@@ -27,5 +26,26 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-for img in `docker images | grep \<none\> | awk '{print  $3 }'` ; do docker rmi -f $img; done
 
+FROM golang:1.8
+
+# Using a login shell removes Go from the PATH, so we add it back.
+RUN ln -s /usr/local/go/bin/go /usr/local/bin
+
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
+
+# Define the default command.
+CMD ["bash"]
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile
deleted file mode 100644
index 12d8d091848444787da1ad59c1103727a99a48d7..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-FROM debian:jessie
-
-# Install Git and basic packages.
-RUN apt-get update && apt-get install -y \
-  autoconf \
-  autotools-dev \
-  build-essential \
-  bzip2 \
-  ccache \
-  curl \
-  gcc \
-  gcc-multilib \
-  git \
-  golang \
-  gyp \
-  lcov \
-  libc6 \
-  libc6-dbg \
-  libc6-dev \
-  libgtest-dev \
-  libtool \
-  make \
-  perl \
-  strace \
-  python-dev \
-  python-setuptools \
-  python-yaml \
-  telnet \
-  unzip \
-  wget \
-  zip && apt-get clean
-
-#================
-# Build profiling
-RUN apt-get update && apt-get install -y time && apt-get clean
-
-#====================
-# Python dependencies
-
-# Install dependencies
-
-RUN apt-get update && apt-get install -y \
-    python-all-dev \
-    python3-all-dev \
-    python-pip
-
-# Install Python packages from PyPI
-RUN pip install pip --upgrade
-RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
-
-# Prepare ccache
-RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
-RUN ln -s /usr/bin/ccache /usr/local/bin/g++
-RUN ln -s /usr/bin/ccache /usr/local/bin/cc
-RUN ln -s /usr/bin/ccache /usr/local/bin/c++
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
-
-#=================
-# C++ dependencies
-RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
-
-# Google Cloud platform API libraries
-RUN apt-get update && apt-get install -y python-pip && apt-get clean
-RUN pip install --upgrade google-api-python-client
-
-
-#================
-# C# dependencies
-
-# Update to a newer version of mono
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
-
-# Install dependencies
-RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y \
-    mono-devel \
-    ca-certificates-mono \
-    nuget \
-    && apt-get clean
-
-RUN nuget update -self
-
-# Define the default command.
-CMD ["bash"]
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_csharp/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_csharp/build_interop_stress.sh
deleted file mode 100755
index 345196894ef6a71e888a3fea739af70548c1f473..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_csharp/build_interop_stress.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Builds C# interop server and client in a base image.
-set -e
-
-mkdir -p /var/local/git
-git clone /var/local/jenkins/grpc /var/local/git/grpc
-# clone gRPC submodules, use data from locally cloned submodules where possible
-(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
-&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
-${name}')
-
-# Copy service account keys if available
-cp -r /var/local/jenkins/service_account $HOME || true
-
-cd /var/local/git/grpc
-
-# Build C++ metrics client (to query the metrics from csharp stress client)
-make metrics_client -j
-
-# Build C# interop client & server
-tools/run_tests/run_tests.py -l csharp -c dbg --build_only
-
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile
deleted file mode 100644
index d0f66d9955623c6714800aabd00a59feec761edb..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2015-2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-FROM debian:jessie
-
-# Install Git and basic packages.
-RUN apt-get update && apt-get install -y \
-  autoconf \
-  autotools-dev \
-  build-essential \
-  bzip2 \
-  ccache \
-  curl \
-  gcc \
-  gcc-multilib \
-  git \
-  golang \
-  gyp \
-  lcov \
-  libc6 \
-  libc6-dbg \
-  libc6-dev \
-  libgtest-dev \
-  libtool \
-  make \
-  perl \
-  strace \
-  python-dev \
-  python-setuptools \
-  python-yaml \
-  telnet \
-  unzip \
-  wget \
-  zip && apt-get clean
-
-#================
-# Build profiling
-RUN apt-get update && apt-get install -y time && apt-get clean
-
-#====================
-# Python dependencies
-
-# Install dependencies
-
-RUN apt-get update && apt-get install -y \
-    python-all-dev \
-    python3-all-dev \
-    python-pip
-
-# Install Python packages from PyPI
-RUN pip install pip --upgrade
-RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
-
-# Prepare ccache
-RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
-RUN ln -s /usr/bin/ccache /usr/local/bin/g++
-RUN ln -s /usr/bin/ccache /usr/local/bin/cc
-RUN ln -s /usr/bin/ccache /usr/local/bin/c++
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
-
-#=================
-# C++ dependencies
-RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
-
-# Google Cloud platform API libraries
-RUN apt-get update && apt-get install -y python-pip && apt-get clean
-RUN pip install --upgrade google-api-python-client
-
-
-#=================
-# Update clang to a version with improved tsan and fuzzing capabilities
-
-RUN apt-get update && apt-get -y install python cmake && apt-get clean
-
-RUN git clone -n -b release_38 http://llvm.org/git/llvm.git && \
-  cd llvm && git checkout ad57503 && cd ..
-RUN git clone -n -b release_38 http://llvm.org/git/clang.git && \
-  cd clang && git checkout ad2c56e && cd ..
-RUN git clone -n -b release_38 http://llvm.org/git/compiler-rt.git && \
-  cd compiler-rt && git checkout 3176922 && cd ..
-RUN git clone -n -b release_38 \
-  http://llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && \
-  git checkout c288525 && cd ..
-RUN git clone -n -b release_38 http://llvm.org/git/libcxx.git && \
-  cd libcxx && git checkout fda3549  && cd ..
-RUN git clone -n -b release_38 http://llvm.org/git/libcxxabi.git && \
-  cd libcxxabi && git checkout 8d4e51d && cd ..
-
-RUN mv clang llvm/tools
-RUN mv compiler-rt llvm/projects
-RUN mv clang-tools-extra llvm/tools/clang/tools
-RUN mv libcxx llvm/projects
-RUN mv libcxxabi llvm/projects
-
-RUN mkdir llvm-build
-RUN cd llvm-build && cmake \
-  -DCMAKE_BUILD_TYPE:STRING=Release \
-  -DCMAKE_INSTALL_PREFIX:STRING=/usr \
-  -DLLVM_TARGETS_TO_BUILD:STRING=X86 \
-  ../llvm
-RUN make -C llvm-build -j 12 && make -C llvm-build install && rm -rf llvm-build
-
-# Define the default command.
-CMD ["bash"]
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_cxx/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_cxx/build_interop_stress.sh
deleted file mode 100755
index 92d1f80fe60dad2956df412bad3e49b872bf356b..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_cxx/build_interop_stress.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Builds C++ interop server and client in a base image.
-set -e
-
-mkdir -p /var/local/git
-git clone /var/local/jenkins/grpc /var/local/git/grpc
-# clone gRPC submodules, use data from locally cloned submodules where possible
-(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
-&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
-${name}')
-
-# copy service account keys if available
-cp -r /var/local/jenkins/service_account $HOME || true
-
-cd /var/local/git/grpc
-
-make install-certs
-
-BUILD_TYPE=${BUILD_TYPE:=opt}
-
-# build C++ interop stress client, interop client and server
-make CONFIG=$BUILD_TYPE stress_test metrics_client interop_client interop_server
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_go/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_go/build_interop_stress.sh
deleted file mode 100755
index 9e4769cf3341e199206dace883b0207fcf8cd3fb..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_go/build_interop_stress.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Builds Go interop server, Stress client and metrics client in a base image.
-set -e
-
-# Clone just the grpc-go source code without any dependencies.
-# We are cloning from a local git repo that contains the right revision
-# to test instead of using "go get" to download from Github directly.
-git clone --recursive /var/local/jenkins/grpc-go src/google.golang.org/grpc
-
-# Clone the 'grpc' repo. We just need this for the wrapper scripts under
-# grpc/tools/gcp/stress_tests
-git clone /var/local/jenkins/grpc /var/local/git/grpc
-# clone gRPC submodules, use data from locally cloned submodules where possible
-(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
-&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
-${name}')
-
-# copy service account keys if available
-cp -r /var/local/jenkins/service_account $HOME || true
-
-# Get dependencies from GitHub
-# NOTE: once grpc-go dependencies change, this needs to be updated manually
-# but we don't expect this to happen any time soon.
-go get github.com/golang/protobuf/proto
-go get golang.org/x/net/context
-go get golang.org/x/net/trace
-go get golang.org/x/oauth2
-go get google.golang.org/cloud
-
-# Build the interop server, stress client and stress metrics client
-(cd src/google.golang.org/grpc/interop/server && go install)
-(cd src/google.golang.org/grpc/stress/client && go install)
-(cd src/google.golang.org/grpc/stress/metrics_client && go install)
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile
deleted file mode 100644
index 229ea469c42fa0acd389018256bf4962948b7a77..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-FROM debian:jessie
-
-# Install Git and basic packages.
-RUN apt-get update && apt-get install -y \
-  autoconf \
-  autotools-dev \
-  build-essential \
-  bzip2 \
-  ccache \
-  curl \
-  gcc \
-  gcc-multilib \
-  git \
-  golang \
-  gyp \
-  lcov \
-  libc6 \
-  libc6-dbg \
-  libc6-dev \
-  libgtest-dev \
-  libtool \
-  make \
-  perl \
-  strace \
-  python-dev \
-  python-setuptools \
-  python-yaml \
-  telnet \
-  unzip \
-  wget \
-  zip && apt-get clean
-
-#================
-# Build profiling
-RUN apt-get update && apt-get install -y time && apt-get clean
-
-#====================
-# Python dependencies
-
-# Install dependencies
-
-RUN apt-get update && apt-get install -y \
-    python-all-dev \
-    python3-all-dev \
-    python-pip
-
-# Install Python packages from PyPI
-RUN pip install pip --upgrade
-RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
-
-# Prepare ccache
-RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
-RUN ln -s /usr/bin/ccache /usr/local/bin/g++
-RUN ln -s /usr/bin/ccache /usr/local/bin/cc
-RUN ln -s /usr/bin/ccache /usr/local/bin/c++
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
-
-#=================
-# C++ dependencies
-RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
-
-# Google Cloud platform API libraries
-RUN apt-get update && apt-get install -y python-pip && apt-get clean
-RUN pip install --upgrade google-api-python-client
-
-
-# Install JDK 8 and Git
-#
-RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections && \
-  echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee /etc/apt/sources.list.d/webupd8team-java.list && \
-  echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee -a /etc/apt/sources.list.d/webupd8team-java.list && \
-  apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886
-
-RUN apt-get update && apt-get -y install \
-      git \
-      libapr1 \
-      oracle-java8-installer \
-      && \
-    apt-get clean && rm -r /var/cache/oracle-jdk8-installer/
-
-ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
-ENV PATH $PATH:$JAVA_HOME/bin
-
-
-# Define the default command.
-CMD ["bash"]
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_java/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_java/build_interop_stress.sh
deleted file mode 100755
index 0194860d101309dc54a75e3ac754dcf60164eb58..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_java/build_interop_stress.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Builds C++ interop server and client in a base image.
-set -e
-
-mkdir -p /var/local/git
-# grpc-java repo
-git clone --recursive --depth 1 /var/local/jenkins/grpc-java /var/local/git/grpc-java
-
-# grpc repo (for metrics client and for the stress test wrapper scripts)
-git clone /var/local/jenkins/grpc /var/local/git/grpc
-# clone gRPC submodules, use data from locally cloned submodules where possible
-(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
-&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
-${name}')
-
-# Copy service account keys if available
-cp -r /var/local/jenkins/service_account $HOME || true
-
-# First build the metrics client in grpc repo
-cd /var/local/git/grpc
-make metrics_client
-
-# Build all interop test targets (which includes interop server and stress test
-# client) in grpc-java repo
-cd /var/local/git/grpc-java
-./gradlew :grpc-interop-testing:installDist -PskipCodegen=true
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
deleted file mode 100644
index 5fd0bc0eb2148937705a0984aea313df78c5bfcb..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-FROM debian:jessie
-
-# Install Git and basic packages.
-RUN apt-get update && apt-get install -y \
-  autoconf \
-  autotools-dev \
-  build-essential \
-  bzip2 \
-  ccache \
-  curl \
-  gcc \
-  gcc-multilib \
-  git \
-  golang \
-  gyp \
-  lcov \
-  libc6 \
-  libc6-dbg \
-  libc6-dev \
-  libgtest-dev \
-  libtool \
-  make \
-  perl \
-  strace \
-  python-dev \
-  python-setuptools \
-  python-yaml \
-  telnet \
-  unzip \
-  wget \
-  zip && apt-get clean
-
-#================
-# Build profiling
-RUN apt-get update && apt-get install -y time && apt-get clean
-
-#====================
-# Python dependencies
-
-# Install dependencies
-
-RUN apt-get update && apt-get install -y \
-    python-all-dev \
-    python3-all-dev \
-    python-pip
-
-# Install Python packages from PyPI
-RUN pip install pip --upgrade
-RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
-
-#==================
-# Node dependencies
-
-# Install nvm
-RUN touch .profile
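-# (create .profile first so nvm's installer has a profile file to append its
-# setup lines to)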
-RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash
-# Install all versions of node that we want to test
-RUN /bin/bash -l -c "nvm install 0.12 && npm config set cache /tmp/npm-cache"
-RUN /bin/bash -l -c "nvm install 4 && npm config set cache /tmp/npm-cache"
-RUN /bin/bash -l -c "nvm install 5 && npm config set cache /tmp/npm-cache"
-RUN /bin/bash -l -c "nvm alias default 4"
-# Google Cloud platform API libraries
-RUN apt-get update && apt-get install -y python-pip && apt-get clean
-RUN pip install --upgrade google-api-python-client
-
-
-# Prepare ccache
-RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
-RUN ln -s /usr/bin/ccache /usr/local/bin/g++
-RUN ln -s /usr/bin/ccache /usr/local/bin/cc
-RUN ln -s /usr/bin/ccache /usr/local/bin/c++
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
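-# (/usr/local/bin precedes /usr/bin on the default PATH, so compiler
-# invocations are transparently routed through ccache)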
-
-
-RUN mkdir /var/local/jenkins
-
-# Define the default command.
-CMD ["bash"]
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_node/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_node/build_interop_stress.sh
deleted file mode 100755
index 4116f842ff1264198f27bc053d5e5d14fd00a439..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_node/build_interop_stress.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Builds Node interop server and client in a base image.
-set -e
-
-mkdir -p /var/local/git
-git clone /var/local/jenkins/grpc /var/local/git/grpc
-# clone gRPC submodules, use data from locally cloned submodules where possible
-(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
-&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
-${name}')
-
-# copy service account keys if available
-cp -r /var/local/jenkins/service_account $HOME || true
-
-cd /var/local/git/grpc
-
-# build Node interop client & server
-npm install -g node-gyp
-npm install --unsafe-perm --build-from-source
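-# (--build-from-source compiles the native extension from this checkout
-# instead of fetching a prebuilt binary; --unsafe-perm lets npm lifecycle
-# scripts run as root inside the container)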
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile
deleted file mode 100644
index b5198b46529d102d4d92943c8b781996bb1f9bf7..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-FROM debian:jessie
-
-# Install Git and basic packages.
-RUN apt-get update && apt-get install -y \
-  autoconf \
-  autotools-dev \
-  build-essential \
-  bzip2 \
-  ccache \
-  curl \
-  gcc \
-  gcc-multilib \
-  git \
-  golang \
-  gyp \
-  lcov \
-  libc6 \
-  libc6-dbg \
-  libc6-dev \
-  libgtest-dev \
-  libtool \
-  make \
-  perl \
-  strace \
-  python-dev \
-  python-setuptools \
-  python-yaml \
-  telnet \
-  unzip \
-  wget \
-  zip && apt-get clean
-
-#================
-# Build profiling
-RUN apt-get update && apt-get install -y time && apt-get clean
-
-#====================
-# Python dependencies
-
-# Install dependencies
-
-RUN apt-get update && apt-get install -y \
-    python-all-dev \
-    python3-all-dev \
-    python-pip
-
-# Install Python packages from PyPI
-RUN pip install pip --upgrade
-RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
-
-#==================
-# Ruby dependencies
-
-# Install rvm
-RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
-RUN \curl -sSL https://get.rvm.io | bash -s stable
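-# (the key imported above lets the installer verify rvm's release signature)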
-
-# Install Ruby 2.1
-RUN /bin/bash -l -c "rvm install ruby-2.1"
-RUN /bin/bash -l -c "rvm use --default ruby-2.1"
-RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
-RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
-RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
-RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
-
-# Google Cloud platform API libraries
-RUN apt-get update && apt-get install -y python-pip && apt-get clean
-RUN pip install --upgrade google-api-python-client
-
-
-#=================
-# PHP dependencies
-
-# Install dependencies
-
-RUN apt-get update && apt-get install -y \
-    git php5 php5-dev phpunit unzip
-
-# Prepare ccache
-RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
-RUN ln -s /usr/bin/ccache /usr/local/bin/g++
-RUN ln -s /usr/bin/ccache /usr/local/bin/cc
-RUN ln -s /usr/bin/ccache /usr/local/bin/c++
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
-
-
-RUN mkdir /var/local/jenkins
-
-# Install composer
-RUN curl -sS https://getcomposer.org/installer | php
-RUN mv composer.phar /usr/local/bin/composer
-
-# Define the default command.
-CMD ["bash"]
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_php/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_php/build_interop_stress.sh
deleted file mode 100755
index e3cca085a4d7c6a93d021326244720fbffc052ba..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_php/build_interop_stress.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Builds PHP interop server and client in a base image.
-set -ex
-
-mkdir -p /var/local/git
-git clone /var/local/jenkins/grpc /var/local/git/grpc
-# clone gRPC submodules, use data from locally cloned submodules where possible
-(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
-&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
-${name}')
-
-# copy service account keys if available
-cp -r /var/local/jenkins/service_account $HOME || true
-
-cd /var/local/git/grpc
-
-make install-certs
-
-# gRPC core and protobuf need to be installed
-make install
-
-(cd src/php/ext/grpc && phpize && ./configure && make)
-
-(cd third_party/protobuf && make install)
-
-(cd src/php && php -d extension=ext/grpc/modules/grpc.so /usr/local/bin/composer install)
-
-(cd src/php && ./bin/generate_proto_php.sh)
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile
deleted file mode 100644
index 8e1de51f33171cb2ea9005dc261a0390f7c443dd..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-FROM debian:jessie
-
-# Install Git and basic packages.
-RUN apt-get update && apt-get install -y \
-  autoconf \
-  autotools-dev \
-  build-essential \
-  bzip2 \
-  ccache \
-  curl \
-  gcc \
-  gcc-multilib \
-  git \
-  golang \
-  gyp \
-  lcov \
-  libc6 \
-  libc6-dbg \
-  libc6-dev \
-  libgtest-dev \
-  libtool \
-  make \
-  perl \
-  strace \
-  python-dev \
-  python-setuptools \
-  python-yaml \
-  telnet \
-  unzip \
-  wget \
-  zip && apt-get clean
-
-#================
-# Build profiling
-RUN apt-get update && apt-get install -y time && apt-get clean
-
-# Prepare ccache
-RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
-RUN ln -s /usr/bin/ccache /usr/local/bin/g++
-RUN ln -s /usr/bin/ccache /usr/local/bin/cc
-RUN ln -s /usr/bin/ccache /usr/local/bin/c++
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
-
-#=================
-# C++ dependencies
-RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
-
-# Google Cloud platform API libraries
-RUN apt-get update && apt-get install -y python-pip && apt-get clean
-RUN pip install --upgrade google-api-python-client
-
-
-#====================
-# Python dependencies
-
-# Install dependencies
-
-RUN apt-get update && apt-get install -y \
-    python-all-dev \
-    python3-all-dev \
-    python-pip
-
-# Install Python packages from PyPI
-RUN pip install pip --upgrade
-RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
-
-
-RUN pip install coverage
-RUN pip install oauth2client
-
-# Define the default command.
-CMD ["bash"]
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_python/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_python/build_interop_stress.sh
deleted file mode 100755
index 1c7dc2bd577fc9c6ae630f3bb487de4b5b275de4..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_python/build_interop_stress.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-# Copyright 2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Builds Python interop server and client in a base image.
-set -e
-
-mkdir -p /var/local/git
-git clone /var/local/jenkins/grpc /var/local/git/grpc
-# clone gRPC submodules, use data from locally cloned submodules where possible
-(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
-&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
-${name}')
-
-# copy service account keys if available
-cp -r /var/local/jenkins/service_account $HOME || true
-
-cd /var/local/git/grpc
-tools/run_tests/run_tests.py -l python -c opt --build_only
-
-# Build c++ interop client
-make metrics_client -j
-
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile
deleted file mode 100644
index 9d291aac5830fed2d7e949dcf60cf645f9452d45..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-FROM debian:jessie
-
-# Install Git and basic packages.
-RUN apt-get update && apt-get install -y \
-  autoconf \
-  autotools-dev \
-  build-essential \
-  bzip2 \
-  ccache \
-  curl \
-  gcc \
-  gcc-multilib \
-  git \
-  golang \
-  gyp \
-  lcov \
-  libc6 \
-  libc6-dbg \
-  libc6-dev \
-  libgtest-dev \
-  libtool \
-  make \
-  perl \
-  strace \
-  python-dev \
-  python-setuptools \
-  python-yaml \
-  telnet \
-  unzip \
-  wget \
-  zip && apt-get clean
-
-#================
-# Build profiling
-RUN apt-get update && apt-get install -y time && apt-get clean
-
-#====================
-# Python dependencies
-
-# Install dependencies
-
-RUN apt-get update && apt-get install -y \
-    python-all-dev \
-    python3-all-dev \
-    python-pip
-
-# Install Python packages from PyPI
-RUN pip install pip --upgrade
-RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
-
-# Prepare ccache
-RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
-RUN ln -s /usr/bin/ccache /usr/local/bin/g++
-RUN ln -s /usr/bin/ccache /usr/local/bin/cc
-RUN ln -s /usr/bin/ccache /usr/local/bin/c++
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
-
-#=================
-# C++ dependencies
-RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
-
-# Google Cloud platform API libraries
-RUN apt-get update && apt-get install -y python-pip && apt-get clean
-RUN pip install --upgrade google-api-python-client
-
-
-#==================
-# Ruby dependencies
-
-# Install rvm
-RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
-RUN \curl -sSL https://get.rvm.io | bash -s stable
-
-# Install Ruby 2.1
-RUN /bin/bash -l -c "rvm install ruby-2.1"
-RUN /bin/bash -l -c "rvm use --default ruby-2.1"
-RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
-RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
-RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
-RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
-
-# Define the default command.
-CMD ["bash"]
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/build_interop_stress.sh
deleted file mode 100755
index 019f0a44e4c1ec60bbf782ee9190936a2bb5cd9c..0000000000000000000000000000000000000000
--- a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/build_interop_stress.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Builds Ruby interop server and client in a base image.
-set -e
-
-mkdir -p /var/local/git
-git clone /var/local/jenkins/grpc /var/local/git/grpc
-# clone gRPC submodules, use data from locally cloned submodules where possible
-(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
-&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
-${name}')
-
-# Copy service account keys if available
-cp -r /var/local/jenkins/service_account $HOME || true
-
-cd /var/local/git/grpc
-rvm --default use ruby-2.1
-
-# Build Ruby interop client and server
-(cd src/ruby && gem update bundler && bundle && rake compile)
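-# ("rake compile" builds the gem's native C extension in place so the interop
-# client and server can run straight from the source tree)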
-
-# Build c++ metrics client to query the metrics from ruby stress client
-make metrics_client -j
-
diff --git a/tools/dockerfile/test/csharp_jessie_x64/Dockerfile b/tools/dockerfile/test/csharp_jessie_x64/Dockerfile
index f9e709dccb1fbfc6a9bc0aa4ae28f7742f76838f..0b21a222263df8dc0c57b5a39a29520a89a4e096 100644
--- a/tools/dockerfile/test/csharp_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/csharp_jessie_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/cxx_jessie_x64/Dockerfile b/tools/dockerfile/test/cxx_jessie_x64/Dockerfile
index 4bb97c7aa9e5b97048df3b47ce6c30a076e77d9f..d9dc272a1b293fefe8f3de60ada749f2c296ec94 100644
--- a/tools/dockerfile/test/cxx_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_jessie_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/cxx_jessie_x86/Dockerfile b/tools/dockerfile/test/cxx_jessie_x86/Dockerfile
index c4b710b5dfb96c842b6f8c0d9d4d5ffbd3479945..11ef52d1c066702e25bb666b212d121295b24c1e 100644
--- a/tools/dockerfile/test/cxx_jessie_x86/Dockerfile
+++ b/tools/dockerfile/test/cxx_jessie_x86/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile b/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile
index bd742dff341732c5a13e2e63165520780ce72f75..41d3b2b520cb03d7393c3540dd26d03b0954fac0 100644
--- a/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile b/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile
index bc46b3055a6df1b54f8ad79c5bd141716052b59c..23d6fb8c41180e14b1247278278857031acda3c3 100644
--- a/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile b/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile
index f7d7f542c11e61f0f2b0aee4191d55b43b8af63a..1848e5e5c895221bb33d083ae6cc28050eee425b 100644
--- a/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/fuzzer/Dockerfile b/tools/dockerfile/test/fuzzer/Dockerfile
index b398b70b64fff8704d88f734050f6267d587e4e2..4200ba0b266fd00560e83f68eec271e4b75066cb 100644
--- a/tools/dockerfile/test/fuzzer/Dockerfile
+++ b/tools/dockerfile/test/fuzzer/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
index c1cce0a1417097fe4a6a4790c0789d692d800dc6..9b50d85e2f3906d0a1b9219d3c9718b9ef9ddf79 100644
--- a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #================
 # C# dependencies
 
diff --git a/tools/dockerfile/test/node_jessie_x64/Dockerfile b/tools/dockerfile/test/node_jessie_x64/Dockerfile
index 4595aa6bea1235ff62374f04751b4d19e1297950..deef892952921cb3f99590f711a3fe6ed79345e6 100644
--- a/tools/dockerfile/test/node_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/node_jessie_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 
 # Install Electron apt dependencies
 RUN apt-get update && apt-get install -y \
diff --git a/tools/dockerfile/test/php7_jessie_x64/Dockerfile b/tools/dockerfile/test/php7_jessie_x64/Dockerfile
index 0e2c103afd137da4184773c676a482ac357d2eda..6057c2d6eb8a2f614df3e9d3654c34dc394926e6 100644
--- a/tools/dockerfile/test/php7_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/php7_jessie_x64/Dockerfile
@@ -75,6 +75,10 @@ RUN cd /var/local/git/php-src \
   && make \
   && make install
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/php_jessie_x64/Dockerfile b/tools/dockerfile/test/php_jessie_x64/Dockerfile
index c6f3dde39a5cc86ab284975316f663fb859f97db..1510c3649c3e7927516b39b8c17b7c7abd0eed60 100644
--- a/tools/dockerfile/test/php_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/php_jessie_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/python_jessie_x64/Dockerfile b/tools/dockerfile/test/python_jessie_x64/Dockerfile
index 94c17078d342177cea1b330709e38a35a08d621f..cc69f4b5cd5a936ba0b3d53d98005d232af1304e 100644
--- a/tools/dockerfile/test/python_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/python_jessie_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/python_pyenv_x64/Dockerfile b/tools/dockerfile/test/python_pyenv_x64/Dockerfile
index 435a9fdc978feb2accde7745c25057c78de94709..a105d334da7df5237df35c93692dae5533ce1bd7 100644
--- a/tools/dockerfile/test/python_pyenv_x64/Dockerfile
+++ b/tools/dockerfile/test/python_pyenv_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/ruby_jessie_x64/Dockerfile b/tools/dockerfile/test/ruby_jessie_x64/Dockerfile
index 679c8ff47a699c66a01959c0d248aa905f04fc78..0a5c9a633d50b1142e66a2e1fdaac75b82e53779 100644
--- a/tools/dockerfile/test/ruby_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/ruby_jessie_x64/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/dockerfile/test/sanity/Dockerfile b/tools/dockerfile/test/sanity/Dockerfile
index 0da2a1914acffb92cb7784c8c949ce2e92a8437b..76923303ea483f1f72b393036acdfbbd6a31fcdf 100644
--- a/tools/dockerfile/test/sanity/Dockerfile
+++ b/tools/dockerfile/test/sanity/Dockerfile
@@ -63,6 +63,10 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
 #====================
 # Python dependencies
 
diff --git a/tools/gce/linux_worker_init.sh b/tools/gce/linux_worker_init.sh
index d552343bde611917ef4a457f788d23c285651a8d..f795980aa833ce96a1c97e5ba7fb8d0e50eca177 100755
--- a/tools/gce/linux_worker_init.sh
+++ b/tools/gce/linux_worker_init.sh
@@ -61,6 +61,10 @@ sudo usermod -aG docker jenkins
 # see https://github.com/grpc/grpc/issues/4988
 printf "{\n\t\"storage-driver\": \"overlay\"\n}" | sudo tee /etc/docker/daemon.json
 
+# Install pip and Google API library to enable using GCP services
+sudo apt-get install -y python-pip
+sudo pip install google-api-python-client
+
 # Install RVM
 # TODO(jtattermusch): why is RVM needed?
 gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
diff --git a/tools/gcp/stress_test/run_client.py b/tools/gcp/stress_test/run_client.py
deleted file mode 100755
index 51ada6820da6bc91f93f5aa5aeb51da4cba02258..0000000000000000000000000000000000000000
--- a/tools/gcp/stress_test/run_client.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/env python2.7
-# Copyright 2015-2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import datetime
-import os
-import re
-import resource
-import select
-import subprocess
-import sys
-import time
-
-from stress_test_utils import EventType
-from stress_test_utils import BigQueryHelper
-
-
-# TODO (sree): Write a python grpc client to directly query the metrics instead
-# of calling metrics_client
-def _get_qps(metrics_cmd):
-  qps = 0
-  try:
-    # Note: gpr_log() writes even non-error messages to the stderr stream, so
-    # it is important that we set stderr=subprocess.STDOUT
-    p = subprocess.Popen(args=metrics_cmd,
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.STDOUT)
-    # communicate() both waits for the child and drains the pipe, avoiding a
-    # deadlock if the child fills its stdout buffer
-    (out_str, err_str) = p.communicate()
-    retcode = p.returncode
-    if retcode != 0:
-      print 'Error in reading metrics information'
-      print 'Output: ', out_str
-    else:
-      # The overall qps is printed at the end of the line
-      m = re.search(r'\d+$', out_str)
-      qps = int(m.group()) if m else 0
-  except Exception as ex:
-    print 'Exception while reading metrics information: ' + str(ex)
-  return qps
-
-
-def run_client():
-  """This is a wrapper around the stress test client and performs the following:
-      1) Create the following two tables in Big Query:
-         (i) Summary table: To record events like the test started, completed
-                            successfully or failed
-        (ii) Qps table: To periodically record the QPS sent by this client
-      2) Start the stress test client and add a row in the Big Query summary
-         table
-      3) Once every few seconds (as specified by poll_interval_secs), poll
-         the status of the stress test client process and perform the
-         following:
-          3.1) If the process is still running, get the current qps by invoking
-               the metrics client program and add a row in the Big Query
-               Qps table. Sleep for a duration specified by poll_interval_secs
-          3.2) If the process exited successfully, add a row in the Big Query
-               Summary table and exit
-          3.3) If the process failed, add a row in Big Query summary table and
-               wait forever.
-               NOTE: This script typically runs inside a GKE pod which means
-               that the pod gets destroyed when the script exits. However, in
-               case the stress test client fails, we would not want the pod to
-               be destroyed (since we might want to connect to the pod for
-               examining logs). This is the reason why the script waits forever
-               in case of failures
-  """
-  # Set the 'core file' size to 'unlimited' so that 'core' files are generated
-  # if the client crashes (Note: This is not relevant for Java and Go clients)
-  resource.setrlimit(resource.RLIMIT_CORE,
-                     (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
-
-  env = dict(os.environ)
-  image_type = env['STRESS_TEST_IMAGE_TYPE']
-  stress_client_cmd = env['STRESS_TEST_CMD'].split()
-  args_str = env['STRESS_TEST_ARGS_STR']
-  metrics_client_cmd = env['METRICS_CLIENT_CMD'].split()
-  metrics_client_args_str = env['METRICS_CLIENT_ARGS_STR']
-  run_id = env['RUN_ID']
-  pod_name = env['POD_NAME']
-  logfile_name = env.get('LOGFILE_NAME')
-  poll_interval_secs = float(env['POLL_INTERVAL_SECS'])
-  project_id = env['GCP_PROJECT_ID']
-  dataset_id = env['DATASET_ID']
-  summary_table_id = env['SUMMARY_TABLE_ID']
-  qps_table_id = env['QPS_TABLE_ID']
-  # The following parameter tells us whether the stress client runs forever
-  # until forcefully stopped or stops naturally after some time. If it runs
-  # forever, the process should never terminate, so any termination (even
-  # with a success exit code) is flagged as a failure
-  will_run_forever = env.get('WILL_RUN_FOREVER', '1')
-
-  bq_helper = BigQueryHelper(run_id, image_type, pod_name, project_id,
-                             dataset_id, summary_table_id, qps_table_id)
-  bq_helper.initialize()
-
-  # Create BigQuery Dataset and Tables: Summary Table and Metrics Table
-  if not bq_helper.setup_tables():
-    print 'Error in creating BigQuery tables'
-    return
-
-  start_time = datetime.datetime.now()
-
-  logfile = None
-  details = 'Logging to stdout'
-  if logfile_name is not None:
-    print 'Opening logfile: %s ...' % logfile_name
-    details = 'Logfile: %s' % logfile_name
-    logfile = open(logfile_name, 'w')
-
-  metrics_cmd = metrics_client_cmd + metrics_client_args_str.split()
-  stress_cmd = stress_client_cmd + args_str.split()
-
-  details = '%s, Metrics command: %s, Stress client command: %s' % (
-      details, str(metrics_cmd), str(stress_cmd))
-  # Update status that the test is starting (in the status table)
-  bq_helper.insert_summary_row(EventType.STARTING, details)
-
-  print 'Launching process %s ...' % stress_cmd
-  stress_p = subprocess.Popen(args=stress_cmd,
-                              stdout=logfile,
-                              stderr=subprocess.STDOUT)
-
-  qps_history = [1, 1, 1]  # Maintain the last 3 qps readings
-  qps_history_idx = 0  # Index into the qps_history list
-
-  is_running_status_written = False
-  is_error = False
-  while True:
-    # Check whether the stress client is still running: a non-None value from
-    # stress_p.poll() means it has terminated; otherwise collect metrics and
-    # upload them to the BigQuery status table
-    if stress_p.poll() is not None:
-      end_time = datetime.datetime.now().isoformat()
-      event_type = EventType.SUCCESS
-      details = 'End time: %s' % end_time
-      if will_run_forever == '1' or stress_p.returncode != 0:
-        event_type = EventType.FAILURE
-        details = 'Return code = %d. End time: %s' % (stress_p.returncode,
-                                                      end_time)
-        is_error = True
-      bq_helper.insert_summary_row(event_type, details)
-      print details
-      break
-
-    if not is_running_status_written:
-      bq_helper.insert_summary_row(EventType.RUNNING, '')
-      is_running_status_written = True
-
-    # Stress client still running. Get metrics
-    qps = _get_qps(metrics_cmd)
-    qps_recorded_at = datetime.datetime.now().isoformat()
-    print 'qps: %d at %s' % (qps, qps_recorded_at)
-
-    # If QPS has been zero for the last 3 iterations, flag it as error and exit
-    qps_history[qps_history_idx] = qps
-    qps_history_idx = (qps_history_idx + 1) % len(qps_history)
-    if sum(qps_history) == 0:
-      details = 'QPS has been zero for the last %d seconds, as of %s' % (
-          poll_interval_secs * 3, qps_recorded_at)
-      is_error = True
-      bq_helper.insert_summary_row(EventType.FAILURE, details)
-      print details
-      break
-
-    # Upload qps metrics to BiqQuery
-    bq_helper.insert_qps_row(qps, qps_recorded_at)
-
-    time.sleep(poll_interval_secs)
-
-  if is_error:
-    print 'Waiting indefinitely..'
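-    # (select() on three empty lists blocks forever, keeping the GKE pod
-    # alive so its logs can still be examined)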
-    select.select([], [], [])
-
-  print 'Completed'
-  return
-
-
-if __name__ == '__main__':
-  run_client()
diff --git a/tools/gcp/stress_test/run_node.sh b/tools/gcp/stress_test/run_node.sh
deleted file mode 100755
index 4a4da6fc8b8e3c750890604c55c09698548d65f9..0000000000000000000000000000000000000000
--- a/tools/gcp/stress_test/run_node.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-# Copyright 2015-2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This is a wrapper script that helps run_server.py and run_client.py launch
-# Node.js stress clients and stress servers
-source ~/.nvm/nvm.sh
-
-set -ex
-
-$@
diff --git a/tools/gcp/stress_test/run_ruby.sh b/tools/gcp/stress_test/run_ruby.sh
deleted file mode 100755
index 80d0567447b54a83b91ea439efd09f97a6042b67..0000000000000000000000000000000000000000
--- a/tools/gcp/stress_test/run_ruby.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-# Copyright 2015-2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This is a wrapper script that helps run_server.py and run_client.py launch
-# Ruby stress clients and stress servers
-source /etc/profile.d/rvm.sh
-
-set -ex
-
-$@
diff --git a/tools/gcp/stress_test/run_server.py b/tools/gcp/stress_test/run_server.py
deleted file mode 100755
index 8f47e42ef32dd0425cea419b80b41b7f04db45d6..0000000000000000000000000000000000000000
--- a/tools/gcp/stress_test/run_server.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/env python2.7
-# Copyright 2015-2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import datetime
-import os
-import resource
-import select
-import subprocess
-import sys
-import time
-
-from stress_test_utils import BigQueryHelper
-from stress_test_utils import EventType
-
-
-def run_server():
-  """This is a wrapper around the interop server and performs the following:
-      1) Create a 'Summary table' in Big Query to record events like the server
-         started, completed successfully or failed. NOTE: This also creates
-         another table called the QPS table which is currently NOT needed on the
-         server (it is needed on the stress test clients)
-      2) Start the server process and add a row in Big Query summary table
-      3) Wait for the server process to terminate. The server process does not
-         terminate unless there is an error.
-         If the server process terminated with a failure, add a row in Big Query
-         and wait forever.
-         NOTE: This script typically runs inside a GKE pod which means that the
-         pod gets destroyed when the script exits. However, in case the server
-         process fails, we would not want the pod to be destroyed (since we
-         might want to connect to the pod for examining logs). This is the
-         reason why the script waits forever in case of failures.
-  """
-  # Set the 'core file' size to 'unlimited' so that 'core' files are generated
-  # if the server crashes (Note: This is not relevant for Java and Go servers)
-  resource.setrlimit(resource.RLIMIT_CORE,
-                     (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
-
-  # Read the parameters from environment variables
-  env = dict(os.environ)
-
-  run_id = env['RUN_ID']  # The unique run id for this test
-  image_type = env['STRESS_TEST_IMAGE_TYPE']
-  stress_server_cmd = env['STRESS_TEST_CMD'].split()
-  args_str = env['STRESS_TEST_ARGS_STR']
-  pod_name = env['POD_NAME']
-  project_id = env['GCP_PROJECT_ID']
-  dataset_id = env['DATASET_ID']
-  summary_table_id = env['SUMMARY_TABLE_ID']
-  qps_table_id = env['QPS_TABLE_ID']
-  # The following parameter tells us whether the server runs forever until
-  # forcefully stopped or stops naturally after some time. If it runs
-  # forever, the process should never terminate, so any termination (even
-  # with a success exit code) is flagged as a failure.
-  will_run_forever = env.get('WILL_RUN_FOREVER', '1')
-
-  logfile_name = env.get('LOGFILE_NAME')
-
-  print('pod_name: %s, project_id: %s, run_id: %s, dataset_id: %s, '
-        'summary_table_id: %s, qps_table_id: %s' % (pod_name, project_id,
-                                                    run_id, dataset_id,
-                                                    summary_table_id,
-                                                    qps_table_id))
-
-  bq_helper = BigQueryHelper(run_id, image_type, pod_name, project_id,
-                             dataset_id, summary_table_id, qps_table_id)
-  bq_helper.initialize()
-
-  # Create BigQuery Dataset and Tables: Summary Table and Metrics Table
-  if not bq_helper.setup_tables():
-    print 'Error in creating BigQuery tables'
-    return
-
-  start_time = datetime.datetime.now()
-
-  logfile = None
-  details = 'Logging to stdout'
-  if logfile_name is not None:
-    print 'Opening log file: ', logfile_name
-    logfile = open(logfile_name, 'w')
-    details = 'Logfile: %s' % logfile_name
-
-  stress_cmd = stress_server_cmd + args_str.split()
-
-  details = '%s, Stress server command: %s' % (details, str(stress_cmd))
-  # Update status that the test is starting (in the status table)
-  bq_helper.insert_summary_row(EventType.STARTING, details)
-
-  print 'Launching process %s ...' % stress_cmd
-  stress_p = subprocess.Popen(args=stress_cmd,
-                              stdout=logfile,
-                              stderr=subprocess.STDOUT)
-
-  # Update the status to running if subprocess.Popen launched the server
-  if stress_p.poll() is None:
-    bq_helper.insert_summary_row(EventType.RUNNING, '')
-
-  # Wait for the server process to terminate
-  returncode = stress_p.wait()
-
-  if will_run_forever == '1' or returncode != 0:
-    end_time = datetime.datetime.now().isoformat()
-    event_type = EventType.FAILURE
-    details = 'Returncode: %d; End time: %s' % (returncode, end_time)
-    bq_helper.insert_summary_row(event_type, details)
-    print 'Waiting indefinitely..'
-    select.select([], [], [])
-  return returncode
-
-
-if __name__ == '__main__':
-  run_server()
diff --git a/tools/gcp/stress_test/stress_test_utils.py b/tools/gcp/stress_test/stress_test_utils.py
deleted file mode 100755
index be50af31845f77fafcae5681ce0f8c6a8692a127..0000000000000000000000000000000000000000
--- a/tools/gcp/stress_test/stress_test_utils.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/usr/bin/env python2.7
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import datetime
-import json
-import os
-import re
-import select
-import subprocess
-import sys
-import time
-
-# Import big_query_utils module
-bq_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../utils'))
-sys.path.append(bq_utils_dir)
-import big_query_utils as bq_utils
-
-
-class EventType:
-  STARTING = 'STARTING'
-  RUNNING = 'RUNNING'
-  SUCCESS = 'SUCCESS'
-  FAILURE = 'FAILURE'
-
-
-class BigQueryHelper:
-  """Helper class for the stress test wrappers to interact with BigQuery.
-  """
-
-  def __init__(self, run_id, image_type, pod_name, project_id, dataset_id,
-               summary_table_id, qps_table_id):
-    self.run_id = run_id
-    self.image_type = image_type
-    self.pod_name = pod_name
-    self.project_id = project_id
-    self.dataset_id = dataset_id
-    self.summary_table_id = summary_table_id
-    self.qps_table_id = qps_table_id
-
-  def initialize(self):
-    self.bq = bq_utils.create_big_query()
-
-  def setup_tables(self):
-    return bq_utils.create_dataset(self.bq, self.project_id, self.dataset_id) \
-        and self.__create_summary_table() \
-        and self.__create_qps_table()
-
-  def insert_summary_row(self, event_type, details):
-    row_values_dict = {
-        'run_id': self.run_id,
-        'image_type': self.image_type,
-        'pod_name': self.pod_name,
-        'event_date': datetime.datetime.now().isoformat(),
-        'event_type': event_type,
-        'details': details
-    }
-    # row_unique_id is something that uniquely identifies the row (BigQuery uses
-    # it for duplicate detection).
-    row_unique_id = '%s_%s_%s' % (self.run_id, self.pod_name, event_type)
-    row = bq_utils.make_row(row_unique_id, row_values_dict)
-    return bq_utils.insert_rows(self.bq, self.project_id, self.dataset_id,
-                                self.summary_table_id, [row])
-
-  def insert_qps_row(self, qps, recorded_at):
-    row_values_dict = {
-        'run_id': self.run_id,
-        'pod_name': self.pod_name,
-        'recorded_at': recorded_at,
-        'qps': qps
-    }
-
-    # row_unique_id is something that uniquely identifies the row (BigQuery uses
-    # it for duplicate detection).
-    row_unique_id = '%s_%s_%s' % (self.run_id, self.pod_name, recorded_at)
-    row = bq_utils.make_row(row_unique_id, row_values_dict)
-    return bq_utils.insert_rows(self.bq, self.project_id, self.dataset_id,
-                                self.qps_table_id, [row])
-
-  def check_if_any_tests_failed(self, num_query_retries=3, timeout_msec=30000):
-    query = ('SELECT event_type FROM %s.%s WHERE run_id = \'%s\' AND '
-             'event_type="%s"') % (self.dataset_id, self.summary_table_id,
-                                   self.run_id, EventType.FAILURE)
-    page = None
-    try:
-      query_job = bq_utils.sync_query_job(self.bq, self.project_id, query)
-      job_id = query_job['jobReference']['jobId']
-      project_id = query_job['jobReference']['projectId']
-      page = self.bq.jobs().getQueryResults(
-          projectId=project_id,
-          jobId=job_id,
-          timeoutMs=timeout_msec).execute(num_retries=num_query_retries)
-
-      if not page['jobComplete']:
-        print('TIMEOUT ERROR: The query %s timed out. Current timeout value is'
-              ' %d msec. Returning False (i.e. assuming there are no failures)'
-             ) % (query, timeout_msec)
-        return False
-
-      num_failures = int(page['totalRows'])
-      print 'num rows: ', num_failures
-      return num_failures > 0
-    except:
-      print 'Exception in check_if_any_tests_failed(). Info: ', sys.exc_info()
-      print 'Query: ', query
-
-  def print_summary_records(self, num_query_retries=3):
-    line = '-' * 120
-    print line
-    print 'Summary records'
-    print 'Run Id: ', self.run_id
-    print 'Dataset Id: ', self.dataset_id
-    print line
-    query = ('SELECT pod_name, image_type, event_type, event_date, details'
-             ' FROM %s.%s WHERE run_id = \'%s\' ORDER by event_date;') % (
-                 self.dataset_id, self.summary_table_id, self.run_id)
-    query_job = bq_utils.sync_query_job(self.bq, self.project_id, query)
-
-    print '{:<25} {:<12} {:<12} {:<30} {}'.format('Pod name', 'Image type',
-                                                  'Event type', 'Date',
-                                                  'Details')
-    print line
-    page_token = None
-    while True:
-      page = self.bq.jobs().getQueryResults(
-          pageToken=page_token,
-          **query_job['jobReference']).execute(num_retries=num_query_retries)
-      rows = page.get('rows', [])
-      for row in rows:
-        print '{:<25} {:<12} {:<12} {:<30} {}'.format(row['f'][0]['v'],
-                                                      row['f'][1]['v'],
-                                                      row['f'][2]['v'],
-                                                      row['f'][3]['v'],
-                                                      row['f'][4]['v'])
-      page_token = page.get('pageToken')
-      if not page_token:
-        break
-
-  def print_qps_records(self, num_query_retries=3):
-    line = '-' * 80
-    print line
-    print 'QPS Summary'
-    print 'Run Id: ', self.run_id
-    print 'Dataset Id: ', self.dataset_id
-    print line
-    query = (
-        'SELECT pod_name, recorded_at, qps FROM %s.%s WHERE run_id = \'%s\' '
-        'ORDER by recorded_at;') % (self.dataset_id, self.qps_table_id,
-                                    self.run_id)
-    query_job = bq_utils.sync_query_job(self.bq, self.project_id, query)
-    print '{:<25} {:30} {}'.format('Pod name', 'Recorded at', 'Qps')
-    print line
-    page_token = None
-    while True:
-      page = self.bq.jobs().getQueryResults(
-          pageToken=page_token,
-          **query_job['jobReference']).execute(num_retries=num_query_retries)
-      rows = page.get('rows', [])
-      for row in rows:
-        print '{:<25} {:30} {}'.format(row['f'][0]['v'], row['f'][1]['v'],
-                                       row['f'][2]['v'])
-      page_token = page.get('pageToken')
-      if not page_token:
-        break
-
-  def __create_summary_table(self):
-    summary_table_schema = [
-        ('run_id', 'STRING', 'Test run id'),
-        ('image_type', 'STRING', 'Client or Server?'),
-        ('pod_name', 'STRING', 'GKE pod hosting this image'),
-        ('event_date', 'STRING', 'The date of this event'),
-        ('event_type', 'STRING', 'STARTING/RUNNING/SUCCESS/FAILURE'),
-        ('details', 'STRING', 'Any other relevant details')
-    ]
-    desc = ('The table that contains STARTING/RUNNING/SUCCESS/FAILURE events '
-            'for the stress test clients and servers')
-    return bq_utils.create_table(self.bq, self.project_id, self.dataset_id,
-                                 self.summary_table_id, summary_table_schema,
-                                 desc)
-
-  def __create_qps_table(self):
-    qps_table_schema = [
-        ('run_id', 'STRING', 'Test run id'),
-        ('pod_name', 'STRING', 'GKE pod hosting this image'),
-        ('recorded_at', 'STRING', 'Time at which the metrics were recorded'),
-        ('qps', 'INTEGER', 'Queries per second')
-    ]
-    desc = 'The table that contains the QPS values recorded at various intervals'
-    return bq_utils.create_table(self.bq, self.project_id, self.dataset_id,
-                                 self.qps_table_id, qps_table_schema, desc)
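Although the file above is removed, the one pattern worth noting in BigQueryHelper is how it makes streaming inserts retry-safe: every row's unique id is derived from run_id, pod_name and the event type, so BigQuery's duplicate detection drops repeated inserts of the same event. A minimal sketch of that pattern, reusing the big_query_utils helpers shown in this file (the sys.path setup is omitted, and the project/dataset/table names are placeholders):

```python
import big_query_utils as bq_utils

def insert_event(bq, run_id, pod_name, event_type, details):
    row_values = {
        'run_id': run_id,
        'pod_name': pod_name,
        'event_type': event_type,
        'details': details,
    }
    # Identical retries produce the same unique id, so BigQuery's streaming
    # insert deduplication discards the duplicates server-side.
    row_unique_id = '%s_%s_%s' % (run_id, pod_name, event_type)
    row = bq_utils.make_row(row_unique_id, row_values)
    # 'my-project', 'stress_results' and 'summary' are placeholder names.
    return bq_utils.insert_rows(bq, 'my-project', 'stress_results',
                                'summary', [row])
```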
diff --git a/tools/gcp/utils/kubernetes_api.py b/tools/gcp/utils/kubernetes_api.py
deleted file mode 100755
index a8a4aad69b455342a7f1025ababbdd80b8dfb275..0000000000000000000000000000000000000000
--- a/tools/gcp/utils/kubernetes_api.py
+++ /dev/null
@@ -1,269 +0,0 @@
-#!/usr/bin/env python2.7
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import requests
-import json
-
-_REQUEST_TIMEOUT_SECS = 10
-
-
-def _make_pod_config(pod_name, image_name, container_port_list, cmd_list,
-                     arg_list, env_dict):
-  """Creates a string containing the Pod defintion as required by the Kubernetes API"""
-  body = {
-      'kind': 'Pod',
-      'apiVersion': 'v1',
-      'metadata': {
-          'name': pod_name,
-          'labels': {'name': pod_name}
-      },
-      'spec': {
-          'containers': [
-              {
-                  'name': pod_name,
-                  'image': image_name,
-                  'ports': [{'containerPort': port,
-                             'protocol': 'TCP'}
-                            for port in container_port_list],
-                  'imagePullPolicy': 'Always'
-              }
-          ]
-      }
-  }
-
-  env_list = [{'name': k, 'value': v} for (k, v) in env_dict.iteritems()]
-  if len(env_list) > 0:
-    body['spec']['containers'][0]['env'] = env_list
-
-  # Add the 'Command' and 'Args' attributes if they are passed.
-  # Note:
-  #  - 'Command' overrides the ENTRYPOINT in the Docker Image
-  #  - 'Args' override the CMD in Docker image (yes, it is confusing!)
-  if len(cmd_list) > 0:
-    body['spec']['containers'][0]['command'] = cmd_list
-  if len(arg_list) > 0:
-    body['spec']['containers'][0]['args'] = arg_list
-  return json.dumps(body)
-
-
-def _make_service_config(service_name, pod_name, service_port_list,
-                         container_port_list, is_headless):
-  """Creates a string containing the Service definition as required by the Kubernetes API.
-
-  NOTE:
-  This creates either a headless service or a 'LoadBalancer' service, depending
-  on the is_headless parameter. For headless services there is no 'type'
-  attribute and the 'clusterIP' attribute is set to 'None'. Also, for headless
-  services Kubernetes creates DNS entries for the Pods, i.e. DNS A-records
-  mapping the service's name to the Pods' IPs.
-  """
-  if len(container_port_list) != len(service_port_list):
-    print(
-        'ERROR: container_port_list and service_port_list must be of same size')
-    return ''
-  body = {
-      'kind': 'Service',
-      'apiVersion': 'v1',
-      'metadata': {
-          'name': service_name,
-          'labels': {
-              'name': service_name
-          }
-      },
-      'spec': {
-          'ports': [],
-          'selector': {
-              'name': pod_name
-          }
-      }
-  }
-  # Populate the 'ports' list in the 'spec' section. This maps service ports
-  # (port numbers that are exposed by Kubernetes) to container ports (i.e. port
-  # numbers that are exposed by your Docker image).
-  for idx in range(len(container_port_list)):
-    port_entry = {
-        'port': service_port_list[idx],
-        'targetPort': container_port_list[idx],
-        'protocol': 'TCP'
-    }
-    body['spec']['ports'].append(port_entry)
-
-  # Make this either a LoadBalancer service or a headless service depending on
-  # the is_headless parameter
-  if is_headless:
-    body['spec']['clusterIP'] = 'None'
-  else:
-    body['spec']['type'] = 'LoadBalancer'
-  return json.dumps(body)
-
-
-def _print_connection_error(msg):
-  print('ERROR: Connection failed. Did you remember to run the Kubernetes proxy '
-        'on localhost (i.e. kubectl proxy --port=<proxy_port>)? Error: %s' % msg)
-
-
-def _do_post(post_url, api_name, request_body):
-  """Helper to do HTTP POST.
-
-  Note:
-  1) On success, Kubernetes returns a success code of 201 (CREATED), not 200 (OK).
-  2) A response code of 409 (CONFLICT) is interpreted as a success code (since
-  the error is most likely due to the resource already existing). This makes
-  _do_post() idempotent, which is semantically desirable.
-  """
-  is_success = True
-  try:
-    r = requests.post(post_url,
-                      data=request_body,
-                      timeout=_REQUEST_TIMEOUT_SECS)
-    if r.status_code == requests.codes.conflict:
-      print('WARN: Looks like the resource already exists. Api: %s, url: %s' %
-            (api_name, post_url))
-    elif r.status_code != requests.codes.created:
-      print('ERROR: %s API returned error. HTTP response: (%d) %s' %
-            (api_name, r.status_code, r.text))
-      is_success = False
-  except (requests.exceptions.Timeout,
-          requests.exceptions.ConnectionError) as e:
-    is_success = False
-    _print_connection_error(str(e))
-  return is_success
-
-
-def _do_delete(del_url, api_name):
-  """Helper to do HTTP DELETE.
-
-  Note: A response code of 404 (NOT_FOUND) is treated as success to keep
-  _do_delete() idempotent.
-  """
-  is_success = True
-  try:
-    r = requests.delete(del_url, timeout=_REQUEST_TIMEOUT_SECS)
-    if r.status_code == requests.codes.not_found:
-      print('WARN: The resource does not exist. Api: %s, url: %s' %
-            (api_name, del_url))
-    elif r.status_code != requests.codes.ok:
-      print('ERROR: %s API returned error. HTTP response: %s' %
-            (api_name, r.text))
-      is_success = False
-  except (requests.exceptions.Timeout,
-          requests.exceptions.ConnectionError) as e:
-    is_success = False
-    _print_connection_error(str(e))
-  return is_success
-
-
-def create_service(kube_host, kube_port, namespace, service_name, pod_name,
-                   service_port_list, container_port_list, is_headless):
-  """Creates either a Headless Service or a LoadBalancer Service depending
-  on the is_headless parameter.
-  """
-  post_url = 'http://%s:%d/api/v1/namespaces/%s/services' % (
-      kube_host, kube_port, namespace)
-  request_body = _make_service_config(service_name, pod_name, service_port_list,
-                                      container_port_list, is_headless)
-  return _do_post(post_url, 'Create Service', request_body)
-
-
-def create_pod(kube_host, kube_port, namespace, pod_name, image_name,
-               container_port_list, cmd_list, arg_list, env_dict):
-  """Creates a Kubernetes Pod.
-
-  Note that it is generally NOT considered good practice to create Pods
-  directly. The typical recommendation is to create 'Controllers' that create
-  and manage the Pods' lifecycle. Currently Kubernetes only supports the
-  'Replication Controller', which creates a configurable number of identical
-  replicas of a Pod and automatically restarts them on failures (e.g. machine
-  failures in the cluster). That makes it less flexible for our test use cases,
-  where we might want to pass a slightly different set of args to each Pod.
-  Hence we create Pods directly and do not worry much about Kubernetes-level
-  failures, since those are very rare.
-  """
-  post_url = 'http://%s:%d/api/v1/namespaces/%s/pods' % (kube_host, kube_port,
-                                                         namespace)
-  request_body = _make_pod_config(pod_name, image_name, container_port_list,
-                                  cmd_list, arg_list, env_dict)
-  return _do_post(post_url, 'Create Pod', request_body)
-
-
-def delete_service(kube_host, kube_port, namespace, service_name):
-  del_url = 'http://%s:%d/api/v1/namespaces/%s/services/%s' % (
-      kube_host, kube_port, namespace, service_name)
-  return _do_delete(del_url, 'Delete Service')
-
-
-def delete_pod(kube_host, kube_port, namespace, pod_name):
-  del_url = 'http://%s:%d/api/v1/namespaces/%s/pods/%s' % (kube_host, kube_port,
-                                                           namespace, pod_name)
-  return _do_delete(del_url, 'Delete Pod')
-
-
-def create_pod_and_service(kube_host, kube_port, namespace, pod_name,
-                           image_name, container_port_list, cmd_list, arg_list,
-                           env_dict, is_headless_service):
-  """A helper function that creates a pod and a service (if pod creation was successful)."""
-  is_success = create_pod(kube_host, kube_port, namespace, pod_name, image_name,
-                          container_port_list, cmd_list, arg_list, env_dict)
-  if not is_success:
-    print 'Error in creating Pod'
-    return False
-
-  is_success = create_service(
-      kube_host,
-      kube_port,
-      namespace,
-      pod_name,  # Use pod_name for service
-      pod_name,
-      container_port_list,  # Service port list same as container port list
-      container_port_list,
-      is_headless_service)
-  if not is_success:
-    print 'Error in creating Service'
-    return False
-
-  print 'Successfully created the pod/service %s' % pod_name
-  return True
-
-
-def delete_pod_and_service(kube_host, kube_port, namespace, pod_name):
-  """ A helper function that calls delete_pod and delete_service """
-  is_success = delete_pod(kube_host, kube_port, namespace, pod_name)
-  if not is_success:
-    print 'Error in deleting pod %s' % pod_name
-    return False
-
-  # Note: the service name is assumed to be the same as the pod name
-  is_success = delete_service(kube_host, kube_port, namespace, pod_name)
-  if not is_success:
-    print 'Error in deleting service %s' % pod_name
-    return False
-
-  print 'Successfully deleted the Pod/Service: %s' % pod_name
-  return True
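The core trick in the module above is making bare HTTP calls against the Kubernetes REST API idempotent by treating "already exists" and "already gone" responses as success. A minimal standalone sketch of that pattern with `requests` (the timeout value is illustrative):

```python
import requests

_TIMEOUT_SECS = 10

def idempotent_post(url, body):
    # 201 (CREATED) is the normal success; 409 (CONFLICT) means the resource
    # already exists, which we also accept so that retries are harmless.
    try:
        r = requests.post(url, data=body, timeout=_TIMEOUT_SECS)
    except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
        return False
    return r.status_code in (requests.codes.created, requests.codes.conflict)

def idempotent_delete(url):
    # 200 (OK) is the normal success; 404 (NOT_FOUND) means it is already gone.
    try:
        r = requests.delete(url, timeout=_TIMEOUT_SECS)
    except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
        return False
    return r.status_code in (requests.codes.ok, requests.codes.not_found)
```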
diff --git a/tools/jenkins/run_interop_stress.sh b/tools/jenkins/run_interop_stress.sh
deleted file mode 100755
index 22d81db8bcdf217f0f65371301c6e0faae710518..0000000000000000000000000000000000000000
--- a/tools/jenkins/run_interop_stress.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# This script is invoked by Jenkins and runs the interop stress test suite.
-set -ex
-
-# Enter the gRPC repo root
-cd $(dirname $0)/../..
-
-tools/run_tests/run_stress_tests.py -l all -s all -j 12 "$@" || true
diff --git a/tools/run_tests/artifacts/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py
index 04702baccaeba3afe99b21d18c2146e8acf24388..c90da066c349c1832c24495ff5604ce54cba8c65 100644
--- a/tools/run_tests/artifacts/artifact_targets.py
+++ b/tools/run_tests/artifacts/artifact_targets.py
@@ -202,6 +202,7 @@ class CSharpExtArtifact:
                  'EMBED_OPENSSL': 'true',
                  'EMBED_ZLIB': 'true',
                  'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
+                 'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
                  'LDFLAGS': ''}
       if self.platform == 'linux':
         return create_docker_jobspec(self.name,
@@ -211,6 +212,7 @@ class CSharpExtArtifact:
       else:
         archflag = _ARCH_FLAG_MAP[self.arch]
         environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
+        environ['CXXFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
         environ['LDFLAGS'] += ' %s' % archflag
         return create_jobspec(self.name,
                               ['tools/run_tests/artifacts/build_artifact_csharp.sh'],
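The artifact_targets.py hunk mirrors every CFLAGS setting into CXXFLAGS, so C++ translation units in the C# native extension get the same backwards-compatibility define and arch flags as the C ones. A small sketch of the invariant being maintained (the flag values below are assumptions, not taken from this diff):

```python
# Keep the C and C++ flag sets in lockstep, so a define or arch flag added
# to one cannot silently be missing from the other.
environ = {
    'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
    'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
}
archflag = '-m64'                                 # hypothetical value
macos_compat_flag = '-mmacosx-version-min=10.7'   # hypothetical value
for var in ('CFLAGS', 'CXXFLAGS'):
    environ[var] += ' %s %s' % (archflag, macos_compat_flag)
```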
diff --git a/tools/run_tests/dockerize/build_docker_and_run_tests.sh b/tools/run_tests/dockerize/build_docker_and_run_tests.sh
index f10916d19235f1284287e9f5111ec76fcc419072..731bb02d214480bc73238b1de2a4a92389433a2e 100755
--- a/tools/run_tests/dockerize/build_docker_and_run_tests.sh
+++ b/tools/run_tests/dockerize/build_docker_and_run_tests.sh
@@ -79,7 +79,10 @@ docker run \
   -e HOST_GIT_ROOT=$git_root \
   -e LOCAL_GIT_ROOT=$docker_instance_git_root \
   -e "BUILD_ID=$BUILD_ID" \
+  -e "BUILD_URL=$BUILD_URL" \
+  -e "JOB_BASE_NAME=$JOB_BASE_NAME" \
   -i $TTY_FLAG \
+  -v ~/.config/gcloud:/root/.config/gcloud \
   -v "$git_root:$docker_instance_git_root" \
   -v /tmp/ccache:/tmp/ccache \
   -v /tmp/npm-cache:/tmp/npm-cache \
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index ec8aad69957eceaab284b6b955efa1b060bd9120..c77961c012494e66ae9a2e02842e61e3f3e9ddce 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -4205,7 +4205,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4229,7 +4230,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4253,7 +4255,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4277,7 +4280,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4303,7 +4307,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4327,7 +4332,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4351,7 +4357,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4378,7 +4385,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4405,7 +4413,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4432,7 +4441,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4459,7 +4469,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4486,7 +4497,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4513,7 +4525,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4540,7 +4553,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4567,7 +4581,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4594,7 +4609,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4621,7 +4637,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4648,7 +4665,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4675,7 +4693,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4702,7 +4721,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4729,7 +4749,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4756,7 +4777,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4783,7 +4805,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4810,7 +4833,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4837,7 +4861,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4864,7 +4889,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4891,7 +4917,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4917,7 +4944,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4941,7 +4969,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4965,7 +4994,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -4991,7 +5021,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5015,7 +5046,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5039,7 +5071,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5063,7 +5096,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5087,7 +5121,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5111,7 +5146,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5135,7 +5171,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5159,7 +5196,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5185,7 +5223,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5211,7 +5250,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5235,7 +5275,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5261,7 +5302,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5285,7 +5327,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5309,7 +5352,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5335,7 +5379,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5359,7 +5404,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5383,7 +5429,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5409,7 +5456,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5433,7 +5481,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5457,7 +5506,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5481,7 +5531,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5505,7 +5556,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5531,7 +5583,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5555,7 +5608,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5579,7 +5633,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5603,7 +5658,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5629,7 +5685,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5653,7 +5710,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5677,7 +5735,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5701,7 +5760,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5725,7 +5785,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5749,7 +5810,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5773,7 +5835,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5797,7 +5860,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -5821,7 +5885,8 @@
     "cpu_cost": 1.0, 
     "defaults": "boringssl", 
     "exclude_configs": [
-      "asan"
+      "asan", 
+      "ubsan"
     ], 
     "flaky": false, 
     "language": "c++", 
@@ -41438,7 +41503,7 @@
   {
     "args": [
       "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "boringssl": true, 
     "ci_platforms": [
@@ -42217,7 +42282,7 @@
   {
     "args": [
       "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "boringssl": true, 
     "ci_platforms": [
@@ -43074,7 +43139,7 @@
   {
     "args": [
       "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "boringssl": true, 
     "ci_platforms": [
@@ -44256,7 +44321,7 @@
   {
     "args": [
       "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "boringssl": true, 
     "ci_platforms": [
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index 459120d647fac7a1c860d67be9184fdaf2c70629..8ed675ecc3c1928b957bb20158fb05ec399941fb 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -53,6 +53,7 @@ HISTOGRAM_PARAMS = {
 # actual target will be slightly higher)
 OUTSTANDING_REQUESTS={
     'async': 6400,
+    'async-1core': 800,
     'sync': 1000
 }
 
@@ -265,7 +266,7 @@ class CXXLanguage:
           rpc_type='STREAMING',
           client_type='ASYNC_CLIENT',
           server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
+          unconstrained_client='async-1core', use_generic_payload=True,
           async_server_threads=1,
           secure=secure)
 
@@ -752,7 +753,7 @@ class JavaLanguage:
       yield _ping_pong_scenario(
           'java_generic_async_streaming_qps_one_server_core_%s' % secstr, rpc_type='STREAMING',
           client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
+          unconstrained_client='async-1core', use_generic_payload=True,
           async_server_threads=1,
           secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
 
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index d01e82a76b18ea7587af7844838cf56b7eba3166..4e97128d46717c74b3cc33e5b459ffa3e24dffdc 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -222,7 +222,8 @@ class JobResult(object):
     self.num_failures = 0
     self.retries = 0
     self.message = ''
-
+    self.cpu_estimated = 1
+    self.cpu_measured = 0
 
 class Job(object):
   """Manages one job."""
@@ -312,7 +313,9 @@ class Job(object):
           sys = float(m.group(3))
           if real > 0.5:
             cores = (user + sys) / real
-            measurement = '; cpu_cost=%.01f; estimated=%.01f' % (cores, self._spec.cpu_cost)
+            self.result.cpu_measured = float('%.01f' % cores)
+            self.result.cpu_estimated = float('%.01f' % self._spec.cpu_cost)
+            measurement = '; cpu_cost=%.01f; estimated=%.01f' % (self.result.cpu_measured, self.result.cpu_estimated)
         if not self._quiet_success:
           message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % (
               self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
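The jobset.py change keeps the measured and estimated CPU costs on the JobResult instead of only formatting them into the log line, so the new BigQuery uploader can persist them per test. A minimal sketch of the measurement itself, assuming user/system/real seconds have already been parsed from the job's output:

```python
def measured_cores(user_secs, sys_secs, real_secs):
    # Below half a second of wall time the ratio is too noisy to be useful.
    if real_secs <= 0.5:
        return None
    # Cores used = total CPU time divided by wall time, rounded to 0.1,
    # mirroring the float('%.01f' % cores) rounding in jobset.py above.
    return float('%.01f' % ((user_secs + sys_secs) / real_secs))

assert measured_cores(1.2, 0.3, 1.0) == 1.5
assert measured_cores(0.1, 0.0, 0.2) is None
```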
diff --git a/tools/run_tests/python_utils/report_utils.py b/tools/run_tests/python_utils/report_utils.py
index 502efc31f4fef77293f5af0d82bb9654260476fc..b8dd67e06048f9ac4e464d2366da887938573781 100644
--- a/tools/run_tests/python_utils/report_utils.py
+++ b/tools/run_tests/python_utils/report_utils.py
@@ -89,6 +89,7 @@ def render_junit_xml_report(resultset, xml_report, suite_package='grpc',
   tree = ET.ElementTree(root)
   tree.write(xml_report, encoding='UTF-8')
 
+
 def render_interop_html_report(
   client_langs, server_langs, test_cases, auth_test_cases, http2_cases,
   http2_server_cases, resultset,
diff --git a/tools/run_tests/python_utils/upload_test_results.py b/tools/run_tests/python_utils/upload_test_results.py
new file mode 100644
index 0000000000000000000000000000000000000000..d076d1e5a2a1683ac0d613f3271cd7b83a01a795
--- /dev/null
+++ b/tools/run_tests/python_utils/upload_test_results.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+# Copyright 2017, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Helper to upload Jenkins test results to BQ"""
+
+from __future__ import print_function
+
+import os
+import six
+import sys
+import time
+import uuid
+
+gcp_utils_dir = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), '../../gcp/utils'))
+sys.path.append(gcp_utils_dir)
+import big_query_utils
+
+_DATASET_ID = 'jenkins_test_results'
+_DESCRIPTION = 'Test results from master job run on Jenkins'
+_PROJECT_ID = 'grpc-testing'
+_RESULTS_SCHEMA = [
+  ('job_name', 'STRING', 'Name of Jenkins job'),
+  ('build_id', 'INTEGER', 'Build ID of Jenkins job'),
+  ('build_url', 'STRING', 'URL of Jenkins job'),
+  ('test_name', 'STRING', 'Individual test name'),
+  ('language', 'STRING', 'Language of test'),
+  ('platform', 'STRING', 'Platform used for test'),
+  ('config', 'STRING', 'Config used for test'),
+  ('compiler', 'STRING', 'Compiler used for test'),
+  ('iomgr_platform', 'STRING', 'Iomgr used for test'),
+  ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
+  ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
+  ('elapsed_time', 'FLOAT', 'How long test took to run'),
+  ('cpu_estimated', 'FLOAT', 'Estimated CPU usage of test'),
+  ('cpu_measured', 'FLOAT', 'Actual CPU usage of test'),
+]
+
+
+def _get_build_metadata(test_results):
+  """Add Jenkins build metadata to test_results based on environment variables set by Jenkins."""
+  build_id = os.getenv('BUILD_ID')
+  build_url = os.getenv('BUILD_URL')
+  job_name = os.getenv('JOB_BASE_NAME')
+
+  if build_id:
+    test_results['build_id'] = build_id
+  if build_url:
+    test_results['build_url'] = build_url
+  if job_name:
+    test_results['job_name'] = job_name
+
+def upload_results_to_bq(resultset, bq_table, args, platform):
+  """Upload test results to a BQ table.
+
+  Args:
+      resultset: dictionary generated by jobset.run
+      bq_table: string name of table to create/upload results to in BQ
+      args: args in run_tests.py, generated by argparse
+      platform: string name of platform tests were run on
+  """
+  bq = big_query_utils.create_big_query()
+  big_query_utils.create_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _RESULTS_SCHEMA, _DESCRIPTION)
+
+  for shortname, results in six.iteritems(resultset):
+    for result in results:
+      test_results = {}
+      _get_build_metadata(test_results)
+      test_results['compiler'] = args.compiler
+      test_results['config'] = args.config
+      test_results['cpu_estimated'] = result.cpu_estimated
+      test_results['cpu_measured'] = result.cpu_measured
+      test_results['elapsed_time'] = '%.2f' % result.elapsed_time
+      test_results['iomgr_platform'] = args.iomgr_platform
+      # args.language is a list, but it will always have exactly one element
+      # in the contexts in which this function is used.
+      test_results['language'] = args.language[0]
+      test_results['platform'] = platform
+      test_results['result'] = result.state
+      test_results['test_name'] = shortname
+      test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
+
+      row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
+      if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
+        print('Error uploading result to bigquery.')
+        sys.exit(1)
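upload_results_to_bq expects the resultset produced by jobset.run plus the argparse namespace from run_tests.py. A hypothetical call site, not part of this diff: the --bq_result_table flag name and the stub arguments are assumptions for illustration, chosen to cover the fields the uploader reads:

```python
#!/usr/bin/env python
import argparse
import platform

import python_utils.jobset as jobset
import python_utils.upload_test_results as upload_test_results

argp = argparse.ArgumentParser()
# Fields read by upload_results_to_bq; defaults are placeholders.
argp.add_argument('--bq_result_table', default='', type=str,
                  help='Upload test results to this BigQuery table if set.')
argp.add_argument('--compiler', default='default')
argp.add_argument('--config', default='opt')
argp.add_argument('--iomgr_platform', default='native')
argp.add_argument('--language', nargs='+', default=['c++'])
args = argp.parse_args()

num_failures, resultset = jobset.run([], maxjobs=1)  # placeholder job list
if args.bq_result_table:
    upload_test_results.upload_results_to_bq(
        resultset, args.bq_result_table, args, platform.system().lower())
```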
diff --git a/tools/run_tests/run_stress_tests.py b/tools/run_tests/run_stress_tests.py
deleted file mode 100755
index 4eea02118e54682f5c6e26b36f53c7fdaa11966f..0000000000000000000000000000000000000000
--- a/tools/run_tests/run_stress_tests.py
+++ /dev/null
@@ -1,331 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""Run stress test in C++"""
-
-from __future__ import print_function
-
-import argparse
-import atexit
-import itertools
-import json
-import multiprocessing
-import os
-import re
-import subprocess
-import sys
-import tempfile
-import time
-import uuid
-import six
-
-import python_utils.dockerjob as dockerjob
-import python_utils.jobset as jobset
-
-# Docker doesn't clean up after itself, so we do it on exit.
-atexit.register(lambda: subprocess.call(['stty', 'echo']))
-
-ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
-os.chdir(ROOT)
-
-_DEFAULT_SERVER_PORT = 8080
-_DEFAULT_METRICS_PORT = 8081
-_DEFAULT_TEST_CASES = 'empty_unary:20,large_unary:20,client_streaming:20,server_streaming:20,empty_stream:20'
-_DEFAULT_NUM_CHANNELS_PER_SERVER = 5
-_DEFAULT_NUM_STUBS_PER_CHANNEL = 10
-
-# 15 mins default
-_DEFAULT_TEST_DURATION_SECS = 900
-
-class CXXLanguage:
-
-  def __init__(self):
-    self.client_cwd = None
-    self.server_cwd = None
-    self.safename = 'cxx'
-
-  def client_cmd(self, args):
-    return ['bins/opt/stress_test'] + args
-
-  def server_cmd(self, args):
-    return ['bins/opt/interop_server'] + args
-
-  def global_env(self):
-    return {}
-
-  def __str__(self):
-    return 'c++'
-
-
-_LANGUAGES = {'c++': CXXLanguage(),}
-
-# languages supported as cloud_to_cloud servers
-_SERVERS = ['c++']
-
-DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
-
-
-def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
-  """Wraps given cmdline array to create 'docker run' cmdline from it."""
-  docker_cmdline = ['docker', 'run', '-i', '--rm=true']
-
-  # turn environ into -e docker args
-  if environ:
-    for k, v in environ.items():
-      docker_cmdline += ['-e', '%s=%s' % (k, v)]
-
-  # set working directory
-  workdir = DOCKER_WORKDIR_ROOT
-  if cwd:
-    workdir = os.path.join(workdir, cwd)
-  docker_cmdline += ['-w', workdir]
-
-  docker_cmdline += docker_args + [image] + cmdline
-  return docker_cmdline
-
-
-def bash_login_cmdline(cmdline):
-  """Creates bash -l -c cmdline from args list."""
-  # Use login shell:
-  # * rvm and nvm require it
-  # * makes error messages clearer if executables are missing
-  return ['bash', '-l', '-c', ' '.join(cmdline)]
-
-
-def _job_kill_handler(job):
-  if job._spec.container_name:
-    dockerjob.docker_kill(job._spec.container_name)
-    # When the job times out and we decide to kill it,
-    # we need to wait a while before restarting the job
-    # to prevent a "container name already in use" error.
-    # TODO(jtattermusch): figure out a cleaner way to do this.
-    time.sleep(2)
-
-
-def cloud_to_cloud_jobspec(language,
-                           test_cases,
-                           server_name,
-                           server_addresses,
-                           test_duration_secs,
-                           num_channels_per_server,
-                           num_stubs_per_channel,
-                           metrics_port,
-                           docker_image=None):
-  """Creates jobspec for cloud-to-cloud interop test"""
-  cmdline = bash_login_cmdline(language.client_cmd([
-      '--test_cases=%s' % test_cases, '--server_addresses=%s' %
-      server_addresses, '--test_duration_secs=%s' % test_duration_secs,
-      '--num_stubs_per_channel=%s' % num_stubs_per_channel,
-      '--num_channels_per_server=%s' % num_channels_per_server,
-      '--metrics_port=%s' % metrics_port
-  ]))
-  print(cmdline)
-  cwd = language.client_cwd
-  environ = language.global_env()
-  container_name = None
-  if docker_image:
-    container_name = dockerjob.random_name('interop_client_%s' %
-                                           language.safename)
-    cmdline = docker_run_cmdline(
-        cmdline,
-        image=docker_image,
-        environ=environ,
-        cwd=cwd,
-        docker_args=['--net=host', '--name', container_name])
-    cwd = None
-
-  test_job = jobset.JobSpec(cmdline=cmdline,
-                            cwd=cwd,
-                            environ=environ,
-                            shortname='cloud_to_cloud:%s:%s_server:stress_test' % (
-                                language, server_name),
-                            timeout_seconds=test_duration_secs * 2,
-                            flake_retries=0,
-                            timeout_retries=0,
-                            kill_handler=_job_kill_handler)
-  test_job.container_name = container_name
-  return test_job
-
-
-def server_jobspec(language, docker_image, test_duration_secs):
-  """Create jobspec for running a server"""
-  container_name = dockerjob.random_name('interop_server_%s' %
-                                         language.safename)
-  cmdline = bash_login_cmdline(language.server_cmd(['--port=%s' %
-                                                    _DEFAULT_SERVER_PORT]))
-  environ = language.global_env()
-  docker_cmdline = docker_run_cmdline(
-      cmdline,
-      image=docker_image,
-      cwd=language.server_cwd,
-      environ=environ,
-      docker_args=['-p', str(_DEFAULT_SERVER_PORT), '--name', container_name])
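-  # Note: '-p <port>' with no host port publishes _DEFAULT_SERVER_PORT on an
-  # ephemeral host port, which the launcher later resolves via
-  # job.mapped_port(_DEFAULT_SERVER_PORT).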
-
-  server_job = jobset.JobSpec(cmdline=docker_cmdline,
-                              environ=environ,
-                              shortname='interop_server_%s' % language,
-                              timeout_seconds=test_duration_secs * 3)
-  server_job.container_name = container_name
-  return server_job
-
-
-def build_interop_stress_image_jobspec(language, tag=None):
-  """Creates jobspec for building stress test docker image for a language"""
-  if not tag:
-    tag = 'grpc_interop_stress_%s:%s' % (language.safename, uuid.uuid4())
-  env = {'INTEROP_IMAGE': tag,
-         'BASE_NAME': 'grpc_interop_stress_%s' % language.safename}
-  build_job = jobset.JobSpec(cmdline=['tools/run_tests/dockerize/build_interop_stress_image.sh'],
-                             environ=env,
-                             shortname='build_docker_%s' % (language),
-                             timeout_seconds=30 * 60)
-  build_job.tag = tag
-  return build_job
-
-argp = argparse.ArgumentParser(description='Run stress tests.')
-argp.add_argument('-l',
-                  '--language',
-                  choices=['all'] + sorted(_LANGUAGES),
-                  nargs='+',
-                  default=['all'],
-                  help='Clients to run.')
-argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument(
-    '-s',
-    '--server',
-    choices=['all'] + sorted(_SERVERS),
-    action='append',
-    help='Run cloud_to_cloud servers in a separate docker image.',
-    default=[])
-argp.add_argument(
-    '--override_server',
-    action='append',
-    type=lambda kv: kv.split('='),
-    help=
-    'Use servername=HOST:PORT to explicitly specify a server. E.g. '
-    'csharp=localhost:50000',
-    default=[])
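-# An --override_server value such as 'csharp=localhost:50000' is split into
-# ['csharp', 'localhost:50000']; the loop further below records it as
-# server_addresses['csharp'] = ('localhost', '50000').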
-argp.add_argument('--test_duration_secs',
-                  help='The duration of the test in seconds',
-                  type=int,
-                  default=_DEFAULT_TEST_DURATION_SECS)
-
-args = argp.parse_args()
-
-servers = set(
-    s
-    for s in itertools.chain.from_iterable(_SERVERS if x == 'all' else [x]
-                                           for x in args.server))
-
-languages = set(_LANGUAGES[l] for l in itertools.chain.from_iterable(
-  six.iterkeys(_LANGUAGES) if x == 'all' else [x] for x in args.language))
-
-docker_images = {}
-# languages for which to build docker images
-languages_to_build = set(
-    _LANGUAGES[k]
-    for k in set([str(l) for l in languages] + [s for s in servers]))
-build_jobs = []
-for l in languages_to_build:
-  job = build_interop_stress_image_jobspec(l)
-  docker_images[str(l)] = job.tag
-  build_jobs.append(job)
-
-if build_jobs:
-  jobset.message('START', 'Building interop docker images.', do_newline=True)
-  num_failures, _ = jobset.run(build_jobs,
-                               newline_on_success=True,
-                               maxjobs=args.jobs)
-  if num_failures == 0:
-    jobset.message('SUCCESS',
-                   'All docker images built successfully.',
-                   do_newline=True)
-  else:
-    jobset.message('FAILED',
-                   'Failed to build interop docker images.',
-                   do_newline=True)
-    for image in six.itervalues(docker_images):
-      dockerjob.remove_image(image, skip_nonexistent=True)
-    sys.exit(1)
-
-# Start interop servers.
-server_jobs = {}
-server_addresses = {}
-try:
-  for s in servers:
-    lang = str(s)
-    spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang), args.test_duration_secs)
-    job = dockerjob.DockerJob(spec)
-    server_jobs[lang] = job
-    server_addresses[lang] = ('localhost',
-                              job.mapped_port(_DEFAULT_SERVER_PORT))
-
-  jobs = []
-
-  for server in args.override_server:
-    server_name = server[0]
-    (server_host, server_port) = server[1].split(':')
-    server_addresses[server_name] = (server_host, server_port)
-
-  for server_name, server_address in server_addresses.items():
-    (server_host, server_port) = server_address
-    for language in languages:
-      test_job = cloud_to_cloud_jobspec(
-          language,
-          _DEFAULT_TEST_CASES,
-          server_name,
-          ('%s:%s' % (server_host, server_port)),
-          args.test_duration_secs,
-          _DEFAULT_NUM_CHANNELS_PER_SERVER,
-          _DEFAULT_NUM_STUBS_PER_CHANNEL,
-          _DEFAULT_METRICS_PORT,
-          docker_image=docker_images.get(str(language)))
-      jobs.append(test_job)
-
-  if not jobs:
-    print('No jobs to run.')
-    for image in six.itervalues(docker_images):
-      dockerjob.remove_image(image, skip_nonexistent=True)
-    sys.exit(1)
-
-  num_failures, resultset = jobset.run(jobs,
-                                       newline_on_success=True,
-                                       maxjobs=args.jobs)
-  if num_failures:
-    jobset.message('FAILED', 'Some tests failed', do_newline=True)
-  else:
-    jobset.message('SUCCESS', 'All tests passed', do_newline=True)
-
-finally:
-  # Check if servers are still running.
-  for server, job in server_jobs.items():
-    if not job.is_running():
-      print('Server "%s" has exited prematurely.' % server)
-
-  dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
-
-  for image in six.itervalues(docker_images):
-    print('Removing docker image %s' % image)
-    dockerjob.remove_image(image)
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 62e92c3d1dd0df8deda3696a6fc502642e347db1..22891c5c982ebd25b7edb4bf8dbb7edad534fa35 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -60,7 +60,10 @@ import python_utils.jobset as jobset
 import python_utils.report_utils as report_utils
 import python_utils.watch_dirs as watch_dirs
 import python_utils.start_port_server as start_port_server
-
+try:
+  from python_utils.upload_test_results import upload_results_to_bq
+except ImportError:
+  upload_results_to_bq = None  # Optional; only needed to upload results to BQ.
 
 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(_ROOT)
@@ -1185,7 +1188,7 @@ argp.add_argument('--build_only',
                   default=False,
                   action='store_const',
                   const=True,
-                  help='Perform all the build steps but dont run any tests.')
+                  help='Perform all the build steps but don\'t run any tests.')
 argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
                   help='Measure the cpu costs of tests')
 argp.add_argument('--update_submodules', default=[], nargs='*',
@@ -1200,11 +1203,16 @@ argp.add_argument('--quiet_success',
                   default=False,
                   action='store_const',
                   const=True,
-                  help='Dont print anything when a test passes. Passing tests also will not be reported in XML report. ' +
+                  help='Don\'t print anything when a test passes. Passing tests will also be omitted from the XML report. ' +
                        'Useful when running many iterations of each test (argument -n).')
 argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
-                  help='Dont try to iterate over many polling strategies when they exist')
+                  help='Don\'t try to iterate over many polling strategies when they exist')
 argp.add_argument('--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
+argp.add_argument('--bq_result_table',
+                  default='',
+                  type=str,
+                  nargs='?',
+                  help='Upload test results to a specified BQ table.')
 args = argp.parse_args()
 
 if args.force_default_poller:
@@ -1503,6 +1511,8 @@ def _build_and_run(
   finally:
     for antagonist in antagonists:
       antagonist.kill()
+    if args.bq_result_table and resultset and upload_results_to_bq:
+      upload_results_to_bq(resultset, args.bq_result_table, args, platform_string())
     if xml_report and resultset:
       report_utils.render_junit_xml_report(resultset, xml_report,
                                            suite_name=args.report_suite_name)
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 474b56b5de5f6dc5735226ab4285ae6dce2fa04f..884d626469d415f528e9d1c07795d47e498ef633 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -208,7 +208,7 @@ def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS)
                                 extra_args=extra_args,
                                 inner_jobs=inner_jobs)
 
-  for compiler in ['gcc4.8', 'gcc5.3',
+  for compiler in ['gcc4.8', 'gcc5.3', 'gcc_musl',
                    'clang3.5', 'clang3.6', 'clang3.7']:
     test_jobs += _generate_jobs(languages=['c++'],
                                 configs=['dbg'],
@@ -267,6 +267,15 @@ def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS)
                               extra_args=extra_args,
                               inner_jobs=inner_jobs)
 
+  test_jobs += _generate_jobs(languages=['python'],
+                              configs=['dbg'],
+                              platforms=['linux'],
+                              arch='default',
+                              compiler='python_alpine',
+                              labels=['portability'],
+                              extra_args=extra_args,
+                              inner_jobs=inner_jobs)
+
   test_jobs += _generate_jobs(languages=['csharp'],
                               configs=['dbg'],
                               platforms=['linux'],
@@ -387,6 +396,11 @@ if __name__ == "__main__":
                     const=True,
                     help='Put reports into subdirectories to improve presentation of '
                     'results by Internal CI.')
+  argp.add_argument('--bq_result_table',
+                    default='',
+                    type=str,
+                    nargs='?',
+                    help='Upload test results to a specified BQ table.')
   args = argp.parse_args()
 
   if args.internal_ci:
@@ -403,6 +417,10 @@ if __name__ == "__main__":
     extra_args.append('--quiet_success')
   if args.max_time > 0:
     extra_args.extend(('--max_time', '%d' % args.max_time))
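+  # Forward the BQ table name to each inner run_tests.py invocation, with CPU
+  # cost measurement enabled as well.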
+  if args.bq_result_table:
+    extra_args.extend(('--bq_result_table', args.bq_result_table,
+                       '--measure_cpu_costs'))
 
   all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
              _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh
index 6be7a39d070f62fc459f55cfb7922b45938ccdc3..43d05212e875cae0177cb207443361b7c3526d07 100755
--- a/tools/run_tests/sanity/check_submodules.sh
+++ b/tools/run_tests/sanity/check_submodules.sh
@@ -46,7 +46,7 @@ cat << EOF | awk '{ print $1 }' | sort > $want_submodules
  886e7d75368e3f4fab3f4d0d3584e4abfc557755 third_party/boringssl-with-bazel (version_for_cocoapods_7.0-857-g886e7d7)
  30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e third_party/gflags (v2.2.0)
  ec44c6c1675c25b9827aacd08c02433cccde7780 third_party/googletest (release-1.8.0)
- 593e917c176b5bc5aafa57bf9f6030d749d91cd5 third_party/protobuf (v3.1.0-alpha-1-326-g593e917)
+ a6189acd18b00611c1dc7042299ad75486f08a1a third_party/protobuf (v3.3.0)
  cacf7f1d4e3d44d871b605da3b647f07d718623f third_party/zlib (v1.2.11)
  7691f773af79bf75a62d1863fd0f13ebf9dc51b1 third_party/cares/cares (1.12.0)
 EOF
diff --git a/tools/run_tests/stress_test/README.md b/tools/run_tests/stress_test/README.md
deleted file mode 100644
index cc82b875cddd3aabf6ee458c190bf7ea9972815d..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/README.md
+++ /dev/null
@@ -1,76 +0,0 @@
-Running Stress tests on Google Container Engine
-=======================================
-
-### **Glossary**:
-* GCP: Google Cloud Platform
-* GCE: Google Compute Engine
-* GKE: Google Container Engine
-* GCP console: https://console.cloud.google.com
-
-### **Setup Instructions**
-#### *On GCP:*
-1. Log in to GCP with your Google account (for example, your @gmail account) at https://cloud.google.com. If you do not have a Google account, you will have to create one first.
-2. Enable billing on Google Cloud Platform. Instructions [here](https://cloud.google.com/container-engine/docs/before-you-begin) (see the '*Enable billing*' section).
-3. Create a project from the [GCP console](https://console.cloud.google.com), i.e. click on the project dropdown box on the top right (to the right of the search box) and click the '*Create a project*' option.
-4. Enable the Container Engine API. Instructions [here](https://cloud.google.com/container-engine/docs/before-you-begin) (see the '*Enable the Container Engine API*' section). Alternatively, you can do the following:
-    - Click on the '*Products & Services*' icon on the top left (i.e. the icon with three small horizontal bars) and select '*API Manager*'
-    - Select the '*Container Engine API*' under '*Google Cloud APIs*' on the main page. Note that you might have to click on '*More*' under '*Google Cloud APIs*' to see the '*Container Engine API*' link
-    - Click on the '*Enable*' button. If the API is already enabled, the button's label will be '*Disable*' instead (do NOT click the button if its label is '*Disable*')
-5. Create a cluster from the GCP console.
-    - Go to the Container Engine section from the GCP console, i.e. click on the '*Products & Services*' icon on the top left (the icon with three small horizontal bars) and click on '*Container Engine*'
-    - Click '*Create Container Cluster*' and follow the instructions.
-    - The instructions for 'Name/Zone/MachineType' etc. are [here](https://cloud.google.com/container-engine/docs/clusters/operations) (**NOTE**: The page also has instructions for setting up default clusters and configuring `kubectl`. We will be doing that later)
-    - For the cluster size, a smaller size of < 10 GCE instances is good enough for our use cases - assuming that we are planning to run a reasonably small number of stress client instances. For the machine type, something like '2 vCPUs 7.5 GB' (available in the drop down box) should be good enough.
-    - **IMPORTANT**: Before hitting the '*Create*' button, click on the '*More*' link just above the '*Create*' button and select '*Enabled*' for BigQuery, '*Enabled*' for Cloud Platform and '*Read/Write*' for Cloud User Accounts.
-    - Create the cluster by clicking the '*Create*' button.
-
-#### *On your machine* (or the machine from which stress tests on GKE are launched):
-1. You need a working gRPC repository on your machine. If you do not have one, clone the grpc repository from GitHub (https://github.com/grpc/grpc) and follow the instructions [here](https://github.com/grpc/grpc/blob/master/INSTALL.md)
-2. Install Docker. Instructions [here](https://docs.docker.com/engine/installation/)
-3. Install Google Cloud SDK. Instructions [here](https://cloud.google.com/sdk/). This installs the `gcloud` tool
-4. Install `kubectl`, the Kubernetes command line tool, using `gcloud`, i.e.
-    - `$ gcloud components update kubectl`
-    - NOTE: If you are running this from a GCE instance, the command may fail with the following error:
-    ```
-     You cannot perform this action because this Cloud SDK installation is 
-     managed by an external package manager. If you would like to get the
-     latest version, please see our main download page at:
-
-     https://developers.google.com/cloud/sdk/
-
-     ERROR: (gcloud.components.update) The component manager is disabled for this installation
-    ```
-    -- If so, you will have to manually install Cloud SDK by doing the following
-    ```shell
-      $ # The following installs latest Cloud SDK and updates the PATH
-      $ # (Accept the default values when prompted)
-      $ curl https://sdk.cloud.google.com | bash
-      $ exec -l $SHELL
-      $ # Set the defaults. Pick the default GCE credentials when prompted (The service account
-      $ # name will have a name similar to: "xxx-compute@developer.gserviceaccount.com")
-      $ gcloud init
-    ``` 
-
-5. Install the Google Python client APIs:
-    - `$ sudo pip install --upgrade google-api-python-client`
-    -  **Note**: Do `$ sudo apt-get install python-pip` (or `$ easy_install -U pip`) if you do not have pip
-6. Install the `requests` Python package if you don't have it already by doing `sudo pip install requests`. More details about the `requests` package are [here](http://docs.python-requests.org/en/master/user/install/)
-7. Set the `gcloud` defaults: see the instructions [here](https://cloud.google.com/container-engine/docs/before-you-begin) under the "*Set gcloud defaults*" section
-    - Make sure you also fetch the cluster credentials for the `kubectl` command to use, i.e. `$ gcloud container clusters get-credentials CLUSTER_NAME`
-
-### **Launching Stress tests**
-
-The stress tests are launched by the following script (path is relative to the gRPC root directory):
-`tools/run_tests/stress_test/run_on_gke.py`
-
-You can find out more details by using the `--help` flag.
-  - `<grpc_root_dir>$ tools/run_tests/stress_test/run_on_gke.py --help`
-
-> **Example**
-> ```bash
-> $ # Change to the grpc root directory
-> $ cd $GRPC_ROOT
-> $ tools/run_tests/stress_test/run_on_gke.py --project_id=sree-gce --config_file=tools/run_tests/stress_test/configs/opt.json
-> ```
-
-> The above runs the stress test on GKE under the project `sree-gce` in the default cluster (that you set by the `gcloud` command earlier). The test settings (like the number of client instances, servers, the parameters to pass, test cases, etc.) are all loaded from the config file `$GRPC_ROOT/tools/run_tests/stress_test/configs/opt.json`
diff --git a/tools/run_tests/stress_test/STRESS_CLIENT_SPEC.md b/tools/run_tests/stress_test/STRESS_CLIENT_SPEC.md
deleted file mode 100644
index 9f079beebc012d6a8f2e5a672d87cebbba49f0bb..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/STRESS_CLIENT_SPEC.md
+++ /dev/null
@@ -1,25 +0,0 @@
-Stress Test client Specification
-=========================
-This document specifies the features a stress test client should implement in order to work with the stress testing framework. The stress test clients are executed against the existing interop test servers.
-
-**Requirements**
---------------
-**1.** A stress test client should be able to repeatedly execute one or more of the existing 'interop test cases'. It may just be a wrapper around the existing interop test client. The exact command line arguments the client should support are listed in _Table 1_ below.
-
-**2.** The stress test client must implement a metrics server defined by _[metrics.proto](https://github.com/grpc/grpc/blob/master/src/proto/grpc/testing/metrics.proto)_ and must expose _qps_ as a `Long`-valued Gauge. The client can track the overall _qps_ in one Gauge or in multiple Gauges (for example: One per Channel or Stub).
- The framework periodically queries the _qps_ by calling the `GetAllGauges()` method (the framework assumes that all the returned Gauges are _qps_ Gauges and adds them up to determine the final qps) and uses this to determine whether the stress test client is running, crashed, or stalled.
-> *Note:* In this context, the term _**qps**_ means _interop test cases per second_ (not _messages per second_ or _rpc calls per second_)
-
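-For illustration, here is a minimal sketch of the framework-side qps query in Python, assuming `metrics_pb2`/`metrics_pb2_grpc` modules generated from _metrics.proto_ (the module and stub names are assumptions, not part of this spec):
-
-```python
-# Hypothetical sketch: query GetAllGauges() and, like the framework, treat
-# every returned Gauge as a qps Gauge and sum them.
-import grpc
-
-import metrics_pb2
-import metrics_pb2_grpc
-
-
-def total_qps(metrics_server_address):
-  channel = grpc.insecure_channel(metrics_server_address)
-  stub = metrics_pb2_grpc.MetricsServiceStub(channel)
-  # GetAllGauges streams one GaugeResponse per Gauge.
-  return sum(gauge.long_value
-             for gauge in stub.GetAllGauges(metrics_pb2.EmptyMessage()))
-```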
-
-**Table 1:** Command line arguments that should be supported by the stress test client.
-
->_**Note** The current C++ [stress client](https://github.com/grpc/grpc/blob/master/test/cpp/interop/stress_test.cc) supports more flags than those listed here but those flags will soon be deprecated._
-
-Parameter             |                    Description
-----------------------|---------------------------------
-`--server_addresses`    | The stress client should accept a list of server addresses in the following format:<br> ```<name_1>:<port_1>,<name_2>:<port_2>..<name_N>:<port_N>``` <br> _Note:_ `<name>` can be either server name or IP address.<br><br>_Type:_ string <br>_default:_ ```localhost:8080``` <br>_Example:_ ``foo.foobar.com:8080,bar.foobar.com:8080`` <br><br> Currently, the stress test framework only passes one server address to the client.
-`--test_cases`        |   List of test cases along with the relative weights in the following format:<br> `<testcase_1:w_1>,<testcase_2:w_2>...<testcase_n:w_n>`. <br> The test case names are the same as those currently used by the interop clients<br><br>_Type:_ string <br>_Example:_ `empty_unary:20,large_unary:10,empty_stream:70` <br>(The stress client would then make `empty_unary` calls 20% of the time, `large_unary` calls 10% of the time and `empty_stream` calls 70% of the time. A weighted-selection sketch follows this table.) <br>_Note:_ The weights need not add up to 100.
-`--test_duration_secs`      | The test duration in seconds. A value of -1 means that the test should run forever until forcefully terminated. <br>_Type:_ int <br>_default:_ -1
-`--num_channels_per_server` | Number of channels (i.e. connections) to each server. <br> _Type:_ int <br> _default:_ 1 <br><br> _Note:_ Unfortunately, the term `channel` is used differently in `grpc-java` and `C based grpc`. In this context, this really means "number of connections to the server"
-`--num_stubs_per_channel`  | Number of client stubs per each connection to the server.<br>_Type:_ int <br>_default:_ 1
-`--metrics_port`            | The port at which the stress client exposes [QPS metrics](https://github.com/grpc/grpc/blob/master/src/proto/grpc/testing/metrics.proto). <br>_Type:_ int <br>_default:_ 8081
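-
-For illustration, a minimal sketch (not part of this spec) of the weighted test-case selection described in the `--test_cases` row above:
-
-```python
-import random
-
-
-def pick_test_case(weighted_cases):
-  # weighted_cases is the parsed --test_cases value, e.g.
-  # [('empty_unary', 20), ('large_unary', 10), ('empty_stream', 70)].
-  # Weights need not sum to 100; selection is proportional to weight.
-  total = sum(weight for _, weight in weighted_cases)
-  point = random.uniform(0, total)
-  for name, weight in weighted_cases:
-    if point <= weight:
-      return name
-    point -= weight
-  return weighted_cases[-1][0]
-```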
diff --git a/tools/run_tests/stress_test/configs/asan.json b/tools/run_tests/stress_test/configs/asan.json
deleted file mode 100644
index 7ae11ccbf1e77a54d29823e74ad8451e454f5d18..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/asan.json
+++ /dev/null
@@ -1,85 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_cxx_asan" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_cxx",
-      "buildType": "asan"
-    }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 120,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true"
-        }
-      }
-    },
-    "templates": {
-      "cxx_client_asan": {
-        "baseTemplate": "default",
-        "stressClientCmd": ["/var/local/git/grpc/bins/asan/stress_test"],
-        "metricsClientCmd": ["/var/local/git/grpc/bins/asan/metrics_client"]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        }
-      }
-    },
-    "templates": {
-      "cxx_server_asan": {
-        "baseTemplate": "default",
-        "stressServerCmd": ["/var/local/git/grpc/bins/asan/interop_server"]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "stress-server-asan": {
-        "serverTemplate": "cxx_server_asan",
-        "dockerImage": "grpc_stress_cxx_asan",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "stress-client-asan": {
-        "clientTemplate": "cxx_client_asan",
-        "dockerImage": "grpc_stress_cxx_asan",
-        "numInstances": 5,
-        "serverPodSpec": "stress-server-asan"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 60,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8003,
-    "datasetIdNamePrefix": "stress_test_asan",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/configs/csharp.json b/tools/run_tests/stress_test/configs/csharp.json
deleted file mode 100644
index c438e08964cca385d05a2204d96e5fa40ee0cb02..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/csharp.json
+++ /dev/null
@@ -1,91 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_csharp" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_csharp"
-    }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 100,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true",
-          "deadline_secs": 60
-        }
-      }
-    },
-    "templates": {
-      "csharp_client": {
-        "baseTemplate": "default",
-        "stressClientCmd": [
-          "mono",
-          "/var/local/git/grpc/src/csharp/Grpc.IntegrationTesting.StressClient/bin/Debug/Grpc.IntegrationTesting.StressClient.exe"
-        ],
-        "metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        }
-      }
-    },
-    "templates": {
-      "csharp_server": {
-        "baseTemplate": "default",
-        "stressServerCmd": [
-          "mono",
-          "/var/local/git/grpc/src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/Grpc.IntegrationTesting.Server.exe"
-        ]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "stress-server-csharp": {
-        "serverTemplate": "csharp_server",
-        "dockerImage": "grpc_stress_csharp",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "stress-client-csharp": {
-        "clientTemplate": "csharp_client",
-        "dockerImage": "grpc_stress_csharp",
-        "numInstances": 10,
-        "serverPodSpec": "stress-server-csharp"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 100,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8009,
-    "datasetIdNamePrefix": "stress_test_csharp",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/configs/go.json b/tools/run_tests/stress_test/configs/go.json
deleted file mode 100644
index f1b2b523d3cb0cec2679f8df278f63f00ae9fb4b..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/go.json
+++ /dev/null
@@ -1,96 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_go" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_go"
-    }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 60,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true"
-        }
-      }
-    },
-    "templates": {
-      "go_client": {
-        "baseTemplate": "default",
-        "stressClientCmd": [
-          "go",
-          "run",
-          "/go/src/google.golang.org/grpc/stress/client/main.go"
-        ],
-        "metricsClientCmd": [
-          "go",
-          "run",
-          "/go/src/google.golang.org/grpc/stress/metrics_client/main.go"
-        ]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        }
-      }
-    },
-    "templates": {
-      "go_server": {
-        "baseTemplate": "default",
-        "stressServerCmd": [
-          "go",
-          "run",
-          "/go/src/google.golang.org/grpc/interop/server/server.go"
-        ]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "go-stress-server": {
-        "serverTemplate": "go_server",
-        "dockerImage": "grpc_stress_go",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "go-stress-client": {
-        "clientTemplate": "go_client",
-        "dockerImage": "grpc_stress_go",
-        "numInstances": 15,
-        "serverPodSpec": "go-stress-server"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 60,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8007,
-    "datasetIdNamePrefix": "stress_test_go",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/configs/java.json b/tools/run_tests/stress_test/configs/java.json
deleted file mode 100644
index 92af63c6b55189b9779ef2636445a022095c00c6..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/java.json
+++ /dev/null
@@ -1,98 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_java" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_java"
-    }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 100,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true",
-          "deadline_secs": 60
-        },
-        "env": {
-          "STRESSTEST_CLIENT_OPTS":"-Xmx3g -Xms3g -XX:NewSize=1500m -XX:MaxNewSize=1500m -XX:+UseConcMarkSweepGC"
-        }
-      }
-    },
-    "templates": {
-      "java_client": {
-        "baseTemplate": "default",
-        "stressClientCmd": [
-          "/var/local/git/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/stresstest-client"
-        ],
-        "metricsClientCmd": [
-          "/var/local/git/grpc/bins/opt/metrics_client"
-        ]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080,
-          "use_tls": "false"
-        },
-        "env": {
-          "TEST_SERVER_OPTS":"-Xmx3g -Xms3g -XX:NewSize=1500m -XX:MaxNewSize=1500m  -XX:+UseConcMarkSweepGC"
-        }
-      }
-    },
-    "templates": {
-      "java_server": {
-        "baseTemplate": "default",
-        "stressServerCmd": [
-          "/var/local/git/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/test-server"
-        ]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "java-stress-server": {
-        "serverTemplate": "java_server",
-        "dockerImage": "grpc_stress_java",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "java-stress-client": {
-        "clientTemplate": "java_client",
-        "dockerImage": "grpc_stress_java",
-        "numInstances": 10,
-        "serverPodSpec": "java-stress-server"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 100,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8008,
-    "datasetIdNamePrefix": "stress_test_java",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/configs/node-cxx.json b/tools/run_tests/stress_test/configs/node-cxx.json
deleted file mode 100644
index 094c1236e71b43a8af96e9470cb3b2a6099088cf..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/node-cxx.json
+++ /dev/null
@@ -1,97 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_cxx_opt" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_cxx",
-      "buildType": "opt"
-    },
-   "grpc_stress_node": {
-     "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-     "dockerFileDir": "grpc_interop_stress_node"
-   }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 60,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true"
-        }
-      }
-    },
-    "templates": {
-      "node_client": {
-        "baseTemplate": "default",
-        "stressClientCmd": [
-          "/var/local/git/grpc/tools/gcp/stress_test/run_node.sh",
-          "node",
-          "/var/local/git/grpc/src/node/stress/stress_client.js"
-        ],
-        "metricsClientCmd": [
-          "/var/local/git/grpc/tools/gcp/stress_test/run_node.sh",
-          "node",
-          "/var/local/git/grpc/src/node/stress/metrics_client.js"
-        ]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        }
-      }
-    },
-    "templates": {
-      "cxx_server_opt": {
-        "baseTemplate": "default",
-        "stressServerCmd": ["/var/local/git/grpc/bins/opt/interop_server"]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "stress-server-cxx-opt": {
-        "serverTemplate": "cxx_server_opt",
-        "dockerImage": "grpc_stress_cxx_opt",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "stress-client-node": {
-        "clientTemplate": "node_client",
-        "dockerImage": "grpc_stress_node",
-        "numInstances": 20,
-        "serverPodSpec": "stress-server-cxx-opt"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 60,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8006,
-    "datasetIdNamePrefix": "stress_test_node_cxx_opt",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/configs/node.json b/tools/run_tests/stress_test/configs/node.json
deleted file mode 100644
index 85eb9e0003158e63ae696bbd1035c7240aa23395..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/node.json
+++ /dev/null
@@ -1,96 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_node" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_node"
-    }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 60,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true"
-        }
-      }
-    },
-    "templates": {
-      "node_client": {
-        "baseTemplate": "default",
-        "stressClientCmd": [
-          "/var/local/git/grpc/tools/gcp/stress_test/run_node.sh",
-          "node",
-          "/var/local/git/grpc/src/node/stress/stress_client.js"
-        ],
-        "metricsClientCmd": [
-          "/var/local/git/grpc/tools/gcp/stress_test/run_node.sh",
-          "node",
-          "/var/local/git/grpc/src/node/stress/metrics_client.js"
-        ]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        }
-      }
-    },
-    "templates": {
-      "node_server": {
-        "baseTemplate": "default",
-        "stressServerCmd": [
-          "/var/local/git/grpc/tools/gcp/stress_test/run_node.sh",
-          "node",
-          "/var/local/git/grpc/src/node/interop/interop_server.js"
-        ]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "node-stress-server": {
-        "serverTemplate": "node_server",
-        "dockerImage": "grpc_stress_node",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "node-stress-client": {
-        "clientTemplate": "node_client",
-        "dockerImage": "grpc_stress_node",
-        "numInstances": 15,
-        "serverPodSpec": "node-stress-server"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 60,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8005,
-    "datasetIdNamePrefix": "stress_test_node",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/configs/opt-tsan-asan.json b/tools/run_tests/stress_test/configs/opt-tsan-asan.json
deleted file mode 100644
index fcb3678c02facb59442590ae8284ee3ca8c6e77b..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/opt-tsan-asan.json
+++ /dev/null
@@ -1,134 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_cxx_opt" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_cxx",
-      "buildType": "opt"
-    },
-    "grpc_stress_cxx_tsan": {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_cxx",
-      "buildType": "tsan"
-    },
-    "grpc_stress_cxx_asan": {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_cxx",
-      "buildType": "asan"
-    }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 60,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true"
-        }
-      }
-    },
-    "templates": {
-      "cxx_client_opt": {
-        "baseTemplate": "default",
-        "stressClientCmd": ["/var/local/git/grpc/bins/opt/stress_test"],
-        "metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
-      },
-      "cxx_client_tsan": {
-        "baseTemplate": "default",
-        "stressClientCmd": ["/var/local/git/grpc/bins/tsan/stress_test"],
-        "metricsClientCmd": ["/var/local/git/grpc/bins/tsan/metrics_client"]
-      },
-    "cxx_client_asan": {
-        "baseTemplate": "default",
-        "stressClientCmd": ["/var/local/git/grpc/bins/asan/stress_test"],
-        "metricsClientCmd": ["/var/local/git/grpc/bins/asan/metrics_client"]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        }
-      }
-    },
-    "templates": {
-      "cxx_server_opt": {
-        "baseTemplate": "default",
-        "stressServerCmd": ["/var/local/git/grpc/bins/opt/interop_server"]
-      },
-      "cxx_server_tsan": {
-        "baseTemplate": "default",
-        "stressServerCmd": ["/var/local/git/grpc/bins/tsan/interop_server"]
-      },
-    "cxx_server_asan": {
-        "baseTemplate": "default",
-        "stressServerCmd": ["/var/local/git/grpc/bins/asan/interop_server"]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "stress-server-opt": {
-        "serverTemplate": "cxx_server_opt",
-        "dockerImage": "grpc_stress_cxx_opt",
-        "numInstances": 1
-      },
-      "stress-server-tsan": {
-        "serverTemplate": "cxx_server_tsan",
-        "dockerImage": "grpc_stress_cxx_tsan",
-        "numInstances": 1
-      },
-      "stress-server-asan": {
-        "serverTemplate": "cxx_server_asan",
-        "dockerImage": "grpc_stress_cxx_asan",
-        "numInstances": 1
-      }
-   },
-
-    "clientPodSpecs": {
-      "stress-client-opt": {
-        "clientTemplate": "cxx_client_opt",
-        "dockerImage": "grpc_stress_cxx_opt",
-        "numInstances": 5,
-        "serverPodSpec": "stress-server-opt"
-      },
-      "stress-client-tsan": {
-        "clientTemplate": "cxx_client_tsan",
-        "dockerImage": "grpc_stress_cxx_tsan",
-        "numInstances": 10,
-        "serverPodSpec": "stress-server-tsan"
-      },
-      "stress-client-asan": {
-        "clientTemplate": "cxx_client_asan",
-        "dockerImage": "grpc_stress_cxx_asan",
-        "numInstances": 10,
-        "serverPodSpec": "stress-server-asan"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 60,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8004,
-    "datasetIdNamePrefix": "stress_test_opt_tsan",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
diff --git a/tools/run_tests/stress_test/configs/opt.json b/tools/run_tests/stress_test/configs/opt.json
deleted file mode 100644
index 5e0e930d45307970b27adcdc3149dce03f311a79..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/opt.json
+++ /dev/null
@@ -1,85 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_cxx_opt" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_cxx",
-      "buildType": "opt"
-    }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 60,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true"
-        }
-      }
-    },
-    "templates": {
-      "cxx_client_opt": {
-        "baseTemplate": "default",
-        "stressClientCmd": ["/var/local/git/grpc/bins/opt/stress_test"],
-        "metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        }
-      }
-    },
-    "templates": {
-      "cxx_server_opt": {
-        "baseTemplate": "default",
-        "stressServerCmd": ["/var/local/git/grpc/bins/opt/interop_server"]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "stress-server-opt": {
-        "serverTemplate": "cxx_server_opt",
-        "dockerImage": "grpc_stress_cxx_opt",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "stress-client-opt": {
-        "clientTemplate": "cxx_client_opt",
-        "dockerImage": "grpc_stress_cxx_opt",
-        "numInstances": 15,
-        "serverPodSpec": "stress-server-opt"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 60,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8001,
-    "datasetIdNamePrefix": "stress_test_opt",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/configs/php-cxx.json b/tools/run_tests/stress_test/configs/php-cxx.json
deleted file mode 100644
index 03254b368c86c1ca87850e9b0bb66260be40184d..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/php-cxx.json
+++ /dev/null
@@ -1,93 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_cxx_opt" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_cxx",
-      "buildType": "opt"
-    },
-   "grpc_stress_php": {
-     "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-     "dockerFileDir": "grpc_interop_stress_php"
-   }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 60,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081"
-        }
-      }
-    },
-    "templates": {
-      "php_client": {
-        "baseTemplate": "default",
-        "stressClientCmd": [
-          "/var/local/git/grpc/src/php/bin/stress_client.sh"
-        ],
-        "metricsClientCmd": [
-          "php",
-          "/var/local/git/grpc/src/php/tests/interop/metrics_client.php"
-        ]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        }
-      }
-    },
-    "templates": {
-      "cxx_server_opt": {
-        "baseTemplate": "default",
-        "stressServerCmd": ["/var/local/git/grpc/bins/opt/interop_server"]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "stress-server-cxx-php": {
-        "serverTemplate": "cxx_server_opt",
-        "dockerImage": "grpc_stress_cxx_opt",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "stress-client-php": {
-        "clientTemplate": "php_client",
-        "dockerImage": "grpc_stress_php",
-        "numInstances": 20,
-        "serverPodSpec": "stress-server-cxx-php"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 60,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8010,
-    "datasetIdNamePrefix": "stress_test_php_cxx_opt",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/configs/python.json b/tools/run_tests/stress_test/configs/python.json
deleted file mode 100644
index 4f85de1d5f6d40d493f98301dce2016d09e71010..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/python.json
+++ /dev/null
@@ -1,98 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_python" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_python"
-    }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 60,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true"
-        },
-        "env": {
-          "PYTHONPATH": "/var/local/git/grpc/src/python/gens:/var/local/git/grpc/src/python/grpcio",
-          "LD_LIBRARY_PATH":"/var/local/git/grpc/libs/opt"
-        }
-      }
-    },
-    "templates": {
-      "python_client": {
-        "baseTemplate": "default",
-        "stressClientCmd": [
-          "python",
-          "/var/local/git/grpc/src/python/grpcio/tests/stress/client.py"
-        ],
-        "metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        },
-        "env": {
-          "PYTHONPATH": "/var/local/git/grpc/src/python/gens:/var/local/git/grpc/src/python/grpcio",
-          "LD_LIBRARY_PATH":"/var/local/git/grpc/libs/opt"
-        }
-      }
-    },
-    "templates": {
-      "python_server": {
-        "baseTemplate": "default",
-        "stressServerCmd": [
-          "python",
-          "/var/local/git/grpc/src/python/grpcio/tests/interop/server.py"
-        ]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "python-stress-server": {
-        "serverTemplate": "python_server",
-        "dockerImage": "grpc_stress_python",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "python-stress-client": {
-        "clientTemplate": "python_client",
-        "dockerImage": "grpc_stress_python",
-        "numInstances": 5,
-        "serverPodSpec": "python-stress-server"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 60,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8011,
-    "datasetIdNamePrefix": "stress_test_python",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/configs/ruby.json b/tools/run_tests/stress_test/configs/ruby.json
deleted file mode 100644
index 7e2afcbb69e5f2ef6a048bf04205d345860f5b98..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/ruby.json
+++ /dev/null
@@ -1,92 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_ruby" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_ruby"
-    }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 60,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true"
-        }
-      }
-    },
-    "templates": {
-      "ruby_client": {
-        "baseTemplate": "default",
-        "stressClientCmd": [
-          "/var/local/git/grpc/tools/gcp/stress_test/run_ruby.sh",
-          "ruby",
-          "/var/local/git/grpc/src/ruby/stress/stress_client.rb"
-        ],
-        "metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        }
-      }
-    },
-    "templates": {
-      "ruby_server": {
-        "baseTemplate": "default",
-        "stressServerCmd": [
-          "/var/local/git/grpc/tools/gcp/stress_test/run_ruby.sh",
-          "ruby",
-          "/var/local/git/grpc/src/ruby/pb/test/server.rb"
-        ]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "stress-server-ruby": {
-        "serverTemplate": "ruby_server",
-        "dockerImage": "grpc_stress_ruby",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "stress-client-ruby": {
-        "clientTemplate": "ruby_client",
-        "dockerImage": "grpc_stress_ruby",
-        "numInstances": 10,
-        "serverPodSpec": "stress-server-ruby"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 60,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8001,
-    "datasetIdNamePrefix": "stress_test_ruby",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/configs/tsan.json b/tools/run_tests/stress_test/configs/tsan.json
deleted file mode 100644
index abc759c79daef226d0894a2c1271b9c3347f4aa1..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/configs/tsan.json
+++ /dev/null
@@ -1,85 +0,0 @@
-{
-  "dockerImages": {
-    "grpc_stress_cxx_tsan" : {
-      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
-      "dockerFileDir": "grpc_interop_stress_cxx",
-      "buildType": "tsan"
-    }
-  },
-
-  "clientTemplates": {
-    "baseTemplates": {
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
-        "pollIntervalSecs": 120,
-        "clientArgs": {
-          "num_channels_per_server":5,
-          "num_stubs_per_channel":10,
-          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
-          "metrics_port": 8081
-        },
-        "metricsPort": 8081,
-        "metricsArgs": {
-          "metrics_server_address": "localhost:8081",
-          "total_only": "true"
-        }
-      }
-    },
-    "templates": {
-      "cxx_client_tsan": {
-        "baseTemplate": "default",
-        "stressClientCmd": ["/var/local/git/grpc/bins/tsan/stress_test"],
-        "metricsClientCmd": ["/var/local/git/grpc/bins/tsan/metrics_client"]
-      }
-    }
-  },
-
-  "serverTemplates": {
-    "baseTemplates":{
-      "default": {
-        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
-        "serverPort": 8080,
-        "serverArgs": {
-          "port": 8080
-        }
-      }
-    },
-    "templates": {
-      "cxx_server_tsan": {
-        "baseTemplate": "default",
-        "stressServerCmd": ["/var/local/git/grpc/bins/tsan/interop_server"]
-      }
-    }
-  },
-
-  "testMatrix": {
-    "serverPodSpecs": {
-      "stress-server-tsan": {
-        "serverTemplate": "cxx_server_tsan",
-        "dockerImage": "grpc_stress_cxx_tsan",
-        "numInstances": 1
-      }
-    },
-
-    "clientPodSpecs": {
-      "stress-client-tsan": {
-        "clientTemplate": "cxx_client_tsan",
-        "dockerImage": "grpc_stress_cxx_tsan",
-        "numInstances": 5,
-        "serverPodSpec": "stress-server-tsan"
-      }
-    }
-  },
-
-  "globalSettings": {
-    "buildDockerImages": true,
-    "pollIntervalSecs": 60,
-    "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8002,
-    "datasetIdNamePrefix": "stress_test_tsan",
-    "summaryTableId": "summary",
-    "qpsTableId": "qps",
-    "podWarmupSecs": 60
-  }
-}
-
diff --git a/tools/run_tests/stress_test/print_summary.py b/tools/run_tests/stress_test/print_summary.py
deleted file mode 100755
index 6f4ada2f4fd23a6e341323f25f2f2b198ae71dee..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/print_summary.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
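-"""Prints the qps and summary BigQuery records of a given stress test run."""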
-import argparse
-import os
-import sys
-
-stress_test_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../../gcp/stress_test'))
-sys.path.append(stress_test_utils_dir)
-from stress_test_utils import BigQueryHelper
-
-argp = argparse.ArgumentParser(
-    description='Print summary tables',
-    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-argp.add_argument('--gcp_project_id',
-                  required=True,
-                  help='The Google Cloud Platform Project Id')
-argp.add_argument('--dataset_id', type=str, required=True)
-argp.add_argument('--run_id', type=str, required=True)
-argp.add_argument('--summary_table_id', type=str, default='summary')
-argp.add_argument('--qps_table_id', type=str, default='qps')
-argp.add_argument('--summary_only', action='store_true', default=False)
-
-if __name__ == '__main__':
-  args = argp.parse_args()
-  bq_helper = BigQueryHelper(args.run_id, '', '', args.gcp_project_id,
-                             args.dataset_id, args.summary_table_id,
-                             args.qps_table_id)
-  bq_helper.initialize()
-  if not args.summary_only:
-    bq_helper.print_qps_records()
-  bq_helper.print_summary_records()
diff --git a/tools/run_tests/stress_test/run_on_gke.py b/tools/run_tests/stress_test/run_on_gke.py
deleted file mode 100755
index b190ebde7a4f44d697b7eab4b23515755a6553d6..0000000000000000000000000000000000000000
--- a/tools/run_tests/stress_test/run_on_gke.py
+++ /dev/null
@@ -1,674 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015-2016, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
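-"""Driver for running gRPC stress tests on GKE.
-
-Builds and pushes the required docker images, launches the server and client
-pods described in the config file, monitors the test results in BigQuery and
-tears the pods down once the run succeeds."""
-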
-from __future__ import print_function
-
-import argparse
-import datetime
-import json
-import os
-import subprocess
-import sys
-import time
-
-stress_test_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../../gcp/stress_test'))
-sys.path.append(stress_test_utils_dir)
-from stress_test_utils import BigQueryHelper
-
-kubernetes_api_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../../gcp/utils'))
-sys.path.append(kubernetes_api_dir)
-
-import kubernetes_api
-
-
-class GlobalSettings:
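-  """ Contains the global test settings parsed from the 'globalSettings'
-  section of the config file """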
-
-  def __init__(self, gcp_project_id, build_docker_images,
-               test_poll_interval_secs, test_duration_secs,
-               kubernetes_proxy_port, dataset_id_prefix, summary_table_id,
-               qps_table_id, pod_warmup_secs):
-    self.gcp_project_id = gcp_project_id
-    self.build_docker_images = build_docker_images
-    self.test_poll_interval_secs = test_poll_interval_secs
-    self.test_duration_secs = test_duration_secs
-    self.kubernetes_proxy_port = kubernetes_proxy_port
-    self.dataset_id_prefix = dataset_id_prefix
-    self.summary_table_id = summary_table_id
-    self.qps_table_id = qps_table_id
-    self.pod_warmup_secs = pod_warmup_secs
-
-
-class ClientTemplate:
-  """ Contains all the common settings that are used by a stress client """
-
-  def __init__(self, name, stress_client_cmd, metrics_client_cmd, metrics_port,
-               wrapper_script_path, poll_interval_secs, client_args_dict,
-               metrics_args_dict, will_run_forever, env_dict):
-    self.name = name
-    self.stress_client_cmd = stress_client_cmd
-    self.metrics_client_cmd = metrics_client_cmd
-    self.metrics_port = metrics_port
-    self.wrapper_script_path = wrapper_script_path
-    self.poll_interval_secs = poll_interval_secs
-    self.client_args_dict = client_args_dict
-    self.metrics_args_dict = metrics_args_dict
-    self.will_run_forever = will_run_forever
-    self.env_dict = env_dict
-
-
-class ServerTemplate:
-  """ Contains all the common settings used by a stress server """
-
-  def __init__(self, name, server_cmd, wrapper_script_path, server_port,
-               server_args_dict, will_run_forever, env_dict):
-    self.name = name
-    self.server_cmd = server_cmd
-    self.wrapper_script_path = wrapper_script_path
-    self.server_port = server_port
-    self.server_args_dict = server_args_dict
-    self.will_run_forever = will_run_forever
-    self.env_dict = env_dict
-
-
-class DockerImage:
-  """ Represents properties of a Docker image. Provides methods to build the
-  image and push it to GKE registry
-  """
-
-  def __init__(self, gcp_project_id, image_name, build_script_path,
-               dockerfile_dir, build_type):
-    """Args:
-
-      image_name: The docker image name
-      tag_name: The additional tag name. This is the name used when pushing the
-        docker image to GKE registry
-      build_script_path: The path to the build script that builds this docker
-      image
-      dockerfile_dir: The name of the directory under
-      '<grpc_root>/tools/dockerfile' that contains the dockerfile
-    """
-    self.image_name = image_name
-    self.gcp_project_id = gcp_project_id
-    self.build_script_path = build_script_path
-    self.dockerfile_dir = dockerfile_dir
-    self.build_type = build_type
-    self.tag_name = self._make_tag_name(gcp_project_id, image_name)
-
-  def _make_tag_name(self, project_id, image_name):
-    return 'gcr.io/%s/%s' % (project_id, image_name)
-
-  def build_image(self):
-    print('Building docker image: %s (tag: %s)' % (self.image_name,
-                                                   self.tag_name))
-    os.environ['INTEROP_IMAGE'] = self.image_name
-    os.environ['INTEROP_IMAGE_REPOSITORY_TAG'] = self.tag_name
-    os.environ['BASE_NAME'] = self.dockerfile_dir
-    os.environ['BUILD_TYPE'] = self.build_type
-    print('DEBUG: path: ', self.build_script_path)
-    if subprocess.call(args=[self.build_script_path]) != 0:
-      print('Error in building the Docker image')
-      return False
-    return True
-
-  def push_to_gke_registry(self):
-    cmd = ['gcloud', 'docker', 'push', self.tag_name]
-    print('Pushing %s to the GKE registry..' % self.tag_name)
-    if subprocess.call(args=cmd) != 0:
-      print('Error in pushing the image %s to the GKE registry' %
-            self.tag_name)
-      return False
-    return True
-
-
-class ServerPodSpec:
-  """ Contains the information required to launch server pods. """
-
-  def __init__(self, name, server_template, docker_image, num_instances):
-    self.name = name
-    self.template = server_template
-    self.docker_image = docker_image
-    self.num_instances = num_instances
-
-  def pod_names(self):
-    """ Return a list of names of server pods to create. """
-    return ['%s-%d' % (self.name, i) for i in range(1, self.num_instances + 1)]
-
-  def server_addresses(self):
-    """ Return string of server addresses in the following format:
-      '<server_pod_name_1>:<server_port>,<server_pod_name_2>:<server_port>...'
-    """
-    return ','.join(['%s:%d' % (pod_name, self.template.server_port)
-                     for pod_name in self.pod_names()])
-
-
-class ClientPodSpec:
-  """ Contains the information required to launch client pods """
-
-  def __init__(self, name, client_template, docker_image, num_instances,
-               server_addresses):
-    self.name = name
-    self.template = client_template
-    self.docker_image = docker_image
-    self.num_instances = num_instances
-    self.server_addresses = server_addresses
-
-  def pod_names(self):
-    """ Return a list of names of client pods to create """
-    return ['%s-%d' % (self.name, i) for i in range(1, self.num_instances + 1)]
-
-  # The client args in the template do not have server addresses. This function
-  # adds the server addresses and returns the updated client args
-  def get_client_args_dict(self):
-    args_dict = self.template.client_args_dict.copy()
-    args_dict['server_addresses'] = self.server_addresses
-    return args_dict
-
-
-class Gke:
-  """ Class that has helper methods to interact with GKE """
-
-  class KubernetesProxy:
-    """Class to start a proxy on localhost to talk to the Kubernetes API server"""
-
-    def __init__(self, port):
-      cmd = ['kubectl', 'proxy', '--port=%d' % port]
-      self.p = subprocess.Popen(args=cmd)
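-      # Give the proxy a couple of seconds to start up before it is used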
-      time.sleep(2)
-      print('\nStarted kubernetes proxy on port: %d' % port)
-
-    def __del__(self):
-      if self.p is not None:
-        print('Shutting down Kubernetes proxy..')
-        self.p.kill()
-
-  def __init__(self, project_id, run_id, dataset_id, summary_table_id,
-               qps_table_id, kubernetes_port):
-    self.project_id = project_id
-    self.run_id = run_id
-    self.dataset_id = dataset_id
-    self.summary_table_id = summary_table_id
-    self.qps_table_id = qps_table_id
-
-    # The environment variables we would like to pass to every pod (both client
-    # and server) launched in GKE
-    self.gke_env = {
-        'RUN_ID': self.run_id,
-        'GCP_PROJECT_ID': self.project_id,
-        'DATASET_ID': self.dataset_id,
-        'SUMMARY_TABLE_ID': self.summary_table_id,
-        'QPS_TABLE_ID': self.qps_table_id
-    }
-
-    self.kubernetes_port = kubernetes_port
-    # Start kubernetes proxy
-    self.kubernetes_proxy = Gke.KubernetesProxy(kubernetes_port)
-
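-  # Converts an args dictionary such as {'port': 8080, 'total_only': 'true'}
-  # into the command-line string '--port=8080 --total_only=true'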
-  def _args_dict_to_str(self, args_dict):
-    return ' '.join('--%s=%s' % (k, args_dict[k]) for k in args_dict.keys())
-
-  def launch_servers(self, server_pod_spec):
-    is_success = True
-
-    # The command to run inside the container is the wrapper script (which then
-    # launches the actual server)
-    container_cmd = server_pod_spec.template.wrapper_script_path
-
-    # The parameters to the wrapper script (defined in
-    # server_pod_spec.template.wrapper_script_path) are injected into the
-    # container via environment variables
-    server_env = self.gke_env.copy()
-    server_env.update(server_pod_spec.template.env_dict)
-    server_env.update({
-        'STRESS_TEST_IMAGE_TYPE': 'SERVER',
-        'STRESS_TEST_CMD': server_pod_spec.template.server_cmd,
-        'STRESS_TEST_ARGS_STR': self._args_dict_to_str(
-            server_pod_spec.template.server_args_dict),
-        'WILL_RUN_FOREVER': str(server_pod_spec.template.will_run_forever)
-    })
-
-    for pod_name in server_pod_spec.pod_names():
-      server_env['POD_NAME'] = pod_name
-      print('Creating server: %s' % pod_name)
-      is_success = kubernetes_api.create_pod_and_service(
-          'localhost',
-          self.kubernetes_port,
-          'default',  # Use 'default' namespace
-          pod_name,
-          server_pod_spec.docker_image.tag_name,
-          [server_pod_spec.template.server_port],  # Ports to expose on the pod
-          [container_cmd],
-          [],  # Args list is empty since we are passing all args via env variables
-          server_env,
-          True  # Headless = True for server so that GKE creates a DNS record for pod_name
-      )
-      if not is_success:
-        print('Error in launching server: %s' % pod_name)
-        break
-
-    if is_success:
-      print('Successfully created server(s)')
-
-    return is_success
-
-  def launch_clients(self, client_pod_spec):
-    is_success = True
-
-    # The command to run inside the container is the wrapper script (which then
-    # launches the actual stress client)
-    container_cmd = client_pod_spec.template.wrapper_script_path
-
-    # The parameters to the wrapper script (defined in
-    # client_pod_spec.template.wrapper_script_path) are injected into the
-    # container via environment variables
-    client_env = self.gke_env.copy()
-    client_env.update(client_pod_spec.template.env_dict)
-    client_env.update({
-        'STRESS_TEST_IMAGE_TYPE': 'CLIENT',
-        'STRESS_TEST_CMD': client_pod_spec.template.stress_client_cmd,
-        'STRESS_TEST_ARGS_STR': self._args_dict_to_str(
-            client_pod_spec.get_client_args_dict()),
-        'METRICS_CLIENT_CMD': client_pod_spec.template.metrics_client_cmd,
-        'METRICS_CLIENT_ARGS_STR': self._args_dict_to_str(
-            client_pod_spec.template.metrics_args_dict),
-        'POLL_INTERVAL_SECS': str(client_pod_spec.template.poll_interval_secs),
-        'WILL_RUN_FOREVER': str(client_pod_spec.template.will_run_forever)
-    })
-
-    for pod_name in client_pod_spec.pod_names():
-      client_env['POD_NAME'] = pod_name
-      print('Creating client: %s' % pod_name)
-      is_success = kubernetes_api.create_pod_and_service(
-          'localhost',
-          self.kubernetes_port,
-          'default',  # default namespace
-          pod_name,
-          client_pod_spec.docker_image.tag_name,
-          [client_pod_spec.template.metrics_port],  # Ports to expose on the pod
-          [container_cmd],
-          [],  # Empty args list since all args are passed via env variables
-          client_env,
-          True  # Client is a headless service (no need for an external ip)
-      )
-
-      if not is_success:
-        print('Error in launching client %s' % pod_name)
-        break
-
-    if is_success:
-      print('Successfully created all client(s)')
-
-    return is_success
-
-  def _delete_pods(self, pod_name_list):
-    is_success = True
-    for pod_name in pod_name_list:
-      print('Deleting %s' % pod_name)
-      is_success = kubernetes_api.delete_pod_and_service(
-          'localhost',
-          self.kubernetes_port,
-          'default',  # default namespace
-          pod_name)
-
-      if not is_success:
-        print('Error in deleting pod %s' % pod_name)
-        break
-
-    if is_success:
-      print('Successfully deleted all pods')
-
-    return is_success
-
-  def delete_servers(self, server_pod_spec):
-    return self._delete_pods(server_pod_spec.pod_names())
-
-  def delete_clients(self, client_pod_spec):
-    return self._delete_pods(client_pod_spec.pod_names())
-
-
-class Config:
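-  """ Parses the test config file into the docker images, templates and pod
-  specs used to drive the test """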
-
-  def __init__(self, config_filename, gcp_project_id):
-    print('Loading configuration...')
-    config_dict = self._load_config(config_filename)
-
-    self.global_settings = self._parse_global_settings(config_dict,
-                                                       gcp_project_id)
-    self.docker_images_dict = self._parse_docker_images(
-        config_dict, self.global_settings.gcp_project_id)
-    self.client_templates_dict = self._parse_client_templates(config_dict)
-    self.server_templates_dict = self._parse_server_templates(config_dict)
-    self.server_pod_specs_dict = self._parse_server_pod_specs(
-        config_dict, self.docker_images_dict, self.server_templates_dict)
-    self.client_pod_specs_dict = self._parse_client_pod_specs(
-        config_dict, self.docker_images_dict, self.client_templates_dict,
-        self.server_pod_specs_dict)
-    print('Loaded configuration.')
-
-  def _parse_global_settings(self, config_dict, gcp_project_id):
-    global_settings_dict = config_dict['globalSettings']
-    return GlobalSettings(gcp_project_id,
-                          global_settings_dict['buildDockerImages'],
-                          global_settings_dict['pollIntervalSecs'],
-                          global_settings_dict['testDurationSecs'],
-                          global_settings_dict['kubernetesProxyPort'],
-                          global_settings_dict['datasetIdNamePrefix'],
-                          global_settings_dict['summaryTableId'],
-                          global_settings_dict['qpsTableId'],
-                          global_settings_dict['podWarmupSecs'])
-
-  def _parse_docker_images(self, config_dict, gcp_project_id):
-    """Parses the 'dockerImages' section of the config file and returns a
-    Dictionary of 'DockerImage' objects keyed by docker image names"""
-    docker_images_dict = {}
-
-    docker_config_dict = config_dict['dockerImages']
-    for image_name in docker_config_dict.keys():
-      build_script_path = docker_config_dict[image_name]['buildScript']
-      dockerfile_dir = docker_config_dict[image_name]['dockerFileDir']
-      build_type = docker_config_dict[image_name].get('buildType', 'opt')
-      docker_images_dict[image_name] = DockerImage(gcp_project_id, image_name,
-                                                   build_script_path,
-                                                   dockerfile_dir, build_type)
-    return docker_images_dict
-
-  def _parse_client_templates(self, config_dict):
-    """Parses the 'clientTemplates' section of the config file and returns a
-    Dictionary of 'ClientTemplate' objects keyed by client template names.
-
-    Note: The 'baseTemplates' sub-section of the config file contains templates
-    with default values and the 'templates' sub-section contains the actual
-    client templates (which refer to the base template name to use for default
-    values).
-    """
-    client_templates_dict = {}
-
-    templates_dict = config_dict['clientTemplates']['templates']
-    base_templates_dict = config_dict['clientTemplates'].get('baseTemplates',
-                                                             {})
-    for template_name in templates_dict.keys():
-      # temp_dict is a temporary dictionary that merges base template dictionary
-      # and client template dictionary (with client template dictionary values
-      # overriding base template values)
-      temp_dict = {}
-
-      base_template_name = templates_dict[template_name].get('baseTemplate')
-      if base_template_name is not None:
-        temp_dict = base_templates_dict[base_template_name].copy()
-
-      temp_dict.update(templates_dict[template_name])
-
-      # Create and add ClientTemplate object to the final client_templates_dict
-      stress_client_cmd = ' '.join(temp_dict['stressClientCmd'])
-      metrics_client_cmd = ' '.join(temp_dict['metricsClientCmd'])
-      client_templates_dict[template_name] = ClientTemplate(
-          template_name, stress_client_cmd, metrics_client_cmd,
-          temp_dict['metricsPort'], temp_dict['wrapperScriptPath'],
-          temp_dict['pollIntervalSecs'], temp_dict['clientArgs'].copy(),
-          temp_dict['metricsArgs'].copy(), temp_dict.get('willRunForever', 1),
-          temp_dict.get('env', {}).copy())
-
-    return client_templates_dict
-
-  def _parse_server_templates(self, config_dict):
-    """Parses the 'serverTemplates' section of the config file and returns a
-    Dictionary of 'serverTemplate' objects keyed by server template names.
-
-    Note: The 'baseTemplates' sub-section of the config file contains templates
-    with default values and the 'templates' sub-section contains the actual
-    server templates (which refer to the base template name to use for default
-    values).
-    """
-    server_templates_dict = {}
-
-    templates_dict = config_dict['serverTemplates']['templates']
-    base_templates_dict = config_dict['serverTemplates'].get('baseTemplates',
-                                                             {})
-
-    for template_name in templates_dict.keys():
-      # temp_dict is a temporary dictionary that merges base template dictionary
-      # and server template dictionary (with server template dictionary values
-      # overriding base template values)
-      temp_dict = {}
-
-      base_template_name = templates_dict[template_name].get('baseTemplate')
-      if base_template_name is not None:
-        temp_dict = base_templates_dict[base_template_name].copy()
-
-      temp_dict.update(templates_dict[template_name])
-
-      # Create and add ServerTemplate object to the final server_templates_dict
-      stress_server_cmd = ' '.join(temp_dict['stressServerCmd'])
-      server_templates_dict[template_name] = ServerTemplate(
-          template_name, stress_server_cmd, temp_dict['wrapperScriptPath'],
-          temp_dict['serverPort'], temp_dict['serverArgs'].copy(),
-          temp_dict.get('willRunForever', 1), temp_dict.get('env', {}).copy())
-
-    return server_templates_dict
-
-  def _parse_server_pod_specs(self, config_dict, docker_images_dict,
-                              server_templates_dict):
-    """Parses the 'serverPodSpecs' sub-section (under 'testMatrix' section) of
-    the config file and returns a Dictionary of 'ServerPodSpec' objects keyed
-    by server pod spec names"""
-    server_pod_specs_dict = {}
-
-    pod_specs_dict = config_dict['testMatrix'].get('serverPodSpecs', {})
-
-    for pod_name in pod_specs_dict.keys():
-      server_template_name = pod_specs_dict[pod_name]['serverTemplate']
-      docker_image_name = pod_specs_dict[pod_name]['dockerImage']
-      num_instances = pod_specs_dict[pod_name].get('numInstances', 1)
-
-      # Create and add the ServerPodSpec object to the final
-      # server_pod_specs_dict
-      server_pod_specs_dict[pod_name] = ServerPodSpec(
-          pod_name, server_templates_dict[server_template_name],
-          docker_images_dict[docker_image_name], num_instances)
-
-    return server_pod_specs_dict
-
-  def _parse_client_pod_specs(self, config_dict, docker_images_dict,
-                              client_templates_dict, server_pod_specs_dict):
-    """Parses the 'clientPodSpecs' sub-section (under 'testMatrix' section) of
-    the config file and returns a Dictionary of 'ClientPodSpec' objects keyed
-    by client pod spec names"""
-    client_pod_specs_dict = {}
-
-    pod_specs_dict = config_dict['testMatrix'].get('clientPodSpecs', {})
-    for pod_name in pod_specs_dict.keys():
-      client_template_name = pod_specs_dict[pod_name]['clientTemplate']
-      docker_image_name = pod_specs_dict[pod_name]['dockerImage']
-      num_instances = pod_specs_dict[pod_name]['numInstances']
-
-      # Get the server addresses from the server pod spec object
-      server_pod_spec_name = pod_specs_dict[pod_name]['serverPodSpec']
-      server_addresses = server_pod_specs_dict[
-          server_pod_spec_name].server_addresses()
-
-      client_pod_specs_dict[pod_name] = ClientPodSpec(
-          pod_name, client_templates_dict[client_template_name],
-          docker_images_dict[docker_image_name], num_instances,
-          server_addresses)
-
-    return client_pod_specs_dict
-
-  def _load_config(self, config_filename):
-    """Opens the config file and converts the Json text to Dictionary"""
-    if not os.path.isabs(config_filename):
-      raise Exception('Config expects an absolute file path. '
-                      'Config file name passed: %s' % config_filename)
-    with open(config_filename) as config_file:
-      return json.load(config_file)
-
-
-def run_tests(config):
-  """ The main function that launches the stress tests """
-  # Build docker images and push to GKE registry
-  if config.global_settings.build_docker_images:
-    for name, docker_image in config.docker_images_dict.iteritems():
-      if not (docker_image.build_image() and
-              docker_image.push_to_gke_registry()):
-        return False
-
-  # Create a unique id for this run (Note: Using timestamp instead of UUID to
-  # make it easier to deduce the date/time of the run just by looking at the
-  # run id. This is useful in debugging when looking at records in BigQuery)
-  run_id = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
-  dataset_id = '%s_%s' % (config.global_settings.dataset_id_prefix, run_id)
-  print('Run id:', run_id)
-  print('Dataset id:', dataset_id)
-
-  bq_helper = BigQueryHelper(run_id, '', '',
-                             config.global_settings.gcp_project_id, dataset_id,
-                             config.global_settings.summary_table_id,
-                             config.global_settings.qps_table_id)
-  bq_helper.initialize()
-
-  gke = Gke(config.global_settings.gcp_project_id, run_id, dataset_id,
-            config.global_settings.summary_table_id,
-            config.global_settings.qps_table_id,
-            config.global_settings.kubernetes_proxy_port)
-
-  is_success = True
-
-  try:
-    print('Launching servers..')
-    for name, server_pod_spec in config.server_pod_specs_dict.iteritems():
-      if not gke.launch_servers(server_pod_spec):
-        is_success = False  # is_success is checked in the 'finally' block
-        return False
-
-    print('Launched servers. Waiting for %d seconds for the server pods to be '
-          'fully online' % config.global_settings.pod_warmup_secs)
-    time.sleep(config.global_settings.pod_warmup_secs)
-
-    for name, client_pod_spec in config.client_pod_specs_dict.iteritems():
-      if not gke.launch_clients(client_pod_spec):
-        is_success = False  # is_success is checked in the 'finally' block
-        return False
-
-    print('Launched all clients. Waiting for %d seconds for the client pods to '
-          'be fully online' % config.global_settings.pod_warmup_secs)
-    time.sleep(config.global_settings.pod_warmup_secs)
-
-    start_time = datetime.datetime.now()
-    end_time = start_time + datetime.timedelta(
-        seconds=config.global_settings.test_duration_secs)
-    print('Running the test until %s' % end_time.isoformat())
-
-    while True:
-      if datetime.datetime.now() > end_time:
-        print('Test was run for %d seconds' %
-              config.global_settings.test_duration_secs)
-        break
-
-      # Check if any stress server or client has failed (the bq_helper monitors
-      # all the rows in the summary table and checks if any of them have a
-      # failure status)
-      if bq_helper.check_if_any_tests_failed():
-        is_success = False
-        print('Some tests failed.')
-        break  # Don't 'return' here. We still want to call bq_helper to print qps/summary tables
-
-      # Tests running fine. Wait until next poll time to check the status
-      print('Sleeping for %d seconds..' %
-            config.global_settings.test_poll_interval_secs)
-      time.sleep(config.global_settings.test_poll_interval_secs)
-
-    # Print BigQuery tables
-    bq_helper.print_qps_records()
-    bq_helper.print_summary_records()
-
-  finally:
-    # If there was a test failure, we should not delete the pods since they
-    # would contain useful debug information (logs, core dumps etc)
-    if is_success:
-      for name, server_pod_spec in config.server_pod_specs_dict.iteritems():
-        gke.delete_servers(server_pod_spec)
-      for name, client_pod_spec in config.client_pod_specs_dict.iteritems():
-        gke.delete_clients(client_pod_spec)
-
-  return is_success
-
-
-def tear_down(config):
-  gke = Gke(config.global_settings.gcp_project_id, '', '',
-            config.global_settings.summary_table_id,
-            config.global_settings.qps_table_id,
-            config.global_settings.kubernetes_proxy_port)
-  for name, server_pod_spec in config.server_pod_specs_dict.iteritems():
-    gke.delete_servers(server_pod_spec)
-  for name, client_pod_spec in config.client_pod_specs_dict.iteritems():
-    gke.delete_clients(client_pod_spec)
-
-
-argp = argparse.ArgumentParser(
-    description='Launch stress tests in GKE',
-    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-argp.add_argument('--gcp_project_id',
-                  required=True,
-                  help='The Google Cloud Platform Project Id')
-argp.add_argument('--config_file',
-                  required=True,
-                  type=str,
-                  help='The test config file')
-argp.add_argument('--tear_down', action='store_true', default=False)
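-
-# Example invocation (the config file path is illustrative):
-#   tools/run_tests/stress_test/run_on_gke.py --gcp_project_id=<project-id> \
-#       --config_file=tools/run_tests/stress_test/configs/ruby.json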
-
-if __name__ == '__main__':
-  args = argp.parse_args()
-
-  config_filename = args.config_file
-
-  # Since we will be changing the current working directory to grpc root in the
-  # next step, we should check if the config filename path is a relative path
-  # (i.e. a path relative to the current working directory) and if so, convert
-  # it to an absolute path
-  if not os.path.isabs(config_filename):
-    config_filename = os.path.abspath(config_filename)
-
-  config = Config(config_filename, args.gcp_project_id)
-
-  # Change current working directory to grpc root
-  # (This is important because all relative file paths in the config file are
-  # supposed to be interpreted as relative to the grpc root)
-  grpc_root = os.path.abspath(os.path.join(
-      os.path.dirname(sys.argv[0]), '../../..'))
-  os.chdir(grpc_root)
-
-  # Note that tear_down is only used in cases where we want to manually tear
-  # down a test that run_tests() could not clean up for some reason
-  if args.tear_down:
-    tear_down(config)
-    sys.exit(0)
-
-  if not run_tests(config):
-    sys.exit(1)
diff --git a/tools/ubsan_suppressions.txt b/tools/ubsan_suppressions.txt
index 9869f98a22e6507e64bc0bd927f782290d064edb..f87ed1856543561e5654fec9e718a57fbd4648f0 100644
--- a/tools/ubsan_suppressions.txt
+++ b/tools/ubsan_suppressions.txt
@@ -4,4 +4,6 @@ nonnull-attribute:CBB_add_bytes
 nonnull-attribute:rsa_blinding_get
 nonnull-attribute:ssl_copy_key_material
 alignment:CRYPTO_cbc128_encrypt
+nonnull-attribute:google::protobuf::DescriptorBuilder::BuildFileImpl
+nonnull-attribute:google::protobuf::TextFormat::Printer::TextGenerator::Write