diff --git a/.gitignore b/.gitignore
index ca61bda124c993a79f8f6f67b1c37e730337f7a6..f37989176ea38e925baed0f67975210758e42965 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,13 +5,15 @@ libs
 objs
 
 # Python items
+cython_debug/
 python_build/
 .coverage*
 .eggs
-.tox
 htmlcov/
 dist/
 *.egg
+py27/
+py34/
 
 # Node installation output
 ^node_modules
diff --git a/.gitmodules b/.gitmodules
index c85a53943a743cb9da64bd301ed46ff73f175adc..ce647f3c455a9a70df81cdb7c535dc9ad6369831 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,7 +4,7 @@
 [submodule "third_party/protobuf"]
 	path = third_party/protobuf
 	url = https://github.com/google/protobuf.git
-	branch = v3.0.0-beta-2
+	branch = 3.0.0-beta-3
 [submodule "third_party/gflags"]
 	path = third_party/gflags
 	url = https://github.com/gflags/gflags.git
diff --git a/.travis.yml b/.travis.yml
index 16c6390a54a755b6a7a4fa1ef0e177c6015cf24b..4cdad37c6c52def1afaccc039ec10b42f7fddabc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,5 @@
+git:
+  depth: 1
 language: objective-c
 osx_image: xcode7.3
 env:
@@ -5,31 +7,60 @@ env:
     - CONFIG=opt
     - TEST=objc
     - JOBS=1
+  matrix:
+    - SCHEME="RxLibraryUnitTests" WORKSPACE="Tests.xcworkspace"
+      TEST_PATH="src/objective-c/tests" BUILD_ONLY="false"
+      INTEROP_SERVER="false"
+    - SCHEME="InteropTestsLocalSSL" WORKSPACE="Tests.xcworkspace"
+      TEST_PATH="src/objective-c/tests" BUILD_ONLY="false" INTEROP_SERVER="true"
+    - SCHEME="InteropTestsLocalCleartext" WORKSPACE="Tests.xcworkspace"
+      TEST_PATH="src/objective-c/tests"  BUILD_ONLY="false"
+      INTEROP_SERVER="true"
+    # TODO(jcanizales): Make tests an app project (instead of library), so the following will work.
+    # - SCHEME="InteropTestsRemote" WORKSPACE="Tests.xcworkspace"
+    #   TEST_PATH="src/objective-c/tests" BUILD_ONLY="false"
+    #   INTEROP_SERVER="true"
+    - SCHEME="HelloWorld" WORKSPACE="HelloWorld.xcworkspace"
+      TEST_PATH="examples/objective-c/helloworld" BUILD_ONLY="true"
+      INTEROP_SERVER="false"
+    - SCHEME="RouteGuideClient" WORKSPACE="RouteGuideClient.xcworkspace"
+      TEST_PATH="examples/objective-c/route_guide" BUILD_ONLY="true"
+      INTEROP_SERVER="false"
+    - SCHEME="AuthSample" WORKSPACE="AuthSample.xcworkspace"
+      TEST_PATH="examples/objective-c/auth_sample" BUILD_ONLY="true"
+      INTEROP_SERVER="false"
+    - SCHEME="Sample" WORKSPACE="Sample.xcworkspace"
+      TEST_PATH="src/objective-c/examples/Sample" BUILD_ONLY="true"
+      INTEROP_SERVER="false"
+    - SCHEME="SwiftSample" WORKSPACE="SwiftSample.xcworkspace"
+      TEST_PATH="src/objective-c/examples/SwiftSample" BUILD_ONLY="true"
+      INTEROP_SERVER="false"
 before_install:
+  # Until Travis upgrades from CocoaPods 0.39, we need to do it here.
   - pod --version
   - gem uninstall cocoapods -a
-  - gem install cocoapods -v '1.0.0'
+  - gem install cocoapods -v '1.0.1'
   - pod --version
+  # Recent pods aren't found if we don't explicitly update CocoaPods' repo.
+  - pod repo update
   - brew install gflags
-  - pushd third_party/protobuf
-  - git checkout v3.0.0-beta-3
-  - popd
 install:
-  - make grpc_objective_c_plugin
-  - pushd src/objective-c/tests
+  - pushd $TEST_PATH
   - pod install
   - popd
 before_script:
-  - make interop_server
-  - bins/$CONFIG/interop_server --port=5050 &
-  - bins/$CONFIG/interop_server --port=5051 --use_tls &
-xcode_workspace: src/objective-c/tests/Tests.xcworkspace
-xcode_scheme:
-  - RxLibraryUnitTests
-  - InteropTestsLocalSSL
-  - InteropTestsLocalCleartext
-  # TODO(jcanizales): Investigate why they time out:
-  # - InteropTestsRemote
-xcode_sdk: iphonesimulator9.3
+  - if [ "${INTEROP_SERVER}" = "true" ]; then
+      make interop_server;
+      (bins/$CONFIG/interop_server --port=5050 &);
+      (bins/$CONFIG/interop_server --port=5051 --use_tls &);
+    fi
+script:
+  - if [ "${BUILD_ONLY}" = "true" ]; then
+      xctool -workspace "$TEST_PATH/$WORKSPACE" -scheme "$SCHEME"
+      -sdk iphonesimulator9.3 build;
+    else
+      xctool -workspace "$TEST_PATH/$WORKSPACE" -scheme "$SCHEME"
+      -sdk iphonesimulator9.3 test;
+    fi
 notifications:
   email: false
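
For reference, each entry in the env matrix above selects one scheme/workspace pair, and the script step expands it into a single xctool invocation. A build-and-test job such as the RxLibraryUnitTests entry effectively runs something like the following sketch, assembled from the variables defined above rather than taken from any extra command in the config:

  $ cd src/objective-c/tests && pod install && cd -
  $ xctool -workspace "src/objective-c/tests/Tests.xcworkspace" \
      -scheme "RxLibraryUnitTests" -sdk iphonesimulator9.3 test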
diff --git a/BUILD b/BUILD
index b12d742fba1a48b1417558321de371b413eaf911..15e9d14a82e5cd5f36e90046f770b7c47fbb0aa7 100644
--- a/BUILD
+++ b/BUILD
@@ -177,6 +177,7 @@ cc_library(
     "src/core/lib/iomgr/endpoint.h",
     "src/core/lib/iomgr/endpoint_pair.h",
     "src/core/lib/iomgr/error.h",
+    "src/core/lib/iomgr/ev_epoll_linux.h",
     "src/core/lib/iomgr/ev_poll_and_epoll_posix.h",
     "src/core/lib/iomgr/ev_poll_posix.h",
     "src/core/lib/iomgr/ev_posix.h",
@@ -187,6 +188,7 @@ cc_library(
     "src/core/lib/iomgr/iomgr_internal.h",
     "src/core/lib/iomgr/iomgr_posix.h",
     "src/core/lib/iomgr/load_file.h",
+    "src/core/lib/iomgr/network_status_tracker.h",
     "src/core/lib/iomgr/polling_entity.h",
     "src/core/lib/iomgr/pollset.h",
     "src/core/lib/iomgr/pollset_set.h",
@@ -228,7 +230,6 @@ cc_library(
     "src/core/lib/surface/init.h",
     "src/core/lib/surface/lame_client.h",
     "src/core/lib/surface/server.h",
-    "src/core/lib/surface/surface_trace.h",
     "src/core/lib/transport/byte_stream.h",
     "src/core/lib/transport/connectivity_state.h",
     "src/core/lib/transport/metadata.h",
@@ -329,6 +330,7 @@ cc_library(
     "src/core/lib/iomgr/endpoint_pair_posix.c",
     "src/core/lib/iomgr/endpoint_pair_windows.c",
     "src/core/lib/iomgr/error.c",
+    "src/core/lib/iomgr/ev_epoll_linux.c",
     "src/core/lib/iomgr/ev_poll_and_epoll_posix.c",
     "src/core/lib/iomgr/ev_poll_posix.c",
     "src/core/lib/iomgr/ev_posix.c",
@@ -339,6 +341,7 @@ cc_library(
     "src/core/lib/iomgr/iomgr_posix.c",
     "src/core/lib/iomgr/iomgr_windows.c",
     "src/core/lib/iomgr/load_file.c",
+    "src/core/lib/iomgr/network_status_tracker.c",
     "src/core/lib/iomgr/polling_entity.c",
     "src/core/lib/iomgr/pollset_set_windows.c",
     "src/core/lib/iomgr/pollset_windows.c",
@@ -563,6 +566,7 @@ cc_library(
     "src/core/lib/iomgr/endpoint.h",
     "src/core/lib/iomgr/endpoint_pair.h",
     "src/core/lib/iomgr/error.h",
+    "src/core/lib/iomgr/ev_epoll_linux.h",
     "src/core/lib/iomgr/ev_poll_and_epoll_posix.h",
     "src/core/lib/iomgr/ev_poll_posix.h",
     "src/core/lib/iomgr/ev_posix.h",
@@ -573,6 +577,7 @@ cc_library(
     "src/core/lib/iomgr/iomgr_internal.h",
     "src/core/lib/iomgr/iomgr_posix.h",
     "src/core/lib/iomgr/load_file.h",
+    "src/core/lib/iomgr/network_status_tracker.h",
     "src/core/lib/iomgr/polling_entity.h",
     "src/core/lib/iomgr/pollset.h",
     "src/core/lib/iomgr/pollset_set.h",
@@ -614,7 +619,6 @@ cc_library(
     "src/core/lib/surface/init.h",
     "src/core/lib/surface/lame_client.h",
     "src/core/lib/surface/server.h",
-    "src/core/lib/surface/surface_trace.h",
     "src/core/lib/transport/byte_stream.h",
     "src/core/lib/transport/connectivity_state.h",
     "src/core/lib/transport/metadata.h",
@@ -704,6 +708,7 @@ cc_library(
     "src/core/lib/iomgr/endpoint_pair_posix.c",
     "src/core/lib/iomgr/endpoint_pair_windows.c",
     "src/core/lib/iomgr/error.c",
+    "src/core/lib/iomgr/ev_epoll_linux.c",
     "src/core/lib/iomgr/ev_poll_and_epoll_posix.c",
     "src/core/lib/iomgr/ev_poll_posix.c",
     "src/core/lib/iomgr/ev_posix.c",
@@ -714,6 +719,7 @@ cc_library(
     "src/core/lib/iomgr/iomgr_posix.c",
     "src/core/lib/iomgr/iomgr_windows.c",
     "src/core/lib/iomgr/load_file.c",
+    "src/core/lib/iomgr/network_status_tracker.c",
     "src/core/lib/iomgr/polling_entity.c",
     "src/core/lib/iomgr/pollset_set_windows.c",
     "src/core/lib/iomgr/pollset_windows.c",
@@ -912,6 +918,7 @@ cc_library(
     "src/core/lib/iomgr/endpoint.h",
     "src/core/lib/iomgr/endpoint_pair.h",
     "src/core/lib/iomgr/error.h",
+    "src/core/lib/iomgr/ev_epoll_linux.h",
     "src/core/lib/iomgr/ev_poll_and_epoll_posix.h",
     "src/core/lib/iomgr/ev_poll_posix.h",
     "src/core/lib/iomgr/ev_posix.h",
@@ -922,6 +929,7 @@ cc_library(
     "src/core/lib/iomgr/iomgr_internal.h",
     "src/core/lib/iomgr/iomgr_posix.h",
     "src/core/lib/iomgr/load_file.h",
+    "src/core/lib/iomgr/network_status_tracker.h",
     "src/core/lib/iomgr/polling_entity.h",
     "src/core/lib/iomgr/pollset.h",
     "src/core/lib/iomgr/pollset_set.h",
@@ -963,7 +971,6 @@ cc_library(
     "src/core/lib/surface/init.h",
     "src/core/lib/surface/lame_client.h",
     "src/core/lib/surface/server.h",
-    "src/core/lib/surface/surface_trace.h",
     "src/core/lib/transport/byte_stream.h",
     "src/core/lib/transport/connectivity_state.h",
     "src/core/lib/transport/metadata.h",
@@ -1041,6 +1048,7 @@ cc_library(
     "src/core/lib/iomgr/endpoint_pair_posix.c",
     "src/core/lib/iomgr/endpoint_pair_windows.c",
     "src/core/lib/iomgr/error.c",
+    "src/core/lib/iomgr/ev_epoll_linux.c",
     "src/core/lib/iomgr/ev_poll_and_epoll_posix.c",
     "src/core/lib/iomgr/ev_poll_posix.c",
     "src/core/lib/iomgr/ev_posix.c",
@@ -1051,6 +1059,7 @@ cc_library(
     "src/core/lib/iomgr/iomgr_posix.c",
     "src/core/lib/iomgr/iomgr_windows.c",
     "src/core/lib/iomgr/load_file.c",
+    "src/core/lib/iomgr/network_status_tracker.c",
     "src/core/lib/iomgr/polling_entity.c",
     "src/core/lib/iomgr/pollset_set_windows.c",
     "src/core/lib/iomgr/pollset_windows.c",
@@ -1799,6 +1808,7 @@ objc_library(
     "src/core/lib/iomgr/endpoint_pair_posix.c",
     "src/core/lib/iomgr/endpoint_pair_windows.c",
     "src/core/lib/iomgr/error.c",
+    "src/core/lib/iomgr/ev_epoll_linux.c",
     "src/core/lib/iomgr/ev_poll_and_epoll_posix.c",
     "src/core/lib/iomgr/ev_poll_posix.c",
     "src/core/lib/iomgr/ev_posix.c",
@@ -1809,6 +1819,7 @@ objc_library(
     "src/core/lib/iomgr/iomgr_posix.c",
     "src/core/lib/iomgr/iomgr_windows.c",
     "src/core/lib/iomgr/load_file.c",
+    "src/core/lib/iomgr/network_status_tracker.c",
     "src/core/lib/iomgr/polling_entity.c",
     "src/core/lib/iomgr/pollset_set_windows.c",
     "src/core/lib/iomgr/pollset_windows.c",
@@ -2012,6 +2023,7 @@ objc_library(
     "src/core/lib/iomgr/endpoint.h",
     "src/core/lib/iomgr/endpoint_pair.h",
     "src/core/lib/iomgr/error.h",
+    "src/core/lib/iomgr/ev_epoll_linux.h",
     "src/core/lib/iomgr/ev_poll_and_epoll_posix.h",
     "src/core/lib/iomgr/ev_poll_posix.h",
     "src/core/lib/iomgr/ev_posix.h",
@@ -2022,6 +2034,7 @@ objc_library(
     "src/core/lib/iomgr/iomgr_internal.h",
     "src/core/lib/iomgr/iomgr_posix.h",
     "src/core/lib/iomgr/load_file.h",
+    "src/core/lib/iomgr/network_status_tracker.h",
     "src/core/lib/iomgr/polling_entity.h",
     "src/core/lib/iomgr/pollset.h",
     "src/core/lib/iomgr/pollset_set.h",
@@ -2063,7 +2076,6 @@ objc_library(
     "src/core/lib/surface/init.h",
     "src/core/lib/surface/lame_client.h",
     "src/core/lib/surface/server.h",
-    "src/core/lib/surface/surface_trace.h",
     "src/core/lib/transport/byte_stream.h",
     "src/core/lib/transport/connectivity_state.h",
     "src/core/lib/transport/metadata.h",
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9da4d2b5b62e4257766ebf6288ca83e8c0331498
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,1069 @@
+# GRPC global cmake file
+# This currently builds C and C++ code.
+# This file has been automatically generated from a template file.
+# Please look at the templates directory instead.
+# This file can be regenerated from the template by running
+# tools/buildgen/generate_projects.sh
+#
+# Additionally, this build is currently very experimental and unsupported.
+# Further work will happen on this file.
+#
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PACKAGE_NAME      "grpc")
+set(PACKAGE_VERSION   "1.1.0-dev")
+set(PACKAGE_STRING    "${PACKAGE_NAME} ${PACKAGE_VERSION}")
+set(PACKAGE_TARNAME   "${PACKAGE_NAME}-${PACKAGE_VERSION}")
+set(PACKAGE_BUGREPORT "https://github.com/grpc/grpc/issues/")
+project(${PACKAGE_NAME} C CXX)
+
+if(NOT BORINGSSL_ROOT_DIR)
+  set(BORINGSSL_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/boringssl)
+endif()
+if(NOT PROTOBUF_ROOT_DIR)
+  set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf)
+endif()
+if(NOT ZLIB_ROOT_DIR)
+  set(ZLIB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib)
+endif()
+
+# Building the protobuf tests requires gmock, which is not part of a standard protobuf checkout.
+# Disable them unless they are explicitly requested from the cmake command line (in which case we
+# assume gmock has been downloaded to the right location inside protobuf).
+if(NOT protobuf_BUILD_TESTS)
+  set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests")
+endif()
+
+add_subdirectory(${BORINGSSL_ROOT_DIR} third_party/boringssl)
+add_subdirectory(${PROTOBUF_ROOT_DIR}/cmake third_party/protobuf)
+add_subdirectory(${ZLIB_ROOT_DIR} third_party/zlib)
+
+set(CMAKE_C_FLAGS   "${CMAKE_C_FLAGS}   -std=c11")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+
+  
+add_library(gpr
+  src/core/lib/profiling/basic_timers.c
+  src/core/lib/profiling/stap_timers.c
+  src/core/lib/support/alloc.c
+  src/core/lib/support/avl.c
+  src/core/lib/support/backoff.c
+  src/core/lib/support/cmdline.c
+  src/core/lib/support/cpu_iphone.c
+  src/core/lib/support/cpu_linux.c
+  src/core/lib/support/cpu_posix.c
+  src/core/lib/support/cpu_windows.c
+  src/core/lib/support/env_linux.c
+  src/core/lib/support/env_posix.c
+  src/core/lib/support/env_windows.c
+  src/core/lib/support/histogram.c
+  src/core/lib/support/host_port.c
+  src/core/lib/support/log.c
+  src/core/lib/support/log_android.c
+  src/core/lib/support/log_linux.c
+  src/core/lib/support/log_posix.c
+  src/core/lib/support/log_windows.c
+  src/core/lib/support/murmur_hash.c
+  src/core/lib/support/slice.c
+  src/core/lib/support/slice_buffer.c
+  src/core/lib/support/stack_lockfree.c
+  src/core/lib/support/string.c
+  src/core/lib/support/string_posix.c
+  src/core/lib/support/string_util_windows.c
+  src/core/lib/support/string_windows.c
+  src/core/lib/support/subprocess_posix.c
+  src/core/lib/support/subprocess_windows.c
+  src/core/lib/support/sync.c
+  src/core/lib/support/sync_posix.c
+  src/core/lib/support/sync_windows.c
+  src/core/lib/support/thd.c
+  src/core/lib/support/thd_posix.c
+  src/core/lib/support/thd_windows.c
+  src/core/lib/support/time.c
+  src/core/lib/support/time_posix.c
+  src/core/lib/support/time_precise.c
+  src/core/lib/support/time_windows.c
+  src/core/lib/support/tls_pthread.c
+  src/core/lib/support/tmpfile_msys.c
+  src/core/lib/support/tmpfile_posix.c
+  src/core/lib/support/tmpfile_windows.c
+  src/core/lib/support/wrap_memcpy.c
+)
+
+target_include_directories(gpr
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+
+  
+add_library(grpc
+  src/core/lib/surface/init.c
+  src/core/lib/channel/channel_args.c
+  src/core/lib/channel/channel_stack.c
+  src/core/lib/channel/channel_stack_builder.c
+  src/core/lib/channel/compress_filter.c
+  src/core/lib/channel/connected_channel.c
+  src/core/lib/channel/http_client_filter.c
+  src/core/lib/channel/http_server_filter.c
+  src/core/lib/compression/compression.c
+  src/core/lib/compression/message_compress.c
+  src/core/lib/debug/trace.c
+  src/core/lib/http/format_request.c
+  src/core/lib/http/httpcli.c
+  src/core/lib/http/parser.c
+  src/core/lib/iomgr/closure.c
+  src/core/lib/iomgr/endpoint.c
+  src/core/lib/iomgr/endpoint_pair_posix.c
+  src/core/lib/iomgr/endpoint_pair_windows.c
+  src/core/lib/iomgr/error.c
+  src/core/lib/iomgr/ev_epoll_linux.c
+  src/core/lib/iomgr/ev_poll_and_epoll_posix.c
+  src/core/lib/iomgr/ev_poll_posix.c
+  src/core/lib/iomgr/ev_posix.c
+  src/core/lib/iomgr/exec_ctx.c
+  src/core/lib/iomgr/executor.c
+  src/core/lib/iomgr/iocp_windows.c
+  src/core/lib/iomgr/iomgr.c
+  src/core/lib/iomgr/iomgr_posix.c
+  src/core/lib/iomgr/iomgr_windows.c
+  src/core/lib/iomgr/load_file.c
+  src/core/lib/iomgr/network_status_tracker.c
+  src/core/lib/iomgr/polling_entity.c
+  src/core/lib/iomgr/pollset_set_windows.c
+  src/core/lib/iomgr/pollset_windows.c
+  src/core/lib/iomgr/resolve_address_posix.c
+  src/core/lib/iomgr/resolve_address_windows.c
+  src/core/lib/iomgr/sockaddr_utils.c
+  src/core/lib/iomgr/socket_utils_common_posix.c
+  src/core/lib/iomgr/socket_utils_linux.c
+  src/core/lib/iomgr/socket_utils_posix.c
+  src/core/lib/iomgr/socket_windows.c
+  src/core/lib/iomgr/tcp_client_posix.c
+  src/core/lib/iomgr/tcp_client_windows.c
+  src/core/lib/iomgr/tcp_posix.c
+  src/core/lib/iomgr/tcp_server_posix.c
+  src/core/lib/iomgr/tcp_server_windows.c
+  src/core/lib/iomgr/tcp_windows.c
+  src/core/lib/iomgr/time_averaged_stats.c
+  src/core/lib/iomgr/timer.c
+  src/core/lib/iomgr/timer_heap.c
+  src/core/lib/iomgr/udp_server.c
+  src/core/lib/iomgr/unix_sockets_posix.c
+  src/core/lib/iomgr/unix_sockets_posix_noop.c
+  src/core/lib/iomgr/wakeup_fd_eventfd.c
+  src/core/lib/iomgr/wakeup_fd_nospecial.c
+  src/core/lib/iomgr/wakeup_fd_pipe.c
+  src/core/lib/iomgr/wakeup_fd_posix.c
+  src/core/lib/iomgr/workqueue_posix.c
+  src/core/lib/iomgr/workqueue_windows.c
+  src/core/lib/json/json.c
+  src/core/lib/json/json_reader.c
+  src/core/lib/json/json_string.c
+  src/core/lib/json/json_writer.c
+  src/core/lib/surface/alarm.c
+  src/core/lib/surface/api_trace.c
+  src/core/lib/surface/byte_buffer.c
+  src/core/lib/surface/byte_buffer_reader.c
+  src/core/lib/surface/call.c
+  src/core/lib/surface/call_details.c
+  src/core/lib/surface/call_log_batch.c
+  src/core/lib/surface/channel.c
+  src/core/lib/surface/channel_init.c
+  src/core/lib/surface/channel_ping.c
+  src/core/lib/surface/channel_stack_type.c
+  src/core/lib/surface/completion_queue.c
+  src/core/lib/surface/event_string.c
+  src/core/lib/surface/lame_client.c
+  src/core/lib/surface/metadata_array.c
+  src/core/lib/surface/server.c
+  src/core/lib/surface/validate_metadata.c
+  src/core/lib/surface/version.c
+  src/core/lib/transport/byte_stream.c
+  src/core/lib/transport/connectivity_state.c
+  src/core/lib/transport/metadata.c
+  src/core/lib/transport/metadata_batch.c
+  src/core/lib/transport/static_metadata.c
+  src/core/lib/transport/transport.c
+  src/core/lib/transport/transport_op_string.c
+  src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+  src/core/ext/transport/chttp2/transport/bin_decoder.c
+  src/core/ext/transport/chttp2/transport/bin_encoder.c
+  src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+  src/core/ext/transport/chttp2/transport/chttp2_transport.c
+  src/core/ext/transport/chttp2/transport/frame_data.c
+  src/core/ext/transport/chttp2/transport/frame_goaway.c
+  src/core/ext/transport/chttp2/transport/frame_ping.c
+  src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+  src/core/ext/transport/chttp2/transport/frame_settings.c
+  src/core/ext/transport/chttp2/transport/frame_window_update.c
+  src/core/ext/transport/chttp2/transport/hpack_encoder.c
+  src/core/ext/transport/chttp2/transport/hpack_parser.c
+  src/core/ext/transport/chttp2/transport/hpack_table.c
+  src/core/ext/transport/chttp2/transport/huffsyms.c
+  src/core/ext/transport/chttp2/transport/incoming_metadata.c
+  src/core/ext/transport/chttp2/transport/parsing.c
+  src/core/ext/transport/chttp2/transport/status_conversion.c
+  src/core/ext/transport/chttp2/transport/stream_lists.c
+  src/core/ext/transport/chttp2/transport/stream_map.c
+  src/core/ext/transport/chttp2/transport/timeout_encoding.c
+  src/core/ext/transport/chttp2/transport/varint.c
+  src/core/ext/transport/chttp2/transport/writing.c
+  src/core/ext/transport/chttp2/alpn/alpn.c
+  src/core/lib/http/httpcli_security_connector.c
+  src/core/lib/security/context/security_context.c
+  src/core/lib/security/credentials/composite/composite_credentials.c
+  src/core/lib/security/credentials/credentials.c
+  src/core/lib/security/credentials/credentials_metadata.c
+  src/core/lib/security/credentials/fake/fake_credentials.c
+  src/core/lib/security/credentials/google_default/credentials_posix.c
+  src/core/lib/security/credentials/google_default/credentials_windows.c
+  src/core/lib/security/credentials/google_default/google_default_credentials.c
+  src/core/lib/security/credentials/iam/iam_credentials.c
+  src/core/lib/security/credentials/jwt/json_token.c
+  src/core/lib/security/credentials/jwt/jwt_credentials.c
+  src/core/lib/security/credentials/jwt/jwt_verifier.c
+  src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+  src/core/lib/security/credentials/plugin/plugin_credentials.c
+  src/core/lib/security/credentials/ssl/ssl_credentials.c
+  src/core/lib/security/transport/client_auth_filter.c
+  src/core/lib/security/transport/handshake.c
+  src/core/lib/security/transport/secure_endpoint.c
+  src/core/lib/security/transport/security_connector.c
+  src/core/lib/security/transport/server_auth_filter.c
+  src/core/lib/security/transport/tsi_error.c
+  src/core/lib/security/util/b64.c
+  src/core/lib/security/util/json_util.c
+  src/core/lib/surface/init_secure.c
+  src/core/lib/tsi/fake_transport_security.c
+  src/core/lib/tsi/ssl_transport_security.c
+  src/core/lib/tsi/transport_security.c
+  src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+  src/core/ext/client_config/channel_connectivity.c
+  src/core/ext/client_config/client_channel.c
+  src/core/ext/client_config/client_channel_factory.c
+  src/core/ext/client_config/client_config.c
+  src/core/ext/client_config/client_config_plugin.c
+  src/core/ext/client_config/connector.c
+  src/core/ext/client_config/default_initial_connect_string.c
+  src/core/ext/client_config/initial_connect_string.c
+  src/core/ext/client_config/lb_policy.c
+  src/core/ext/client_config/lb_policy_factory.c
+  src/core/ext/client_config/lb_policy_registry.c
+  src/core/ext/client_config/parse_address.c
+  src/core/ext/client_config/resolver.c
+  src/core/ext/client_config/resolver_factory.c
+  src/core/ext/client_config/resolver_registry.c
+  src/core/ext/client_config/subchannel.c
+  src/core/ext/client_config/subchannel_call_holder.c
+  src/core/ext/client_config/subchannel_index.c
+  src/core/ext/client_config/uri_parser.c
+  src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
+  src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
+  src/core/ext/transport/chttp2/client/insecure/channel_create.c
+  src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
+  src/core/ext/lb_policy/grpclb/grpclb.c
+  src/core/ext/lb_policy/grpclb/load_balancer_api.c
+  src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
+  third_party/nanopb/pb_common.c
+  third_party/nanopb/pb_decode.c
+  third_party/nanopb/pb_encode.c
+  src/core/ext/lb_policy/pick_first/pick_first.c
+  src/core/ext/lb_policy/round_robin/round_robin.c
+  src/core/ext/resolver/dns/native/dns_resolver.c
+  src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+  src/core/ext/load_reporting/load_reporting.c
+  src/core/ext/load_reporting/load_reporting_filter.c
+  src/core/ext/census/context.c
+  src/core/ext/census/gen/census.pb.c
+  src/core/ext/census/grpc_context.c
+  src/core/ext/census/grpc_filter.c
+  src/core/ext/census/grpc_plugin.c
+  src/core/ext/census/initialize.c
+  src/core/ext/census/mlog.c
+  src/core/ext/census/operation.c
+  src/core/ext/census/placeholders.c
+  src/core/ext/census/tracing.c
+  src/core/plugin_registry/grpc_plugin_registry.c
+)
+
+target_include_directories(grpc
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc
+  ssl
+  zlibstatic
+  gpr
+)
+
+  
+add_library(grpc_cronet
+  src/core/lib/surface/init.c
+  src/core/lib/channel/channel_args.c
+  src/core/lib/channel/channel_stack.c
+  src/core/lib/channel/channel_stack_builder.c
+  src/core/lib/channel/compress_filter.c
+  src/core/lib/channel/connected_channel.c
+  src/core/lib/channel/http_client_filter.c
+  src/core/lib/channel/http_server_filter.c
+  src/core/lib/compression/compression.c
+  src/core/lib/compression/message_compress.c
+  src/core/lib/debug/trace.c
+  src/core/lib/http/format_request.c
+  src/core/lib/http/httpcli.c
+  src/core/lib/http/parser.c
+  src/core/lib/iomgr/closure.c
+  src/core/lib/iomgr/endpoint.c
+  src/core/lib/iomgr/endpoint_pair_posix.c
+  src/core/lib/iomgr/endpoint_pair_windows.c
+  src/core/lib/iomgr/error.c
+  src/core/lib/iomgr/ev_epoll_linux.c
+  src/core/lib/iomgr/ev_poll_and_epoll_posix.c
+  src/core/lib/iomgr/ev_poll_posix.c
+  src/core/lib/iomgr/ev_posix.c
+  src/core/lib/iomgr/exec_ctx.c
+  src/core/lib/iomgr/executor.c
+  src/core/lib/iomgr/iocp_windows.c
+  src/core/lib/iomgr/iomgr.c
+  src/core/lib/iomgr/iomgr_posix.c
+  src/core/lib/iomgr/iomgr_windows.c
+  src/core/lib/iomgr/load_file.c
+  src/core/lib/iomgr/network_status_tracker.c
+  src/core/lib/iomgr/polling_entity.c
+  src/core/lib/iomgr/pollset_set_windows.c
+  src/core/lib/iomgr/pollset_windows.c
+  src/core/lib/iomgr/resolve_address_posix.c
+  src/core/lib/iomgr/resolve_address_windows.c
+  src/core/lib/iomgr/sockaddr_utils.c
+  src/core/lib/iomgr/socket_utils_common_posix.c
+  src/core/lib/iomgr/socket_utils_linux.c
+  src/core/lib/iomgr/socket_utils_posix.c
+  src/core/lib/iomgr/socket_windows.c
+  src/core/lib/iomgr/tcp_client_posix.c
+  src/core/lib/iomgr/tcp_client_windows.c
+  src/core/lib/iomgr/tcp_posix.c
+  src/core/lib/iomgr/tcp_server_posix.c
+  src/core/lib/iomgr/tcp_server_windows.c
+  src/core/lib/iomgr/tcp_windows.c
+  src/core/lib/iomgr/time_averaged_stats.c
+  src/core/lib/iomgr/timer.c
+  src/core/lib/iomgr/timer_heap.c
+  src/core/lib/iomgr/udp_server.c
+  src/core/lib/iomgr/unix_sockets_posix.c
+  src/core/lib/iomgr/unix_sockets_posix_noop.c
+  src/core/lib/iomgr/wakeup_fd_eventfd.c
+  src/core/lib/iomgr/wakeup_fd_nospecial.c
+  src/core/lib/iomgr/wakeup_fd_pipe.c
+  src/core/lib/iomgr/wakeup_fd_posix.c
+  src/core/lib/iomgr/workqueue_posix.c
+  src/core/lib/iomgr/workqueue_windows.c
+  src/core/lib/json/json.c
+  src/core/lib/json/json_reader.c
+  src/core/lib/json/json_string.c
+  src/core/lib/json/json_writer.c
+  src/core/lib/surface/alarm.c
+  src/core/lib/surface/api_trace.c
+  src/core/lib/surface/byte_buffer.c
+  src/core/lib/surface/byte_buffer_reader.c
+  src/core/lib/surface/call.c
+  src/core/lib/surface/call_details.c
+  src/core/lib/surface/call_log_batch.c
+  src/core/lib/surface/channel.c
+  src/core/lib/surface/channel_init.c
+  src/core/lib/surface/channel_ping.c
+  src/core/lib/surface/channel_stack_type.c
+  src/core/lib/surface/completion_queue.c
+  src/core/lib/surface/event_string.c
+  src/core/lib/surface/lame_client.c
+  src/core/lib/surface/metadata_array.c
+  src/core/lib/surface/server.c
+  src/core/lib/surface/validate_metadata.c
+  src/core/lib/surface/version.c
+  src/core/lib/transport/byte_stream.c
+  src/core/lib/transport/connectivity_state.c
+  src/core/lib/transport/metadata.c
+  src/core/lib/transport/metadata_batch.c
+  src/core/lib/transport/static_metadata.c
+  src/core/lib/transport/transport.c
+  src/core/lib/transport/transport_op_string.c
+  src/core/ext/transport/cronet/client/secure/cronet_channel_create.c
+  src/core/ext/transport/cronet/transport/cronet_api_dummy.c
+  src/core/ext/transport/cronet/transport/cronet_transport.c
+  src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+  src/core/ext/transport/chttp2/transport/bin_decoder.c
+  src/core/ext/transport/chttp2/transport/bin_encoder.c
+  src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+  src/core/ext/transport/chttp2/transport/chttp2_transport.c
+  src/core/ext/transport/chttp2/transport/frame_data.c
+  src/core/ext/transport/chttp2/transport/frame_goaway.c
+  src/core/ext/transport/chttp2/transport/frame_ping.c
+  src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+  src/core/ext/transport/chttp2/transport/frame_settings.c
+  src/core/ext/transport/chttp2/transport/frame_window_update.c
+  src/core/ext/transport/chttp2/transport/hpack_encoder.c
+  src/core/ext/transport/chttp2/transport/hpack_parser.c
+  src/core/ext/transport/chttp2/transport/hpack_table.c
+  src/core/ext/transport/chttp2/transport/huffsyms.c
+  src/core/ext/transport/chttp2/transport/incoming_metadata.c
+  src/core/ext/transport/chttp2/transport/parsing.c
+  src/core/ext/transport/chttp2/transport/status_conversion.c
+  src/core/ext/transport/chttp2/transport/stream_lists.c
+  src/core/ext/transport/chttp2/transport/stream_map.c
+  src/core/ext/transport/chttp2/transport/timeout_encoding.c
+  src/core/ext/transport/chttp2/transport/varint.c
+  src/core/ext/transport/chttp2/transport/writing.c
+  src/core/ext/transport/chttp2/alpn/alpn.c
+  src/core/ext/client_config/channel_connectivity.c
+  src/core/ext/client_config/client_channel.c
+  src/core/ext/client_config/client_channel_factory.c
+  src/core/ext/client_config/client_config.c
+  src/core/ext/client_config/client_config_plugin.c
+  src/core/ext/client_config/connector.c
+  src/core/ext/client_config/default_initial_connect_string.c
+  src/core/ext/client_config/initial_connect_string.c
+  src/core/ext/client_config/lb_policy.c
+  src/core/ext/client_config/lb_policy_factory.c
+  src/core/ext/client_config/lb_policy_registry.c
+  src/core/ext/client_config/parse_address.c
+  src/core/ext/client_config/resolver.c
+  src/core/ext/client_config/resolver_factory.c
+  src/core/ext/client_config/resolver_registry.c
+  src/core/ext/client_config/subchannel.c
+  src/core/ext/client_config/subchannel_call_holder.c
+  src/core/ext/client_config/subchannel_index.c
+  src/core/ext/client_config/uri_parser.c
+  src/core/lib/http/httpcli_security_connector.c
+  src/core/lib/security/context/security_context.c
+  src/core/lib/security/credentials/composite/composite_credentials.c
+  src/core/lib/security/credentials/credentials.c
+  src/core/lib/security/credentials/credentials_metadata.c
+  src/core/lib/security/credentials/fake/fake_credentials.c
+  src/core/lib/security/credentials/google_default/credentials_posix.c
+  src/core/lib/security/credentials/google_default/credentials_windows.c
+  src/core/lib/security/credentials/google_default/google_default_credentials.c
+  src/core/lib/security/credentials/iam/iam_credentials.c
+  src/core/lib/security/credentials/jwt/json_token.c
+  src/core/lib/security/credentials/jwt/jwt_credentials.c
+  src/core/lib/security/credentials/jwt/jwt_verifier.c
+  src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+  src/core/lib/security/credentials/plugin/plugin_credentials.c
+  src/core/lib/security/credentials/ssl/ssl_credentials.c
+  src/core/lib/security/transport/client_auth_filter.c
+  src/core/lib/security/transport/handshake.c
+  src/core/lib/security/transport/secure_endpoint.c
+  src/core/lib/security/transport/security_connector.c
+  src/core/lib/security/transport/server_auth_filter.c
+  src/core/lib/security/transport/tsi_error.c
+  src/core/lib/security/util/b64.c
+  src/core/lib/security/util/json_util.c
+  src/core/lib/surface/init_secure.c
+  src/core/lib/tsi/fake_transport_security.c
+  src/core/lib/tsi/ssl_transport_security.c
+  src/core/lib/tsi/transport_security.c
+  src/core/plugin_registry/grpc_cronet_plugin_registry.c
+)
+
+target_include_directories(grpc_cronet
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_cronet
+  ssl
+  gpr
+)
+
+  
+add_library(grpc_unsecure
+  src/core/lib/surface/init.c
+  src/core/lib/surface/init_unsecure.c
+  src/core/lib/channel/channel_args.c
+  src/core/lib/channel/channel_stack.c
+  src/core/lib/channel/channel_stack_builder.c
+  src/core/lib/channel/compress_filter.c
+  src/core/lib/channel/connected_channel.c
+  src/core/lib/channel/http_client_filter.c
+  src/core/lib/channel/http_server_filter.c
+  src/core/lib/compression/compression.c
+  src/core/lib/compression/message_compress.c
+  src/core/lib/debug/trace.c
+  src/core/lib/http/format_request.c
+  src/core/lib/http/httpcli.c
+  src/core/lib/http/parser.c
+  src/core/lib/iomgr/closure.c
+  src/core/lib/iomgr/endpoint.c
+  src/core/lib/iomgr/endpoint_pair_posix.c
+  src/core/lib/iomgr/endpoint_pair_windows.c
+  src/core/lib/iomgr/error.c
+  src/core/lib/iomgr/ev_epoll_linux.c
+  src/core/lib/iomgr/ev_poll_and_epoll_posix.c
+  src/core/lib/iomgr/ev_poll_posix.c
+  src/core/lib/iomgr/ev_posix.c
+  src/core/lib/iomgr/exec_ctx.c
+  src/core/lib/iomgr/executor.c
+  src/core/lib/iomgr/iocp_windows.c
+  src/core/lib/iomgr/iomgr.c
+  src/core/lib/iomgr/iomgr_posix.c
+  src/core/lib/iomgr/iomgr_windows.c
+  src/core/lib/iomgr/load_file.c
+  src/core/lib/iomgr/network_status_tracker.c
+  src/core/lib/iomgr/polling_entity.c
+  src/core/lib/iomgr/pollset_set_windows.c
+  src/core/lib/iomgr/pollset_windows.c
+  src/core/lib/iomgr/resolve_address_posix.c
+  src/core/lib/iomgr/resolve_address_windows.c
+  src/core/lib/iomgr/sockaddr_utils.c
+  src/core/lib/iomgr/socket_utils_common_posix.c
+  src/core/lib/iomgr/socket_utils_linux.c
+  src/core/lib/iomgr/socket_utils_posix.c
+  src/core/lib/iomgr/socket_windows.c
+  src/core/lib/iomgr/tcp_client_posix.c
+  src/core/lib/iomgr/tcp_client_windows.c
+  src/core/lib/iomgr/tcp_posix.c
+  src/core/lib/iomgr/tcp_server_posix.c
+  src/core/lib/iomgr/tcp_server_windows.c
+  src/core/lib/iomgr/tcp_windows.c
+  src/core/lib/iomgr/time_averaged_stats.c
+  src/core/lib/iomgr/timer.c
+  src/core/lib/iomgr/timer_heap.c
+  src/core/lib/iomgr/udp_server.c
+  src/core/lib/iomgr/unix_sockets_posix.c
+  src/core/lib/iomgr/unix_sockets_posix_noop.c
+  src/core/lib/iomgr/wakeup_fd_eventfd.c
+  src/core/lib/iomgr/wakeup_fd_nospecial.c
+  src/core/lib/iomgr/wakeup_fd_pipe.c
+  src/core/lib/iomgr/wakeup_fd_posix.c
+  src/core/lib/iomgr/workqueue_posix.c
+  src/core/lib/iomgr/workqueue_windows.c
+  src/core/lib/json/json.c
+  src/core/lib/json/json_reader.c
+  src/core/lib/json/json_string.c
+  src/core/lib/json/json_writer.c
+  src/core/lib/surface/alarm.c
+  src/core/lib/surface/api_trace.c
+  src/core/lib/surface/byte_buffer.c
+  src/core/lib/surface/byte_buffer_reader.c
+  src/core/lib/surface/call.c
+  src/core/lib/surface/call_details.c
+  src/core/lib/surface/call_log_batch.c
+  src/core/lib/surface/channel.c
+  src/core/lib/surface/channel_init.c
+  src/core/lib/surface/channel_ping.c
+  src/core/lib/surface/channel_stack_type.c
+  src/core/lib/surface/completion_queue.c
+  src/core/lib/surface/event_string.c
+  src/core/lib/surface/lame_client.c
+  src/core/lib/surface/metadata_array.c
+  src/core/lib/surface/server.c
+  src/core/lib/surface/validate_metadata.c
+  src/core/lib/surface/version.c
+  src/core/lib/transport/byte_stream.c
+  src/core/lib/transport/connectivity_state.c
+  src/core/lib/transport/metadata.c
+  src/core/lib/transport/metadata_batch.c
+  src/core/lib/transport/static_metadata.c
+  src/core/lib/transport/transport.c
+  src/core/lib/transport/transport_op_string.c
+  src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
+  src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
+  src/core/ext/transport/chttp2/transport/bin_decoder.c
+  src/core/ext/transport/chttp2/transport/bin_encoder.c
+  src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+  src/core/ext/transport/chttp2/transport/chttp2_transport.c
+  src/core/ext/transport/chttp2/transport/frame_data.c
+  src/core/ext/transport/chttp2/transport/frame_goaway.c
+  src/core/ext/transport/chttp2/transport/frame_ping.c
+  src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+  src/core/ext/transport/chttp2/transport/frame_settings.c
+  src/core/ext/transport/chttp2/transport/frame_window_update.c
+  src/core/ext/transport/chttp2/transport/hpack_encoder.c
+  src/core/ext/transport/chttp2/transport/hpack_parser.c
+  src/core/ext/transport/chttp2/transport/hpack_table.c
+  src/core/ext/transport/chttp2/transport/huffsyms.c
+  src/core/ext/transport/chttp2/transport/incoming_metadata.c
+  src/core/ext/transport/chttp2/transport/parsing.c
+  src/core/ext/transport/chttp2/transport/status_conversion.c
+  src/core/ext/transport/chttp2/transport/stream_lists.c
+  src/core/ext/transport/chttp2/transport/stream_map.c
+  src/core/ext/transport/chttp2/transport/timeout_encoding.c
+  src/core/ext/transport/chttp2/transport/varint.c
+  src/core/ext/transport/chttp2/transport/writing.c
+  src/core/ext/transport/chttp2/alpn/alpn.c
+  src/core/ext/transport/chttp2/client/insecure/channel_create.c
+  src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
+  src/core/ext/client_config/channel_connectivity.c
+  src/core/ext/client_config/client_channel.c
+  src/core/ext/client_config/client_channel_factory.c
+  src/core/ext/client_config/client_config.c
+  src/core/ext/client_config/client_config_plugin.c
+  src/core/ext/client_config/connector.c
+  src/core/ext/client_config/default_initial_connect_string.c
+  src/core/ext/client_config/initial_connect_string.c
+  src/core/ext/client_config/lb_policy.c
+  src/core/ext/client_config/lb_policy_factory.c
+  src/core/ext/client_config/lb_policy_registry.c
+  src/core/ext/client_config/parse_address.c
+  src/core/ext/client_config/resolver.c
+  src/core/ext/client_config/resolver_factory.c
+  src/core/ext/client_config/resolver_registry.c
+  src/core/ext/client_config/subchannel.c
+  src/core/ext/client_config/subchannel_call_holder.c
+  src/core/ext/client_config/subchannel_index.c
+  src/core/ext/client_config/uri_parser.c
+  src/core/ext/resolver/dns/native/dns_resolver.c
+  src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+  src/core/ext/load_reporting/load_reporting.c
+  src/core/ext/load_reporting/load_reporting_filter.c
+  src/core/ext/lb_policy/grpclb/grpclb.c
+  src/core/ext/lb_policy/grpclb/load_balancer_api.c
+  src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
+  third_party/nanopb/pb_common.c
+  third_party/nanopb/pb_decode.c
+  third_party/nanopb/pb_encode.c
+  src/core/ext/lb_policy/pick_first/pick_first.c
+  src/core/ext/lb_policy/round_robin/round_robin.c
+  src/core/ext/census/context.c
+  src/core/ext/census/gen/census.pb.c
+  src/core/ext/census/grpc_context.c
+  src/core/ext/census/grpc_filter.c
+  src/core/ext/census/grpc_plugin.c
+  src/core/ext/census/initialize.c
+  src/core/ext/census/mlog.c
+  src/core/ext/census/operation.c
+  src/core/ext/census/placeholders.c
+  src/core/ext/census/tracing.c
+  src/core/plugin_registry/grpc_unsecure_plugin_registry.c
+)
+
+target_include_directories(grpc_unsecure
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_unsecure
+  gpr
+)
+
+  
+add_library(grpc++
+  src/cpp/client/secure_credentials.cc
+  src/cpp/common/auth_property_iterator.cc
+  src/cpp/common/secure_auth_context.cc
+  src/cpp/common/secure_channel_arguments.cc
+  src/cpp/common/secure_create_auth_context.cc
+  src/cpp/server/secure_server_credentials.cc
+  src/cpp/client/channel.cc
+  src/cpp/client/client_context.cc
+  src/cpp/client/create_channel.cc
+  src/cpp/client/create_channel_internal.cc
+  src/cpp/client/create_channel_posix.cc
+  src/cpp/client/credentials.cc
+  src/cpp/client/generic_stub.cc
+  src/cpp/client/insecure_credentials.cc
+  src/cpp/common/channel_arguments.cc
+  src/cpp/common/completion_queue.cc
+  src/cpp/common/core_codegen.cc
+  src/cpp/common/rpc_method.cc
+  src/cpp/server/async_generic_service.cc
+  src/cpp/server/create_default_thread_pool.cc
+  src/cpp/server/dynamic_thread_pool.cc
+  src/cpp/server/insecure_server_credentials.cc
+  src/cpp/server/server.cc
+  src/cpp/server/server_builder.cc
+  src/cpp/server/server_context.cc
+  src/cpp/server/server_credentials.cc
+  src/cpp/server/server_posix.cc
+  src/cpp/util/byte_buffer.cc
+  src/cpp/util/slice.cc
+  src/cpp/util/status.cc
+  src/cpp/util/string_ref.cc
+  src/cpp/util/time.cc
+  src/cpp/codegen/codegen_init.cc
+)
+
+target_include_directories(grpc++
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc++
+  ssl
+  libprotobuf
+  grpc
+)
+
+  
+add_library(grpc++_reflection
+  src/cpp/ext/proto_server_reflection.cc
+  src/cpp/ext/proto_server_reflection_plugin.cc
+  src/cpp/ext/reflection.grpc.pb.cc
+  src/cpp/ext/reflection.pb.cc
+)
+
+target_include_directories(grpc++_reflection
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc++_reflection
+  grpc++
+)
+
+  
+add_library(grpc++_unsecure
+  src/cpp/common/insecure_create_auth_context.cc
+  src/cpp/client/channel.cc
+  src/cpp/client/client_context.cc
+  src/cpp/client/create_channel.cc
+  src/cpp/client/create_channel_internal.cc
+  src/cpp/client/create_channel_posix.cc
+  src/cpp/client/credentials.cc
+  src/cpp/client/generic_stub.cc
+  src/cpp/client/insecure_credentials.cc
+  src/cpp/common/channel_arguments.cc
+  src/cpp/common/completion_queue.cc
+  src/cpp/common/core_codegen.cc
+  src/cpp/common/rpc_method.cc
+  src/cpp/server/async_generic_service.cc
+  src/cpp/server/create_default_thread_pool.cc
+  src/cpp/server/dynamic_thread_pool.cc
+  src/cpp/server/insecure_server_credentials.cc
+  src/cpp/server/server.cc
+  src/cpp/server/server_builder.cc
+  src/cpp/server/server_context.cc
+  src/cpp/server/server_credentials.cc
+  src/cpp/server/server_posix.cc
+  src/cpp/util/byte_buffer.cc
+  src/cpp/util/slice.cc
+  src/cpp/util/status.cc
+  src/cpp/util/string_ref.cc
+  src/cpp/util/time.cc
+  src/cpp/codegen/codegen_init.cc
+)
+
+target_include_directories(grpc++_unsecure
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc++_unsecure
+  libprotobuf
+  gpr
+  grpc_unsecure
+  grpc
+)
+
+  
+add_library(grpc_plugin_support
+  src/compiler/cpp_generator.cc
+  src/compiler/csharp_generator.cc
+  src/compiler/node_generator.cc
+  src/compiler/objective_c_generator.cc
+  src/compiler/python_generator.cc
+  src/compiler/ruby_generator.cc
+)
+
+target_include_directories(grpc_plugin_support
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_plugin_support
+  libprotoc
+)
+
+  
+add_library(grpc_csharp_ext
+  src/csharp/ext/grpc_csharp_ext.c
+)
+
+target_include_directories(grpc_csharp_ext
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_csharp_ext
+  grpc
+  gpr
+)
+
+
+
+add_executable(gen_hpack_tables
+  tools/codegen/core/gen_hpack_tables.c
+)
+
+target_include_directories(gen_hpack_tables
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(gen_hpack_tables
+  gpr
+  grpc
+)
+
+
+add_executable(gen_legal_metadata_characters
+  tools/codegen/core/gen_legal_metadata_characters.c
+)
+
+target_include_directories(gen_legal_metadata_characters
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+
+
+add_executable(grpc_create_jwt
+  test/core/security/create_jwt.c
+)
+
+target_include_directories(grpc_create_jwt
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_create_jwt
+  ssl
+  grpc
+  gpr
+)
+
+
+add_executable(grpc_print_google_default_creds_token
+  test/core/security/print_google_default_creds_token.c
+)
+
+target_include_directories(grpc_print_google_default_creds_token
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_print_google_default_creds_token
+  grpc
+  gpr
+)
+
+
+add_executable(grpc_verify_jwt
+  test/core/security/verify_jwt.c
+)
+
+target_include_directories(grpc_verify_jwt
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_verify_jwt
+  grpc
+  gpr
+)
+
+
+add_executable(grpc_cpp_plugin
+  src/compiler/cpp_plugin.cc
+)
+
+target_include_directories(grpc_cpp_plugin
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_cpp_plugin
+  libprotoc
+  grpc_plugin_support
+)
+
+
+add_executable(grpc_csharp_plugin
+  src/compiler/csharp_plugin.cc
+)
+
+target_include_directories(grpc_csharp_plugin
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_csharp_plugin
+  libprotoc
+  grpc_plugin_support
+)
+
+
+add_executable(grpc_node_plugin
+  src/compiler/node_plugin.cc
+)
+
+target_include_directories(grpc_node_plugin
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_node_plugin
+  libprotoc
+  grpc_plugin_support
+)
+
+
+add_executable(grpc_objective_c_plugin
+  src/compiler/objective_c_plugin.cc
+)
+
+target_include_directories(grpc_objective_c_plugin
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_objective_c_plugin
+  libprotoc
+  grpc_plugin_support
+)
+
+
+add_executable(grpc_python_plugin
+  src/compiler/python_plugin.cc
+)
+
+target_include_directories(grpc_python_plugin
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_python_plugin
+  libprotoc
+  grpc_plugin_support
+)
+
+
+add_executable(grpc_ruby_plugin
+  src/compiler/ruby_plugin.cc
+)
+
+target_include_directories(grpc_ruby_plugin
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+)
+
+target_link_libraries(grpc_ruby_plugin
+  libprotoc
+  grpc_plugin_support
+)
+
+
+
+
+
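The CMake build added above is explicitly experimental, and the generated file currently only defines library and tool targets. A minimal invocation, assuming the third_party submodules (boringssl, protobuf, zlib) are checked out, might look like:

  $ git submodule update --init
  $ mkdir -p build && cd build
  $ cmake ..
  $ make gpr grpc grpc++ grpc_cpp_plugin

BORINGSSL_ROOT_DIR, PROTOBUF_ROOT_DIR and ZLIB_ROOT_DIR can be passed with -D to point at checkouts outside third_party/.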
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 35eb5e613893222cf5fe8aac33f5a84ff255eb06..56bb4b6eb9805cfd449e493b0d8b7627a14ad7c1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -53,6 +53,13 @@ To know about the list of available commands, do this:
 
 `./tools/run_tests/run_tests.py -h`
 
+If you are running tests for ObjC on OS X, follow these steps before running the tests:
+* install the Xcode command-line tools by running
+`sudo xcode-select --install`
+* install MacPorts from https://www.macports.org/install.php
+* install autoconf, automake, libtool, gflags and cmake using MacPorts
+* restart your terminal window or run `source ~/.bash_profile` to pick up the new PATH changes.
+
 ## Adding or removing source code
 
 Each language uses its own build system to work. Currently, the root's Makefile
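
The OS X prerequisites added above condense to a short shell session; this is only a sketch that mirrors the list, with the MacPorts package names assumed to match the tool names:

  $ sudo xcode-select --install
  # install MacPorts from https://www.macports.org/install.php, then:
  $ sudo port install autoconf automake libtool gflags cmake
  $ source ~/.bash_profile   # or open a new terminal so the PATH changes take effect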
diff --git a/INSTALL.md b/INSTALL.md
index 66e6c33a77593760869b582ba3e0c03c165a70e8..6718aad120d79dc1c8383fa7bb4bdab75879d0f2 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -49,7 +49,7 @@ For developers who are interested to contribute, here is how to compile the
 gRPC C Core library.
 
 ```sh
- $ git clone https://github.com/grpc/grpc.git
+ $ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
  $ cd grpc
  $ git submodule update --init
  $ make
diff --git a/MANIFEST.md b/MANIFEST.md
index 77e014002d96560cef7399db297caf0a8800417a..a0e79e853273ad4b32597e174ed19e065cc6d9ed 100644
--- a/MANIFEST.md
+++ b/MANIFEST.md
@@ -19,7 +19,6 @@
 * [requirements.txt](requirements.txt)
 * [setup.cfg](setup.cfg)
 * [setup.py](setup.py)
-* [tox.ini](tox.ini)
 * [PYTHON-MANIFEST.in](PYTHON-MANIFEST.in)
 
 ## Ruby
diff --git a/Makefile b/Makefile
index 385330cc74cdaf6509b6e010cc796ae0d99f94ff..ab39f68f2054428a1ebbfb36b6b01b3c36e1c804 100644
--- a/Makefile
+++ b/Makefile
@@ -200,6 +200,7 @@ LD_tsan = clang
 LDXX_tsan = clang++
 CPPFLAGS_tsan = -O0 -fsanitize=thread -fno-omit-frame-pointer -Wno-unused-command-line-argument -DGPR_NO_DIRECT_SYSCALLS
 LDFLAGS_tsan = -fsanitize=thread
+DEFINES_tsan = GRPC_TSAN
 DEFINES_tsan += GRPC_TEST_SLOWDOWN_BUILD_FACTOR=5
 
 VALID_CONFIG_stapprof = 1
@@ -414,7 +415,7 @@ E = @echo
 Q = @
 endif
 
-VERSION = 0.15.0-dev
+VERSION = 1.1.0-dev
 
 CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
 CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
@@ -447,7 +448,7 @@ PC_TEMPLATE = prefix=$(prefix),exec_prefix=\$${prefix},includedir=\$${prefix}/in
 ifeq ($(SYSTEM),MINGW32)
 SHARED_EXT = dll
 SHARED_PREFIX =
-SHARED_VERSION = -0
+SHARED_VERSION = -1
 else ifeq ($(SYSTEM),Darwin)
 SHARED_EXT = dylib
 SHARED_PREFIX = lib
@@ -491,7 +492,6 @@ PROTOC_CHECK_CMD = which protoc > /dev/null
 PROTOC_CHECK_VERSION_CMD = protoc --version | grep -q libprotoc.3
 DTRACE_CHECK_CMD = which dtrace > /dev/null
 SYSTEMTAP_HEADERS_CHECK_CMD = $(CC) $(CPPFLAGS) $(CFLAGS) -o $(TMPOUT) test/build/systemtap.c $(LDFLAGS)
-ZOOKEEPER_CHECK_CMD = $(CC) $(CPPFLAGS) $(CFLAGS) -o $(TMPOUT) test/build/zookeeper.c $(LDFLAGS) -lzookeeper_mt
 
 ifndef REQUIRE_CUSTOM_LIBRARIES_$(CONFIG)
 HAS_SYSTEM_PERFTOOLS ?= $(shell $(PERFTOOLS_CHECK_CMD) 2> /dev/null && echo true || echo false)
@@ -559,8 +559,6 @@ ifeq ($(HAS_SYSTEMTAP),true)
 CACHE_MK += HAS_SYSTEMTAP = true,
 endif
 
-HAS_ZOOKEEPER = $(shell $(ZOOKEEPER_CHECK_CMD) 2> /dev/null && echo true || echo false)
-
 # Note that for testing purposes, one can do:
 #   make HAS_EMBEDDED_OPENSSL_ALPN=false
 # to emulate the fact we do not have OpenSSL in the third_party folder.
@@ -704,14 +702,6 @@ PC_LIBS_PRIVATE = $(PC_LIBS_GRPC)
 PC_LIB = -lgrpc
 GRPC_UNSECURE_PC_FILE := $(PC_TEMPLATE)
 
-# grpc_zookeeper .pc file
-PC_NAME = gRPC zookeeper
-PC_DESCRIPTION = gRPC's zookeeper plugin
-PC_CFLAGS =
-PC_REQUIRES_PRIVATE =
-PC_LIBS_PRIVATE = -lzookeeper_mt
-GRPC_ZOOKEEPER_PC_FILE := $(PC_TEMPLATE)
-
 PROTOBUF_PKG_CONFIG = false
 
 PC_REQUIRES_GRPCXX =
@@ -889,6 +879,7 @@ algorithm_test: $(BINDIR)/$(CONFIG)/algorithm_test
 alloc_test: $(BINDIR)/$(CONFIG)/alloc_test
 alpn_test: $(BINDIR)/$(CONFIG)/alpn_test
 api_fuzzer: $(BINDIR)/$(CONFIG)/api_fuzzer
+bad_server_response_test: $(BINDIR)/$(CONFIG)/bad_server_response_test
 bin_decoder_test: $(BINDIR)/$(CONFIG)/bin_decoder_test
 bin_encoder_test: $(BINDIR)/$(CONFIG)/bin_encoder_test
 census_context_test: $(BINDIR)/$(CONFIG)/census_context_test
@@ -904,6 +895,7 @@ dns_resolver_connectivity_test: $(BINDIR)/$(CONFIG)/dns_resolver_connectivity_te
 dns_resolver_test: $(BINDIR)/$(CONFIG)/dns_resolver_test
 dualstack_socket_test: $(BINDIR)/$(CONFIG)/dualstack_socket_test
 endpoint_pair_test: $(BINDIR)/$(CONFIG)/endpoint_pair_test
+ev_epoll_linux_test: $(BINDIR)/$(CONFIG)/ev_epoll_linux_test
 fd_conservation_posix_test: $(BINDIR)/$(CONFIG)/fd_conservation_posix_test
 fd_posix_test: $(BINDIR)/$(CONFIG)/fd_posix_test
 fling_client: $(BINDIR)/$(CONFIG)/fling_client
@@ -993,7 +985,6 @@ time_averaged_stats_test: $(BINDIR)/$(CONFIG)/time_averaged_stats_test
 timeout_encoding_test: $(BINDIR)/$(CONFIG)/timeout_encoding_test
 timer_heap_test: $(BINDIR)/$(CONFIG)/timer_heap_test
 timer_list_test: $(BINDIR)/$(CONFIG)/timer_list_test
-timers_test: $(BINDIR)/$(CONFIG)/timers_test
 transport_connectivity_state_test: $(BINDIR)/$(CONFIG)/transport_connectivity_state_test
 transport_metadata_test: $(BINDIR)/$(CONFIG)/transport_metadata_test
 transport_security_test: $(BINDIR)/$(CONFIG)/transport_security_test
@@ -1151,7 +1142,6 @@ run_dep_checks:
 	$(PERFTOOLS_CHECK_CMD) || true
 	$(PROTOBUF_CHECK_CMD) || true
 	$(PROTOC_CHECK_VERSION_CMD) || true
-	$(ZOOKEEPER_CHECK_CMD) || true
 
 third_party/protobuf/configure:
 	$(E) "[AUTOGEN] Preparing protobuf"
@@ -1170,29 +1160,16 @@ $(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a: third_party/protobuf/configure
 
 static: static_c static_cxx
 
-static_c: pc_c pc_c_unsecure cache.mk pc_c_zookeeper $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgrpc_cronet.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a static_zookeeper_libs
-
+static_c: pc_c pc_c_unsecure cache.mk  $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgrpc_cronet.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a
 
 static_cxx: pc_cxx pc_cxx_unsecure cache.mk  $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc++_reflection.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a
 
 shared: shared_c shared_cxx
 
-shared_c: pc_c pc_c_unsecure cache.mk pc_c_zookeeper $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) shared_zookeeper_libs
-
+shared_c: pc_c pc_c_unsecure cache.mk $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT)
 shared_cxx: pc_cxx pc_cxx_unsecure cache.mk $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT)
 
 shared_csharp: shared_c  $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT)
-ifeq ($(HAS_ZOOKEEPER),true)
-static_zookeeper_libs:
-shared_zookeeper_libs:
-else
-
-static_zookeeper_libs:
-
-shared_zookeeper_libs:
-
-endif
-
 grpc_csharp_ext: shared_csharp
 
 plugins: $(PROTOC_PLUGINS)
@@ -1204,12 +1181,6 @@ pc_c: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc.pc
 
 pc_c_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc
 
-ifeq ($(HAS_ZOOKEEPER),true)
-pc_c_zookeeper: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_zookeeper.pc
-else
-pc_c_zookeeper:
-endif
-
 pc_cxx: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc
 
 pc_cxx_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc
@@ -1221,20 +1192,14 @@ privatelibs_cxx:  $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG
 endif
 
 
-ifeq ($(HAS_ZOOKEEPER),true)
-privatelibs_zookeeper: 
-else
-privatelibs_zookeeper:
-endif
-
-
-buildtests: buildtests_c buildtests_cxx buildtests_zookeeper
+buildtests: buildtests_c buildtests_cxx
 
 buildtests_c: privatelibs_c \
   $(BINDIR)/$(CONFIG)/alarm_test \
   $(BINDIR)/$(CONFIG)/algorithm_test \
   $(BINDIR)/$(CONFIG)/alloc_test \
   $(BINDIR)/$(CONFIG)/alpn_test \
+  $(BINDIR)/$(CONFIG)/bad_server_response_test \
   $(BINDIR)/$(CONFIG)/bin_decoder_test \
   $(BINDIR)/$(CONFIG)/bin_encoder_test \
   $(BINDIR)/$(CONFIG)/census_context_test \
@@ -1249,6 +1214,7 @@ buildtests_c: privatelibs_c \
   $(BINDIR)/$(CONFIG)/dns_resolver_test \
   $(BINDIR)/$(CONFIG)/dualstack_socket_test \
   $(BINDIR)/$(CONFIG)/endpoint_pair_test \
+  $(BINDIR)/$(CONFIG)/ev_epoll_linux_test \
   $(BINDIR)/$(CONFIG)/fd_conservation_posix_test \
   $(BINDIR)/$(CONFIG)/fd_posix_test \
   $(BINDIR)/$(CONFIG)/fling_client \
@@ -1280,6 +1246,7 @@ buildtests_c: privatelibs_c \
   $(BINDIR)/$(CONFIG)/grpc_channel_stack_test \
   $(BINDIR)/$(CONFIG)/grpc_completion_queue_test \
   $(BINDIR)/$(CONFIG)/grpc_credentials_test \
+  $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 \
   $(BINDIR)/$(CONFIG)/grpc_invalid_channel_args_test \
   $(BINDIR)/$(CONFIG)/grpc_json_token_test \
   $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test \
@@ -1324,7 +1291,6 @@ buildtests_c: privatelibs_c \
   $(BINDIR)/$(CONFIG)/timeout_encoding_test \
   $(BINDIR)/$(CONFIG)/timer_heap_test \
   $(BINDIR)/$(CONFIG)/timer_list_test \
-  $(BINDIR)/$(CONFIG)/timers_test \
   $(BINDIR)/$(CONFIG)/transport_connectivity_state_test \
   $(BINDIR)/$(CONFIG)/transport_metadata_test \
   $(BINDIR)/$(CONFIG)/transport_security_test \
@@ -1388,7 +1354,7 @@ buildtests_c: privatelibs_c \
 
 
 ifeq ($(EMBED_OPENSSL),true)
-buildtests_cxx: buildtests_zookeeper privatelibs_cxx \
+buildtests_cxx: privatelibs_cxx \
   $(BINDIR)/$(CONFIG)/alarm_cpp_test \
   $(BINDIR)/$(CONFIG)/async_end2end_test \
   $(BINDIR)/$(CONFIG)/auth_property_iterator_test \
@@ -1473,7 +1439,7 @@ buildtests_cxx: buildtests_zookeeper privatelibs_cxx \
   $(BINDIR)/$(CONFIG)/boringssl_ssl_test \
 
 else
-buildtests_cxx: buildtests_zookeeper privatelibs_cxx \
+buildtests_cxx: privatelibs_cxx \
   $(BINDIR)/$(CONFIG)/alarm_cpp_test \
   $(BINDIR)/$(CONFIG)/async_end2end_test \
   $(BINDIR)/$(CONFIG)/auth_property_iterator_test \
@@ -1522,17 +1488,9 @@ buildtests_cxx: buildtests_zookeeper privatelibs_cxx \
 endif
 
 
-ifeq ($(HAS_ZOOKEEPER),true)
-buildtests_zookeeper: privatelibs_zookeeper \
-
-else
-buildtests_zookeeper:
-endif
-
-
-test: test_c test_cxx test_zookeeper
+test: test_c test_cxx
 
-flaky_test: flaky_test_c flaky_test_cxx flaky_test_zookeeper
+flaky_test: flaky_test_c flaky_test_cxx
 
 test_c: buildtests_c
 	$(E) "[RUN]     Testing alarm_test"
@@ -1543,6 +1501,8 @@ test_c: buildtests_c
 	$(Q) $(BINDIR)/$(CONFIG)/alloc_test || ( echo test alloc_test failed ; exit 1 )
 	$(E) "[RUN]     Testing alpn_test"
 	$(Q) $(BINDIR)/$(CONFIG)/alpn_test || ( echo test alpn_test failed ; exit 1 )
+	$(E) "[RUN]     Testing bad_server_response_test"
+	$(Q) $(BINDIR)/$(CONFIG)/bad_server_response_test || ( echo test bad_server_response_test failed ; exit 1 )
 	$(E) "[RUN]     Testing bin_decoder_test"
 	$(Q) $(BINDIR)/$(CONFIG)/bin_decoder_test || ( echo test bin_decoder_test failed ; exit 1 )
 	$(E) "[RUN]     Testing bin_encoder_test"
@@ -1571,6 +1531,8 @@ test_c: buildtests_c
 	$(Q) $(BINDIR)/$(CONFIG)/dualstack_socket_test || ( echo test dualstack_socket_test failed ; exit 1 )
 	$(E) "[RUN]     Testing endpoint_pair_test"
 	$(Q) $(BINDIR)/$(CONFIG)/endpoint_pair_test || ( echo test endpoint_pair_test failed ; exit 1 )
+	$(E) "[RUN]     Testing ev_epoll_linux_test"
+	$(Q) $(BINDIR)/$(CONFIG)/ev_epoll_linux_test || ( echo test ev_epoll_linux_test failed ; exit 1 )
 	$(E) "[RUN]     Testing fd_conservation_posix_test"
 	$(Q) $(BINDIR)/$(CONFIG)/fd_conservation_posix_test || ( echo test fd_conservation_posix_test failed ; exit 1 )
 	$(E) "[RUN]     Testing fd_posix_test"
@@ -1705,8 +1667,6 @@ test_c: buildtests_c
 	$(Q) $(BINDIR)/$(CONFIG)/timer_heap_test || ( echo test timer_heap_test failed ; exit 1 )
 	$(E) "[RUN]     Testing timer_list_test"
 	$(Q) $(BINDIR)/$(CONFIG)/timer_list_test || ( echo test timer_list_test failed ; exit 1 )
-	$(E) "[RUN]     Testing timers_test"
-	$(Q) $(BINDIR)/$(CONFIG)/timers_test || ( echo test timers_test failed ; exit 1 )
 	$(E) "[RUN]     Testing transport_connectivity_state_test"
 	$(Q) $(BINDIR)/$(CONFIG)/transport_connectivity_state_test || ( echo test transport_connectivity_state_test failed ; exit 1 )
 	$(E) "[RUN]     Testing transport_metadata_test"
@@ -1754,7 +1714,7 @@ flaky_test_c: buildtests_c
 	$(Q) $(BINDIR)/$(CONFIG)/mlog_test || ( echo test mlog_test failed ; exit 1 )
 
 
-test_cxx: test_zookeeper buildtests_cxx
+test_cxx: buildtests_cxx
 	$(E) "[RUN]     Testing alarm_cpp_test"
 	$(Q) $(BINDIR)/$(CONFIG)/alarm_cpp_test || ( echo test alarm_cpp_test failed ; exit 1 )
 	$(E) "[RUN]     Testing async_end2end_test"
@@ -1822,18 +1782,6 @@ test_cxx: test_zookeeper buildtests_cxx
 flaky_test_cxx: buildtests_cxx
 
 
-ifeq ($(HAS_ZOOKEEPER),true)
-test_zookeeper: buildtests_zookeeper
-
-
-flaky_test_zookeeper: buildtests_zookeeper
-
-else
-test_zookeeper:
-flaky_test_zookeeper:
-endif
-
-
 test_python: static_c
 	$(E) "[RUN]     Testing python code"
 	$(Q) tools/run_tests/run_tests.py -lpython -c$(CONFIG)
@@ -1842,7 +1790,7 @@ test_python: static_c
 tools: tools_c tools_cxx
 
 
-tools_c: privatelibs_c $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token $(BINDIR)/$(CONFIG)/grpc_verify_jwt
+tools_c: privatelibs_c $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token $(BINDIR)/$(CONFIG)/grpc_verify_jwt
 
 tools_cxx: privatelibs_cxx
 
@@ -1871,8 +1819,6 @@ ifeq ($(CONFIG),opt)
 	$(Q) $(STRIP) $(LIBDIR)/$(CONFIG)/libgrpc_cronet.a
 	$(E) "[STRIP]   Stripping libgrpc_unsecure.a"
 	$(Q) $(STRIP) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a
-ifeq ($(HAS_ZOOKEEPER),true)
-endif
 endif
 
 strip-static_cxx: static_cxx
@@ -1895,8 +1841,6 @@ ifeq ($(CONFIG),opt)
 	$(Q) $(STRIP) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT)
 	$(E) "[STRIP]   Stripping $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT)"
 	$(Q) $(STRIP) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT)
-ifeq ($(HAS_ZOOKEEPER),true)
-endif
 endif
 
 strip-shared_cxx: shared_cxx
@@ -1929,11 +1873,6 @@ $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc:
 	$(Q) mkdir -p $(@D)
 	$(Q) echo "$(GRPC_UNSECURE_PC_FILE)" | tr , '\n' >$@
 
-$(LIBDIR)/$(CONFIG)/pkgconfig/grpc_zookeeper.pc:
-	$(E) "[MAKE]    Generating $@"
-	$(Q) mkdir -p $(@D)
-	$(Q) echo "$(GRPC_ZOOKEEPER_PC_FILE)" | tr , '\n' >$@
-
 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc:
 	$(E) "[MAKE]    Generating $@"
 	$(Q) mkdir -p $(@D)
@@ -2209,8 +2148,6 @@ install-static_c: static_c strip-static_c install-pkg-config_c
 	$(E) "[INSTALL] Installing libgrpc_unsecure.a"
 	$(Q) $(INSTALL) -d $(prefix)/lib
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(prefix)/lib/libgrpc_unsecure.a
-ifeq ($(HAS_ZOOKEEPER),true)
-endif
 
 install-static_cxx: static_cxx strip-static_cxx install-pkg-config_cxx
 	$(E) "[INSTALL] Installing libgrpc++.a"
@@ -2232,7 +2169,7 @@ install-shared_c: shared_c strip-shared_c install-pkg-config_c
 ifeq ($(SYSTEM),MINGW32)
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgpr-imp.a $(prefix)/lib/libgpr-imp.a
 else ifneq ($(SYSTEM),Darwin)
-	$(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so.0
+	$(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so.1
 	$(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so
 endif
 	$(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT)"
@@ -2241,7 +2178,7 @@ endif
 ifeq ($(SYSTEM),MINGW32)
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc-imp.a $(prefix)/lib/libgrpc-imp.a
 else ifneq ($(SYSTEM),Darwin)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so.0
+	$(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so
 endif
 	$(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT)"
@@ -2250,7 +2187,7 @@ endif
 ifeq ($(SYSTEM),MINGW32)
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_cronet-imp.a $(prefix)/lib/libgrpc_cronet-imp.a
 else ifneq ($(SYSTEM),Darwin)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so.0
+	$(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so
 endif
 	$(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT)"
@@ -2259,11 +2196,9 @@ endif
 ifeq ($(SYSTEM),MINGW32)
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure-imp.a $(prefix)/lib/libgrpc_unsecure-imp.a
 else ifneq ($(SYSTEM),Darwin)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so.0
+	$(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so
 endif
-ifeq ($(HAS_ZOOKEEPER),true)
-endif
 ifneq ($(SYSTEM),MINGW32)
 ifneq ($(SYSTEM),Darwin)
 	$(Q) ldconfig || true
@@ -2278,7 +2213,7 @@ install-shared_cxx: shared_cxx strip-shared_cxx install-shared_c install-pkg-con
 ifeq ($(SYSTEM),MINGW32)
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++-imp.a $(prefix)/lib/libgrpc++-imp.a
 else ifneq ($(SYSTEM),Darwin)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so.0
+	$(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so
 endif
 	$(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT)"
@@ -2287,7 +2222,7 @@ endif
 ifeq ($(SYSTEM),MINGW32)
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection-imp.a $(prefix)/lib/libgrpc++_reflection-imp.a
 else ifneq ($(SYSTEM),Darwin)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so.0
+	$(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so
 endif
 	$(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT)"
@@ -2296,11 +2231,9 @@ endif
 ifeq ($(SYSTEM),MINGW32)
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure-imp.a $(prefix)/lib/libgrpc++_unsecure-imp.a
 else ifneq ($(SYSTEM),Darwin)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so.0
+	$(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so
 endif
-ifeq ($(HAS_ZOOKEEPER),true)
-endif
 ifneq ($(SYSTEM),MINGW32)
 ifneq ($(SYSTEM),Darwin)
 	$(Q) ldconfig || true
@@ -2315,11 +2248,9 @@ install-shared_csharp: shared_csharp strip-shared_csharp
 ifeq ($(SYSTEM),MINGW32)
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext-imp.a $(prefix)/lib/libgrpc_csharp_ext-imp.a
 else ifneq ($(SYSTEM),Darwin)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so.0
+	$(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so
 endif
-ifeq ($(HAS_ZOOKEEPER),true)
-endif
 ifneq ($(SYSTEM),MINGW32)
 ifneq ($(SYSTEM),Darwin)
 	$(Q) ldconfig || true
@@ -2346,14 +2277,11 @@ else
 	$(Q) $(INSTALL) $(BINDIR)/$(CONFIG)/grpc_ruby_plugin $(prefix)/bin/grpc_ruby_plugin
 endif
 
-install-pkg-config_c: pc_c pc_c_unsecure pc_c_zookeeper
+install-pkg-config_c: pc_c pc_c_unsecure
 	$(E) "[INSTALL] Installing C pkg-config files"
 	$(Q) $(INSTALL) -d $(prefix)/lib/pkgconfig
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc.pc $(prefix)/lib/pkgconfig/grpc.pc
 	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc $(prefix)/lib/pkgconfig/grpc_unsecure.pc
-ifeq ($(HAS_ZOOKEEPER),true)
-	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_zookeeper.pc $(prefix)/lib/pkgconfig/grpc_zookeeper.pc
-endif
 
 install-pkg-config_cxx: pc_cxx pc_cxx_unsecure
 	$(E) "[INSTALL] Installing C++ pkg-config files"
@@ -2513,8 +2441,8 @@ $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT): $(LIBGPR_OBJS)  $(ZLI
 ifeq ($(SYSTEM),Darwin)
 	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS)
 else
-	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.0 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS)
-	$(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so.0
+	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.1 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS)
+	$(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so.1
 	$(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so
 endif
 endif
@@ -2569,6 +2497,7 @@ LIBGRPC_SRC = \
     src/core/lib/iomgr/endpoint_pair_posix.c \
     src/core/lib/iomgr/endpoint_pair_windows.c \
     src/core/lib/iomgr/error.c \
+    src/core/lib/iomgr/ev_epoll_linux.c \
     src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
     src/core/lib/iomgr/ev_poll_posix.c \
     src/core/lib/iomgr/ev_posix.c \
@@ -2579,6 +2508,7 @@ LIBGRPC_SRC = \
     src/core/lib/iomgr/iomgr_posix.c \
     src/core/lib/iomgr/iomgr_windows.c \
     src/core/lib/iomgr/load_file.c \
+    src/core/lib/iomgr/network_status_tracker.c \
     src/core/lib/iomgr/polling_entity.c \
     src/core/lib/iomgr/pollset_set_windows.c \
     src/core/lib/iomgr/pollset_windows.c \
@@ -2805,8 +2735,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_OBJS)  $(Z
 ifeq ($(SYSTEM),Darwin)
 	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
 else
-	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so.0
+	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
+	$(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so
 endif
 endif
@@ -2840,6 +2770,7 @@ LIBGRPC_CRONET_SRC = \
     src/core/lib/iomgr/endpoint_pair_posix.c \
     src/core/lib/iomgr/endpoint_pair_windows.c \
     src/core/lib/iomgr/error.c \
+    src/core/lib/iomgr/ev_epoll_linux.c \
     src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
     src/core/lib/iomgr/ev_poll_posix.c \
     src/core/lib/iomgr/ev_posix.c \
@@ -2850,6 +2781,7 @@ LIBGRPC_CRONET_SRC = \
     src/core/lib/iomgr/iomgr_posix.c \
     src/core/lib/iomgr/iomgr_windows.c \
     src/core/lib/iomgr/load_file.c \
+    src/core/lib/iomgr/network_status_tracker.c \
     src/core/lib/iomgr/polling_entity.c \
     src/core/lib/iomgr/pollset_set_windows.c \
     src/core/lib/iomgr/pollset_windows.c \
@@ -3052,8 +2984,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_CRO
 ifeq ($(SYSTEM),Darwin)
 	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
 else
-	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so.0
+	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
+	$(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so
 endif
 endif
@@ -3103,6 +3035,7 @@ LIBGRPC_TEST_UTIL_SRC = \
     src/core/lib/iomgr/endpoint_pair_posix.c \
     src/core/lib/iomgr/endpoint_pair_windows.c \
     src/core/lib/iomgr/error.c \
+    src/core/lib/iomgr/ev_epoll_linux.c \
     src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
     src/core/lib/iomgr/ev_poll_posix.c \
     src/core/lib/iomgr/ev_posix.c \
@@ -3113,6 +3046,7 @@ LIBGRPC_TEST_UTIL_SRC = \
     src/core/lib/iomgr/iomgr_posix.c \
     src/core/lib/iomgr/iomgr_windows.c \
     src/core/lib/iomgr/load_file.c \
+    src/core/lib/iomgr/network_status_tracker.c \
     src/core/lib/iomgr/polling_entity.c \
     src/core/lib/iomgr/pollset_set_windows.c \
     src/core/lib/iomgr/pollset_windows.c \
@@ -3291,6 +3225,7 @@ LIBGRPC_UNSECURE_SRC = \
     src/core/lib/iomgr/endpoint_pair_posix.c \
     src/core/lib/iomgr/endpoint_pair_windows.c \
     src/core/lib/iomgr/error.c \
+    src/core/lib/iomgr/ev_epoll_linux.c \
     src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
     src/core/lib/iomgr/ev_poll_posix.c \
     src/core/lib/iomgr/ev_posix.c \
@@ -3301,6 +3236,7 @@ LIBGRPC_UNSECURE_SRC = \
     src/core/lib/iomgr/iomgr_posix.c \
     src/core/lib/iomgr/iomgr_windows.c \
     src/core/lib/iomgr/load_file.c \
+    src/core/lib/iomgr/network_status_tracker.c \
     src/core/lib/iomgr/polling_entity.c \
     src/core/lib/iomgr/pollset_set_windows.c \
     src/core/lib/iomgr/pollset_windows.c \
@@ -3484,8 +3420,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_U
 ifeq ($(SYSTEM),Darwin)
 	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
 else
-	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so.0
+	$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
+	$(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so
 endif
 endif
@@ -3754,8 +3690,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_OBJS)
 ifeq ($(SYSTEM),Darwin)
 	$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc
 else
-	$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc
-	$(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so.0
+	$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc
+	$(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so
 endif
 endif
@@ -3881,8 +3817,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT): $(LIBGR
 ifeq ($(SYSTEM),Darwin)
 	$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++
 else
-	$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_reflection.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++
-	$(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so.0
+	$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_reflection.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++
+	$(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so
 endif
 endif
@@ -4231,8 +4167,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC
 ifeq ($(SYSTEM),Darwin)
 	$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure -lgrpc
 else
-	$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_unsecure.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure -lgrpc
-	$(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so.0
+	$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_unsecure.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure -lgrpc
+	$(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so
 endif
 endif
@@ -4495,7 +4431,7 @@ LIBINTEROP_SERVER_MAIN_SRC = \
     $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc \
     $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc \
     $(GENDIR)/src/proto/grpc/testing/test.pb.cc $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc \
-    test/cpp/interop/server_main.cc \
+    test/cpp/interop/interop_server.cc \
 
 PUBLIC_HEADERS_CXX += \
 
@@ -4541,7 +4477,7 @@ ifneq ($(NO_DEPS),true)
 -include $(LIBINTEROP_SERVER_MAIN_OBJS:.o=.dep)
 endif
 endif
-$(OBJDIR)/$(CONFIG)/test/cpp/interop/server_main.o: $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/test.pb.cc $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc
+$(OBJDIR)/$(CONFIG)/test/cpp/interop/interop_server.o: $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/test.pb.cc $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc
 
 
 LIBQPS_SRC = \
@@ -4661,8 +4597,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC
 ifeq ($(SYSTEM),Darwin)
 	$(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
 else
-	$(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_csharp_ext.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
-	$(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so.0
+	$(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_csharp_ext.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
+	$(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so.1
 	$(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so
 endif
 endif
@@ -6495,6 +6431,7 @@ LIBEND2END_TESTS_SRC = \
     test/core/end2end/tests/max_concurrent_streams.c \
     test/core/end2end/tests/max_message_length.c \
     test/core/end2end/tests/negative_deadline.c \
+    test/core/end2end/tests/network_status_change.c \
     test/core/end2end/tests/no_op.c \
     test/core/end2end/tests/payload.c \
     test/core/end2end/tests/ping.c \
@@ -6508,6 +6445,7 @@ LIBEND2END_TESTS_SRC = \
     test/core/end2end/tests/simple_delayed_request.c \
     test/core/end2end/tests/simple_metadata.c \
     test/core/end2end/tests/simple_request.c \
+    test/core/end2end/tests/streaming_error_response.c \
     test/core/end2end/tests/trailing_metadata.c \
 
 PUBLIC_HEADERS_C += \
@@ -6571,6 +6509,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
     test/core/end2end/tests/max_concurrent_streams.c \
     test/core/end2end/tests/max_message_length.c \
     test/core/end2end/tests/negative_deadline.c \
+    test/core/end2end/tests/network_status_change.c \
     test/core/end2end/tests/no_op.c \
     test/core/end2end/tests/payload.c \
     test/core/end2end/tests/ping.c \
@@ -6584,6 +6523,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
     test/core/end2end/tests/simple_delayed_request.c \
     test/core/end2end/tests/simple_metadata.c \
     test/core/end2end/tests/simple_request.c \
+    test/core/end2end/tests/streaming_error_response.c \
     test/core/end2end/tests/trailing_metadata.c \
 
 PUBLIC_HEADERS_C += \
@@ -6772,6 +6712,38 @@ endif
 endif
 
 
+BAD_SERVER_RESPONSE_TEST_SRC = \
+    test/core/end2end/bad_server_response_test.c \
+
+BAD_SERVER_RESPONSE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(BAD_SERVER_RESPONSE_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/bad_server_response_test: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/bad_server_response_test: $(BAD_SERVER_RESPONSE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libtest_tcp_server.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(BAD_SERVER_RESPONSE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libtest_tcp_server.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/bad_server_response_test
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/end2end/bad_server_response_test.o:  $(LIBDIR)/$(CONFIG)/libtest_tcp_server.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_bad_server_response_test: $(BAD_SERVER_RESPONSE_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(BAD_SERVER_RESPONSE_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
 BIN_DECODER_TEST_SRC = \
     test/core/transport/chttp2/bin_decoder_test.c \
 
@@ -7252,6 +7224,38 @@ endif
 endif
 
 
+EV_EPOLL_LINUX_TEST_SRC = \
+    test/core/iomgr/ev_epoll_linux_test.c \
+
+EV_EPOLL_LINUX_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(EV_EPOLL_LINUX_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/ev_epoll_linux_test: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/ev_epoll_linux_test: $(EV_EPOLL_LINUX_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(EV_EPOLL_LINUX_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/ev_epoll_linux_test
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/iomgr/ev_epoll_linux_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_ev_epoll_linux_test: $(EV_EPOLL_LINUX_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(EV_EPOLL_LINUX_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
 FD_CONSERVATION_POSIX_TEST_SRC = \
     test/core/iomgr/fd_conservation_posix_test.c \
 
@@ -8290,14 +8294,14 @@ else
 
 
 
-$(BINDIR)/$(CONFIG)/grpc_create_jwt: $(GRPC_CREATE_JWT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(BINDIR)/$(CONFIG)/grpc_create_jwt: $(GRPC_CREATE_JWT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
 	$(E) "[LD]      Linking $@"
 	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GRPC_CREATE_JWT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_create_jwt
+	$(Q) $(LD) $(LDFLAGS) $(GRPC_CREATE_JWT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_create_jwt
 
 endif
 
-$(OBJDIR)/$(CONFIG)/test/core/security/create_jwt.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(OBJDIR)/$(CONFIG)/test/core/security/create_jwt.o:  $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
 
 deps_grpc_create_jwt: $(GRPC_CREATE_JWT_OBJS:.o=.dep)
 
@@ -8482,14 +8486,14 @@ else
 
 
 
-$(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token: $(GRPC_PRINT_GOOGLE_DEFAULT_CREDS_TOKEN_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token: $(GRPC_PRINT_GOOGLE_DEFAULT_CREDS_TOKEN_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
 	$(E) "[LD]      Linking $@"
 	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GRPC_PRINT_GOOGLE_DEFAULT_CREDS_TOKEN_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token
+	$(Q) $(LD) $(LDFLAGS) $(GRPC_PRINT_GOOGLE_DEFAULT_CREDS_TOKEN_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token
 
 endif
 
-$(OBJDIR)/$(CONFIG)/test/core/security/print_google_default_creds_token.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(OBJDIR)/$(CONFIG)/test/core/security/print_google_default_creds_token.o:  $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
 
 deps_grpc_print_google_default_creds_token: $(GRPC_PRINT_GOOGLE_DEFAULT_CREDS_TOKEN_OBJS:.o=.dep)
 
@@ -8546,14 +8550,14 @@ else
 
 
 
-$(BINDIR)/$(CONFIG)/grpc_verify_jwt: $(GRPC_VERIFY_JWT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(BINDIR)/$(CONFIG)/grpc_verify_jwt: $(GRPC_VERIFY_JWT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
 	$(E) "[LD]      Linking $@"
 	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GRPC_VERIFY_JWT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_verify_jwt
+	$(Q) $(LD) $(LDFLAGS) $(GRPC_VERIFY_JWT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_verify_jwt
 
 endif
 
-$(OBJDIR)/$(CONFIG)/test/core/security/verify_jwt.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(OBJDIR)/$(CONFIG)/test/core/security/verify_jwt.o:  $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
 
 deps_grpc_verify_jwt: $(GRPC_VERIFY_JWT_OBJS:.o=.dep)
 
@@ -10100,38 +10104,6 @@ endif
 endif
 
 
-TIMERS_TEST_SRC = \
-    test/core/profiling/timers_test.c \
-
-TIMERS_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(TIMERS_TEST_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/timers_test: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/timers_test: $(TIMERS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(TIMERS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/timers_test
-
-endif
-
-$(OBJDIR)/$(CONFIG)/test/core/profiling/timers_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-
-deps_timers_test: $(TIMERS_TEST_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(TIMERS_TEST_OBJS:.o=.dep)
-endif
-endif
-
-
 TRANSPORT_CONNECTIVITY_STATE_TEST_SRC = \
     test/core/transport/connectivity_state_test.c \
 
@@ -15103,8 +15075,8 @@ test/cpp/end2end/test_service_impl.cc: $(OPENSSL_DEP)
 test/cpp/interop/client.cc: $(OPENSSL_DEP)
 test/cpp/interop/client_helper.cc: $(OPENSSL_DEP)
 test/cpp/interop/interop_client.cc: $(OPENSSL_DEP)
+test/cpp/interop/interop_server.cc: $(OPENSSL_DEP)
 test/cpp/interop/server_helper.cc: $(OPENSSL_DEP)
-test/cpp/interop/server_main.cc: $(OPENSSL_DEP)
 test/cpp/qps/client_async.cc: $(OPENSSL_DEP)
 test/cpp/qps/client_sync.cc: $(OPENSSL_DEP)
 test/cpp/qps/driver.cc: $(OPENSSL_DEP)
diff --git a/PYTHON-MANIFEST.in b/PYTHON-MANIFEST.in
index 534f4c1251a47de23fb37017a563e9b4060b35fb..175a47f15764865ff137482bea2bacdea3cc2c2c 100644
--- a/PYTHON-MANIFEST.in
+++ b/PYTHON-MANIFEST.in
@@ -1,12 +1,13 @@
 recursive-include src/python/grpcio/grpc *.c *.h *.py *.pyx *.pxd *.pxi *.python *.pem
 recursive-exclude src/python/grpcio/grpc/_cython *.so *.pyd
-graft src/python/grpcio/tests
+graft src/python/grpcio/grpcio.egg-info
 graft src/core
 graft src/boringssl
 graft include/grpc
 graft third_party/boringssl
 graft third_party/nanopb
 graft third_party/zlib
+include src/python/grpcio/build.py
 include src/python/grpcio/commands.py
 include src/python/grpcio/grpc_version.py
 include src/python/grpcio/grpc_core_dependencies.py
diff --git a/Rakefile b/Rakefile
index f208a24fd33e46bfc028944aa7103247a36ef32b..f44946fe93739299a051de3fea84caf684e61c04 100755
--- a/Rakefile
+++ b/Rakefile
@@ -77,7 +77,7 @@ task 'dlls' do
   grpc_config = ENV['GRPC_CONFIG'] || 'opt'
   verbose = ENV['V'] || '0'
 
-  env = 'CPPFLAGS="-D_WIN32_WINNT=0x600 -DUNICODE -D_UNICODE" '
+  env = 'CPPFLAGS="-D_WIN32_WINNT=0x600 -DUNICODE -D_UNICODE -Wno-unused-variable -Wno-unused-result" '
   env += 'LDFLAGS=-static '
   env += 'SYSTEM=MINGW32 '
   env += 'EMBED_ZLIB=true '
diff --git a/binding.gyp b/binding.gyp
index 9b536d64341ed677f92b404d907ff56fa9e48559..c39afe28645df5ada257a1eb57f119439b18d76f 100644
--- a/binding.gyp
+++ b/binding.gyp
@@ -581,6 +581,7 @@
         'src/core/lib/iomgr/endpoint_pair_posix.c',
         'src/core/lib/iomgr/endpoint_pair_windows.c',
         'src/core/lib/iomgr/error.c',
+        'src/core/lib/iomgr/ev_epoll_linux.c',
         'src/core/lib/iomgr/ev_poll_and_epoll_posix.c',
         'src/core/lib/iomgr/ev_poll_posix.c',
         'src/core/lib/iomgr/ev_posix.c',
@@ -591,6 +592,7 @@
         'src/core/lib/iomgr/iomgr_posix.c',
         'src/core/lib/iomgr/iomgr_windows.c',
         'src/core/lib/iomgr/load_file.c',
+        'src/core/lib/iomgr/network_status_tracker.c',
         'src/core/lib/iomgr/polling_entity.c',
         'src/core/lib/iomgr/pollset_set_windows.c',
         'src/core/lib/iomgr/pollset_windows.c',
diff --git a/build.yaml b/build.yaml
index 4f2d53e4d663d62586871d07b7c64e3e06dbb929..c1c6708c08c123a607e9dbd0a700134277e2f975 100644
--- a/build.yaml
+++ b/build.yaml
@@ -7,7 +7,7 @@ settings:
   '#3': Use "-preN" suffixes to identify pre-release versions
   '#4': Per-language overrides are possible with (eg) ruby_version tag here
   '#5': See the expand_version.py for all the quirks here
-  version: 0.15.0-dev
+  version: 1.1.0-dev
 filegroups:
 - name: census
   public_headers:
@@ -141,11 +141,6 @@ filegroups:
   - include/grpc/impl/codegen/sync_posix.h
   - include/grpc/impl/codegen/sync_windows.h
   - include/grpc/impl/codegen/time.h
-- name: grpc++_codegen_base_src
-  src:
-  - src/cpp/codegen/codegen_init.cc
-  uses:
-  - grpc++_codegen_base
 - name: grpc_base
   public_headers:
   - include/grpc/byte_buffer.h
@@ -173,6 +168,7 @@ filegroups:
   - src/core/lib/iomgr/endpoint.h
   - src/core/lib/iomgr/endpoint_pair.h
   - src/core/lib/iomgr/error.h
+  - src/core/lib/iomgr/ev_epoll_linux.h
   - src/core/lib/iomgr/ev_poll_and_epoll_posix.h
   - src/core/lib/iomgr/ev_poll_posix.h
   - src/core/lib/iomgr/ev_posix.h
@@ -183,6 +179,7 @@ filegroups:
   - src/core/lib/iomgr/iomgr_internal.h
   - src/core/lib/iomgr/iomgr_posix.h
   - src/core/lib/iomgr/load_file.h
+  - src/core/lib/iomgr/network_status_tracker.h
   - src/core/lib/iomgr/polling_entity.h
   - src/core/lib/iomgr/pollset.h
   - src/core/lib/iomgr/pollset_set.h
@@ -224,7 +221,6 @@ filegroups:
   - src/core/lib/surface/init.h
   - src/core/lib/surface/lame_client.h
   - src/core/lib/surface/server.h
-  - src/core/lib/surface/surface_trace.h
   - src/core/lib/transport/byte_stream.h
   - src/core/lib/transport/connectivity_state.h
   - src/core/lib/transport/metadata.h
@@ -251,6 +247,7 @@ filegroups:
   - src/core/lib/iomgr/endpoint_pair_posix.c
   - src/core/lib/iomgr/endpoint_pair_windows.c
   - src/core/lib/iomgr/error.c
+  - src/core/lib/iomgr/ev_epoll_linux.c
   - src/core/lib/iomgr/ev_poll_and_epoll_posix.c
   - src/core/lib/iomgr/ev_poll_posix.c
   - src/core/lib/iomgr/ev_posix.c
@@ -261,6 +258,7 @@ filegroups:
   - src/core/lib/iomgr/iomgr_posix.c
   - src/core/lib/iomgr/iomgr_windows.c
   - src/core/lib/iomgr/load_file.c
+  - src/core/lib/iomgr/network_status_tracker.c
   - src/core/lib/iomgr/polling_entity.c
   - src/core/lib/iomgr/pollset_set_windows.c
   - src/core/lib/iomgr/pollset_windows.c
@@ -756,6 +754,12 @@ filegroups:
   - include/grpc++/impl/codegen/time.h
   uses:
   - grpc_codegen
+- name: grpc++_codegen_base_src
+  language: c++
+  src:
+  - src/cpp/codegen/codegen_init.cc
+  uses:
+  - grpc++_codegen_base
 - name: grpc++_codegen_proto
   language: c++
   public_headers:
@@ -1123,7 +1127,7 @@ libs:
   - src/proto/grpc/testing/empty.proto
   - src/proto/grpc/testing/messages.proto
   - src/proto/grpc/testing/test.proto
-  - test/cpp/interop/server_main.cc
+  - test/cpp/interop/interop_server.cc
   deps:
   - interop_server_helper
   - grpc++_test_util
@@ -1244,6 +1248,17 @@ targets:
   - test/core/end2end/fuzzers/api_fuzzer_corpus
   dict: test/core/end2end/fuzzers/api_fuzzer.dictionary
   maxlen: 2048
+- name: bad_server_response_test
+  build: test
+  language: c
+  src:
+  - test/core/end2end/bad_server_response_test.c
+  deps:
+  - test_tcp_server
+  - grpc_test_util
+  - grpc
+  - gpr_test_util
+  - gpr
 - name: bin_decoder_test
   build: test
   language: c
@@ -1400,6 +1415,18 @@ targets:
   - grpc
   - gpr_test_util
   - gpr
+- name: ev_epoll_linux_test
+  build: test
+  language: c
+  src:
+  - test/core/iomgr/ev_epoll_linux_test.c
+  deps:
+  - grpc_test_util
+  - grpc
+  - gpr_test_util
+  - gpr
+  platforms:
+  - linux
 - name: fd_conservation_posix_test
   build: test
   language: c
@@ -1607,7 +1634,7 @@ targets:
   - gpr_test_util
   - gpr
 - name: gpr_sync_test
-  cpu_cost: 3
+  cpu_cost: 10
   build: test
   language: c
   src:
@@ -1616,7 +1643,7 @@ targets:
   - gpr_test_util
   - gpr
 - name: gpr_thd_test
-  cpu_cost: 1
+  cpu_cost: 10
   build: test
   language: c
   src:
@@ -1714,10 +1741,9 @@ targets:
   src:
   - test/core/security/create_jwt.c
   deps:
-  - grpc_test_util
   - grpc
-  - gpr_test_util
   - gpr
+  secure: true
 - name: grpc_credentials_test
   build: test
   language: c
@@ -1729,7 +1755,8 @@ targets:
   - gpr_test_util
   - gpr
 - name: grpc_fetch_oauth2
-  build: tool
+  build: test
+  run: false
   language: c
   src:
   - test/core/security/fetch_oauth2.c
@@ -1778,9 +1805,7 @@ targets:
   src:
   - test/core/security/print_google_default_creds_token.c
   deps:
-  - grpc_test_util
   - grpc
-  - gpr_test_util
   - gpr
 - name: grpc_security_connector_test
   build: test
@@ -1798,9 +1823,7 @@ targets:
   src:
   - test/core/security/verify_jwt.c
   deps:
-  - grpc_test_util
   - grpc
-  - gpr_test_util
   - gpr
 - name: hpack_parser_fuzzer_test
   build: fuzzer
@@ -2342,16 +2365,6 @@ targets:
   - grpc
   - gpr_test_util
   - gpr
-- name: timers_test
-  build: test
-  language: c
-  src:
-  - test/core/profiling/timers_test.c
-  deps:
-  - grpc_test_util
-  - grpc
-  - gpr_test_util
-  - gpr
 - name: transport_connectivity_state_test
   build: test
   language: c
@@ -3289,6 +3302,7 @@ configs:
     CPPFLAGS: -O0 -fsanitize=thread -fno-omit-frame-pointer -Wno-unused-command-line-argument
       -DGPR_NO_DIRECT_SYSCALLS
     CXX: clang++
+    DEFINES: GRPC_TSAN
     LD: clang
     LDFLAGS: -fsanitize=thread
     LDXX: clang++
diff --git a/composer.json b/composer.json
index 05ac0037146fae19f0631a63c0d7a7102a5636a2..0ebe0a11084478145130439d2a71f7671b63d3a9 100644
--- a/composer.json
+++ b/composer.json
@@ -5,16 +5,12 @@
   "keywords": ["rpc"],
   "homepage": "http://grpc.io",
   "license": "BSD-3-Clause",
-  "repositories": [
-    {
-      "type": "vcs",
-      "url": "https://github.com/stanley-cheung/Protobuf-PHP"
-    }
-  ],
   "require": {
     "php": ">=5.5.0",
-    "datto/protobuf-php": "dev-master",
-    "google/auth": "v0.7"
+    "stanley-cheung/protobuf-php": "dev-master"
+  },
+  "require-dev": {
+    "google/auth": "v0.9"
   },
   "autoload": {
     "psr-4": {
diff --git a/config.m4 b/config.m4
index 48d65013d6aea54e7589c300cb4e3235cba39dc1..d6d3c8a3a2657ad6422f3ebf66b70c1e3a5f222c 100644
--- a/config.m4
+++ b/config.m4
@@ -100,6 +100,7 @@ if test "$PHP_GRPC" != "no"; then
     src/core/lib/iomgr/endpoint_pair_posix.c \
     src/core/lib/iomgr/endpoint_pair_windows.c \
     src/core/lib/iomgr/error.c \
+    src/core/lib/iomgr/ev_epoll_linux.c \
     src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
     src/core/lib/iomgr/ev_poll_posix.c \
     src/core/lib/iomgr/ev_posix.c \
@@ -110,6 +111,7 @@ if test "$PHP_GRPC" != "no"; then
     src/core/lib/iomgr/iomgr_posix.c \
     src/core/lib/iomgr/iomgr_windows.c \
     src/core/lib/iomgr/load_file.c \
+    src/core/lib/iomgr/network_status_tracker.c \
     src/core/lib/iomgr/polling_entity.c \
     src/core/lib/iomgr/pollset_set_windows.c \
     src/core/lib/iomgr/pollset_windows.c \
diff --git a/doc/PROTOCOL-HTTP2.md b/doc/PROTOCOL-HTTP2.md
index 357ea72571b25c320ba49f49a0d56764483eddf6..31d694b803dbe56c688c484d2861ff6bb8178d3f 100644
--- a/doc/PROTOCOL-HTTP2.md
+++ b/doc/PROTOCOL-HTTP2.md
@@ -38,7 +38,7 @@ Request-Headers are delivered as HTTP2 headers in HEADERS + CONTINUATION frames.
 * **Nanosecond** → "n"
 * **Content-Type** → "content-type" "application/grpc" [("+proto" / "+json" / {_custom_})]
 * **Content-Coding** → "identity" / "gzip" / "deflate" / "snappy" / {_custom_}
-* **Message-Encoding** → "grpc-encoding" Content-Coding
+* <a name="message-encoding"></a>**Message-Encoding** → "grpc-encoding" Content-Coding
 * **Message-Accept-Encoding** → "grpc-accept-encoding" Content-Coding \*("," Content-Coding)
 * **User-Agent** → "user-agent" {_structured user-agent string_}
 * **Message-Type** → "grpc-message-type" {_type name for message schema_}
@@ -83,7 +83,7 @@ binary values' lengths being post-Base64.
 The repeated sequence of **Length-Prefixed-Message** items is delivered in DATA frames
 
 * **Length-Prefixed-Message** → Compressed-Flag Message-Length Message
-* **Compressed-Flag** → 0 / 1   # encoded as 1 byte unsigned integer
+* <a name="compressed-flag"></a>**Compressed-Flag** → 0 / 1   # encoded as 1 byte unsigned integer
 * **Message-Length** → {_length of Message_}  # encoded as 4 byte unsigned integer
 * **Message** → \*{binary octet}
 
diff --git a/doc/compression.md b/doc/compression.md
new file mode 100644
index 0000000000000000000000000000000000000000..de245d90feea4b9b61676e8aaf4c7b568b4b51a7
--- /dev/null
+++ b/doc/compression.md
@@ -0,0 +1,118 @@
+## **gRPC Compression**
+
+The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
+"SHOULD NOT", "RECOMMENDED",  "MAY", and "OPTIONAL" in this document are to be
+interpreted as described in [RFC 2119](http://www.ietf.org/rfc/rfc2119.txt).
+
+### Intent
+
+Compression is used to reduce the amount of bandwidth used between peers. The
+compression supported by gRPC acts _at the individual message level_, taking
+_message_ [as defined in the wire format
+document](PROTOCOL-HTTP2.md).
+
+The implementation supports different compression algorithms. A _default
+compression level_, to be used in the absence of message-specific settings, MAY
+be specified during channel creation.
+
+The ability to control compression settings per call and to enable/disable
+compression on a per message basis MAY be used to prevent CRIME/BEAST attacks.
+It also allows for asymmetric compression communication, whereby a response MAY
+be compressed differently, if at all.
+
+### Specification
+
+Compression MAY be configured by the Client Application by calling the
+appropriate API method. There are two scenarios where compression MAY be
+configured:
+
++  At channel creation time, which sets the channel default compression and
+   therefore the compression that SHALL be used in the absence of per-RPC
+   compression configuration.
++  At response time, via:
+   +  For unary RPCs, the {Client,Server}Context instance. 
+   +  For streaming RPCs, the {Client,Server}Writer instance. In this case,
+      configuration is reduced to disabling compression altogether.
+
+### Compression Method Asymmetry Between Peers
+
+A gRPC peer MAY choose to respond using a different compression method to that
+of the request, including not performing any compression, regardless of channel
+and RPC settings (for example, if compression would result in small or negative
+gains).
+
+When a message from a client compressed with an unsupported algorithm is
+processed by a server, it WILL result in an `UNIMPLEMENTED` error status on the
+server. The server will then include in its response a `grpc-accept-encoding`
+header specifying the algorithms it does accept. If an `UNIMPLEMENTED` error
+status is returned from the server despite having used one of the algorithms
+from the `grpc-accept-encoding` header, the cause MUST NOT be related to
+compression. Data sent from a server compressed with an algorithm not supported
+by the client WILL result in an `INTERNAL` error status on the client side.
+
+Note that a peer MAY choose to not disclose all the encodings it supports.
+However, if it receives a message compressed in an undisclosed but supported
+encoding, it MUST include said encoding in the response's
+`grpc-accept-encoding` header.
+
+For every message a server is requested to compress using an algorithm it knows
+the client doesn't support (as indicated by the last `grpc-accept-encoding`
+header received from the client), it SHALL send the message uncompressed. 
+
+### Specific Disabling of Compression
+
+If the user (through the previously described mechanisms) requests to disable
+compression, the next message MUST be sent uncompressed. This is instrumental in
+preventing BEAST/CRIME attacks. This applies to both the unary and streaming
+cases.
+
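+The sketch below is a minimal, non-normative illustration of disabling
+compression for a single outgoing message from the C core, using the
+`GRPC_WRITE_NO_COMPRESS` write flag. The `call`, `payload` and `tag` values are
+assumed to come from the surrounding application, `grpc_init()` is assumed to
+have been called already, and the `grpc_op` field layout may differ slightly
+between core releases.
+
+```c
+#include <string.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+
+/* Send one message on an existing call with compression disabled for this
+   write only; channel- and call-level compression settings are untouched. */
+static void send_uncompressed(grpc_call *call, grpc_byte_buffer *payload,
+                              void *tag) {
+  grpc_op op;
+  memset(&op, 0, sizeof(op));
+  op.op = GRPC_OP_SEND_MESSAGE;
+  op.flags = GRPC_WRITE_NO_COMPRESS; /* skip compression for this message */
+  op.data.send_message = payload;    /* payload to send uncompressed */
+  GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, &op, 1, tag, NULL));
+}
+```
+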
+### Compression Levels and Algorithms
+
+The set of supported algorithms is implementation dependent. In order to simplify
+the public API and to operate seamlessly across implementations (both in terms
+of languages and of different versions of the same one), we introduce the idea
+of _compression levels_ (such as "low", "medium", "high").
+
+Levels map to concrete algorithms and/or their settings (such as "low" mapping
+to "gzip -3" and "high" mapping to "gzip -9") automatically depending on what a
+peer is known to support. A server is always aware of what its clients support,
+as clients disclose it in their Message-Accept-Encoding header as part of their
+initial call. A client does not currently know a priori which algorithms a
+server supports. This issue can be addressed with an initial negotiation of
+capabilities or an automatic retry mechanism. These features will be implemented
+in the future. Currently however, compression levels are only supported at the
+server side, which is aware of the client's capabilities through the incoming
+Message-Accept-Encoding header.
+
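+As a purely illustrative sketch of the mapping idea (the helper below and its
+level-to-algorithm policy are hypothetical, not part of the public API), a
+server could resolve a requested level against the encodings a client
+advertised, one bit per algorithm enum value, roughly as follows:
+
+```c
+#include <stdint.h>
+
+#include <grpc/compression.h>
+
+/* Hypothetical helper: pick a concrete algorithm for an abstract level,
+   restricted to what the peer advertised in Message-Accept-Encoding. */
+static grpc_compression_algorithm algorithm_for_level(
+    grpc_compression_level level, uint32_t accepted_encodings) {
+  if (level == GRPC_COMPRESS_LEVEL_NONE) return GRPC_COMPRESS_NONE;
+  /* Illustrative policy: prefer gzip for medium/high levels, otherwise
+     deflate, falling back to identity if nothing suitable was advertised. */
+  if (level >= GRPC_COMPRESS_LEVEL_MED &&
+      (accepted_encodings & (1u << GRPC_COMPRESS_GZIP))) {
+    return GRPC_COMPRESS_GZIP;
+  }
+  if (accepted_encodings & (1u << GRPC_COMPRESS_DEFLATE)) {
+    return GRPC_COMPRESS_DEFLATE;
+  }
+  return GRPC_COMPRESS_NONE;
+}
+```
+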
+### Propagation to child RPCs
+
+The inheritance of the compression configuration by child RPCs is left up to the
+implementation. Note that in the absence of changes to the parent channel, its
+configuration will be used.
+
+### Test cases
+
+1. When a compression level is not specified for either the channel or the
+message, the default channel level _none_ is considered: data MUST NOT be
+compressed.
+1. When per-RPC compression configuration isn't present for a message, the
+channel compression configuration MUST be used.
+1. When a compression method (including no compression) is specified for an
+outgoing message, the message MUST be compressed accordingly.
+1. A message compressed by a client in a way not supported by its server MUST
+fail with status `UNIMPLEMENTED`, its associated description indicating the
+unsupported condition as well as the supported ones. The returned
+`grpc-accept-encoding` header MUST NOT contain the compression method
+(encoding) used.
+1. A message compressed by a server in a way not supported by its client MUST
+fail with status `INTERNAL`, its associated description indicating the
+unsupported condition as well as the supported ones. The returned
+`grpc-accept-encoding` header MUST NOT contain the compression method
+(encoding) used.
+1. An ill-constructed message with its [Compressed-Flag
+bit](PROTOCOL-HTTP2.md#compressed-flag)
+set but lacking a
+"[grpc-encoding](PROTOCOL-HTTP2.md#message-encoding)"
+entry different from _identity_ in its metadata MUST fail with `INTERNAL`
+status, its associated description indicating the invalid Compressed-Flag
+condition.
diff --git a/doc/compression_cookbook.md b/doc/compression_cookbook.md
new file mode 100644
index 0000000000000000000000000000000000000000..c10a805f20406e716a26a75c176ce39f18a0af53
--- /dev/null
+++ b/doc/compression_cookbook.md
@@ -0,0 +1,133 @@
+# gRPC (Core) Compression Cookbook
+
+## Introduction
+
+This document describes compression as implemented by the gRPC C core. See [the
+full compression specification](compression.md) for details.
+
+### Intended Audience
+
+Developers of wrapped languages, for the purpose of supporting compression by
+interacting with the C core.
+
+## Criteria for GA readiness
+
+1. Be able to set compression at [channel](#per-channel-settings),
+   [call](#per-call-settings) and [message](#per-message-settings) level.
+   In principle this API should be based on _compression levels_ as opposed to
+   algorithms. See the discussion [below](#level-vs-algorithms).
+1. Have unit tests covering [the cases from the
+   spec](https://github.com/grpc/grpc/blob/master/doc/compression.md#test-cases).
+1. Interop tests implemented and passing on Jenkins. The two relevant interop
+   test cases are
+   [large_compressed_unary](https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#large_compressed_unary)
+   and
+   [server_compressed_streaming](https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#server_compressed_streaming).
+
+## Summary Flowcharts
+
+The following flowcharts depict the evolution of a message, both _incoming_ and
+_outgoing_, irrespective of the client/server character of the call. Aspects
+still not symmetric between clients and servers (e.g. the [use of compression
+levels](https://github.com/grpc/grpc/blob/master/doc/compression.md#compression-levels-and-algorithms))
+are explicitly marked. The different scenarios are described in detail in
+subsequent sections.
+
+## Incoming Messages
+
+![image](images/compression_cookbook_incoming.png)
+
+## Outgoing Messages
+
+![image](images/compression_cookbook_outgoing.png)
+
+## Levels vs Algorithms
+
+As mentioned in [the relevant discussion on the spec
+document](https://github.com/grpc/grpc/blob/master/doc/compression.md#compression-levels-and-algorithms),
+compression _levels_ are the primary mechanism for compression selection _at the
+server side_ (in the future, also at the client side). The use of levels
+abstracts away the intricacies of selecting a concrete algorithm supported by a
+peer, on top of removing the burden of choice from the developer.
+As of this writing (Q2 2016), clients can only specify compression _algorithms_.
+Clients will support levels as soon as an automatic retry/negotiation mechanism
+is in place.
+
+## Per Channel Settings
+
+Compression may be configured at channel creation. This is a convenience to
+avoid having to repeatedly configure compression for every call. Note that any
+compression setting on individual [calls](#per-call-settings) or
+[messages](#per-message-settings) overrides channel settings.
+
+The following aspects can be configured at channel-creation time via channel arguments:
+
+#### Disable Compression _Algorithms_
+
+The channel argument key
+`GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET` (from
+[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h))
+takes a 32-bit bitset value. A set bit means the algorithm with that
+`grpc_compression_algorithm` enum value is _enabled_.
+For example, `GRPC_COMPRESS_GZIP` currently has a numeric value of 2. To
+enable/disable GZIP for a channel, one would set/clear the 3rd LSB
+(e.g., 0b100 = 0x4). Note that setting/clearing the 0th position, the one
+corresponding to `GRPC_COMPRESS_NONE`, has no effect, as no-compression
+(a.k.a. _identity_) is always supported.
+Incoming messages compressed (i.e., encoded) with a disabled algorithm will
+result in the call being closed with `GRPC_STATUS_UNIMPLEMENTED`.
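+
+As an illustration, the following is a minimal C sketch of creating a client
+channel with only _identity_ and GZIP enabled. The target address and the use
+of an insecure channel are arbitrary choices for the example:
+
+```c
+#include <grpc/grpc.h>
+#include <grpc/compression.h>
+
+/* Sketch: create a client channel that only has identity and GZIP enabled.
+ * Bit positions follow the grpc_compression_algorithm enum values. */
+int main(void) {
+  grpc_init();
+
+  grpc_arg enabled_algorithms;
+  enabled_algorithms.type = GRPC_ARG_INTEGER;
+  enabled_algorithms.key = GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+  enabled_algorithms.value.integer =
+      (1 << GRPC_COMPRESS_NONE) | (1 << GRPC_COMPRESS_GZIP);
+
+  grpc_channel_args args = {1, &enabled_algorithms};
+  grpc_channel *channel =
+      grpc_insecure_channel_create("localhost:50051", &args, NULL);
+
+  /* ... use the channel ... */
+
+  grpc_channel_destroy(channel);
+  grpc_shutdown();
+  return 0;
+}
+```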
+
+#### Default Compression _Level_
+
+**(currently, Q2 2016, only applicable to server-side channels. It's ignored
+for clients.)**
+Use the channel argument key `GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL` (from
+[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)),
+valued by an integer corresponding to a value from the `grpc_compression_level`
+enum.
+
+#### Default Compression _Algorithm_
+
+Use the channel argument key `GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM` (from
+[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)),
+valued by an integer corresponding to a value from the
+`grpc_compression_algorithm` enum.
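+
+As a short sketch, both defaults can be supplied together as channel arguments
+when creating a server; the chosen values (a "high" level and GZIP) are
+illustrative:
+
+```c
+#include <grpc/grpc.h>
+#include <grpc/compression.h>
+
+/* Sketch: create a server whose calls default to a "high" compression level
+ * and to the GZIP algorithm. The core copies the channel args internally. */
+grpc_server *create_compressing_server(void) {
+  grpc_arg args_array[2];
+
+  args_array[0].type = GRPC_ARG_INTEGER;
+  args_array[0].key = GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL;
+  args_array[0].value.integer = GRPC_COMPRESS_LEVEL_HIGH;
+
+  args_array[1].type = GRPC_ARG_INTEGER;
+  args_array[1].key = GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+  args_array[1].value.integer = GRPC_COMPRESS_GZIP;
+
+  grpc_channel_args args = {2, args_array};
+  return grpc_server_create(&args, NULL);
+}
+```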
+
+## Per Call Settings
+
+### Compression **Level** in Call Responses
+
+The server requests a compression level via initial metadata. The
+`send_initial_metadata` `grpc_op` contains a `maybe_compression_level` field
+with two fields, `is_set` and `compression_level`. The former must be set when
+actively choosing a level to disambiguate the default value of zero (no
+compression) from the proactive selection of no compression.
+
+The core will receive the request for the compression level and automatically
+choose a compression algorithm based on its knowledge about the peer, as
+communicated by the client via the `grpc-accept-encoding` header. Note that the
+absence of this header means the client/peer supports no compression.
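+
+A minimal sketch of filling in the corresponding op on the server side follows.
+The field names should be double-checked against the `grpc_op` definition in
+`grpc/impl/codegen/grpc_types.h` for the core version in use; in particular,
+the inner field holding the level is assumed to be named `level` here:
+
+```c
+#include <string.h>
+#include <grpc/grpc.h>
+
+/* Sketch: ask the core to compress this call's responses at "high" level.
+ * The concrete algorithm is picked by the core from the client's
+ * grpc-accept-encoding header. */
+void fill_send_initial_metadata_op(grpc_op *op) {
+  memset(op, 0, sizeof(*op));
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->data.send_initial_metadata.metadata = NULL;
+  op->data.send_initial_metadata.maybe_compression_level.is_set = 1;
+  op->data.send_initial_metadata.maybe_compression_level.level =
+      GRPC_COMPRESS_LEVEL_HIGH;
+}
+```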
+
+### Compression **Algorithm** in Call Responses
+
+**Servers should avoid setting the compression algorithm directly**. Prefer
+setting compression levels unless there's a _very_ compelling reason to choose
+a specific algorithm (benchmarking, testing).
+
+Selection of concrete compression algorithms is performed by adding a
+`(GRPC_COMPRESS_REQUEST_ALGORITHM_KEY, <algorithm-name>)` key-value pair to the
+initial metadata, where `GRPC_COMPRESS_REQUEST_ALGORITHM_KEY` is defined in
+[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h),
+and `<algorithm-name>` is the human-readable name of the algorithm as given in
+[the HTTP2 spec](https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md)
+for `Message-Encoding` (e.g. gzip, identity, etc.). See
+[`grpc_compression_algorithm_name`](https://github.com/grpc/grpc/blob/master/src/core/lib/compression/compression.c)
+for the mapping between the `grpc_compression_algorithm` enum values and their
+textual representation.
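+
+The following is a hedged sketch assuming the C-string `grpc_metadata` layout
+(`key`, `value`, `value_length`, `flags`) of the core at the time of writing;
+adapt the metadata fields to the core version in use:
+
+```c
+#include <string.h>
+#include <grpc/grpc.h>
+#include <grpc/compression.h>
+
+/* Sketch: explicitly request GZIP for this call's responses by adding the
+ * (GRPC_COMPRESS_REQUEST_ALGORITHM_KEY, "gzip") pair to the initial metadata.
+ * The grpc_metadata layout shown (C strings) is an assumption; see above. */
+void fill_send_initial_metadata_with_algorithm(grpc_op *op,
+                                               grpc_metadata *md) {
+  md->key = GRPC_COMPRESS_REQUEST_ALGORITHM_KEY;
+  md->value = "gzip";
+  md->value_length = strlen("gzip");
+  md->flags = 0;
+
+  memset(op, 0, sizeof(*op));
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 1;
+  op->data.send_initial_metadata.metadata = md;
+}
+```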
+
+## Per Message Settings
+
+To disable compression for a specific message, the `flags` field of `grpc_op`
+instances of type `GRPC_OP_SEND_MESSAGE` must have its `GRPC_WRITE_NO_COMPRESS`
+bit set. Refer to
+[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)
+for more details.
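+
+A minimal sketch follows; attaching the actual `grpc_byte_buffer` payload is
+elided, as that part of the `grpc_op` union varies across core versions:
+
+```c
+#include <string.h>
+#include <grpc/grpc.h>
+
+/* Sketch: send a single message with compression disabled for it only. */
+void fill_uncompressed_send_message_op(grpc_op *op) {
+  memset(op, 0, sizeof(*op));
+  op->op = GRPC_OP_SEND_MESSAGE;
+  op->flags = GRPC_WRITE_NO_COMPRESS; /* per-message opt-out */
+  /* Attach the grpc_byte_buffer with the message payload here. */
+}
+```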
diff --git a/doc/images/compression_cookbook_incoming.png b/doc/images/compression_cookbook_incoming.png
new file mode 100644
index 0000000000000000000000000000000000000000..84d6f558b623cc708777504d783fe2e427886bc2
Binary files /dev/null and b/doc/images/compression_cookbook_incoming.png differ
diff --git a/doc/images/compression_cookbook_outgoing.png b/doc/images/compression_cookbook_outgoing.png
new file mode 100644
index 0000000000000000000000000000000000000000..59b995d15421d3116ecbb86cabbfb41a4d8f96d9
Binary files /dev/null and b/doc/images/compression_cookbook_outgoing.png differ
diff --git a/doc/interop-test-descriptions.md b/doc/interop-test-descriptions.md
index 7fd21c7022ed6deaba9204637c0042fb82def218..1e04966380ce2841a8bbb997937a9e74a1eaf5f4 100644
--- a/doc/interop-test-descriptions.md
+++ b/doc/interop-test-descriptions.md
@@ -30,8 +30,7 @@ Clients should accept these arguments:
       [ca.pem](https://github.com/grpc/grpc/blob/master/src/core/lib/tsi/test_creds/ca.pem)
       as the CA root
 * --default_service_account=ACCOUNT_EMAIL
-    * Email of the GCE default service account. Only applicable
-      for compute_engine_creds test.
+    * Email of the GCE default service account.
 * --oauth_scope=SCOPE
     * OAuth scope. For example, "https://www.googleapis.com/auth/xapi.zoo"
 * --service_account_key_file=PATH
@@ -68,14 +67,12 @@ control (even if compression is enabled on the channel).
 
 Server features:
 * [UnaryCall][]
-* [Compressable Payload][]
 
 Procedure:
  1. Client calls UnaryCall with:
 
     ```
     {
-      response_type: COMPRESSABLE
       response_size: 314159
       payload:{
         body: 271828 bytes of zeros
@@ -85,56 +82,106 @@ Procedure:
 
 Client asserts:
 * call was successful
-* response payload type is COMPRESSABLE
 * response payload body is 314159 bytes in size
 * clients are free to assert that the response payload body contents are zero
   and comparing the entire response message against a golden response
 
-### large_compressed_unary
-
-This test verifies compressed unary calls succeed in sending messages. It
-sends one unary request for every payload type, with and without requesting a
-compressed response from the server.
-
-In all scenarios, whether compression was actually performed is determined by
-the compression bit in the response's message flags.
+### client_compressed_unary
 
+This test verifies the client can compress unary messages by sending two unary
+calls, for compressed and uncompressed payloads. It also sends an initial
+probing request to verify whether the server supports the [CompressedRequest][]
+feature by checking if the probing call fails with an `INVALID_ARGUMENT` status.
 
 Server features:
 * [UnaryCall][]
-* [Compressable Payload][]
-* [Uncompressable Payload][]
+* [CompressedRequest][]
 
 Procedure:
- 1. Client calls UnaryCall with:
+ 1. Client calls UnaryCall with the feature probe, an *uncompressed* message:
+    ```
+    {
+      expect_compressed:{
+        value: true
+      }
+      response_size: 314159
+      payload:{
+        body: 271828 bytes of zeros
+      }
+    }
+    ```
+
+ 1. Client calls UnaryCall with the *compressed* message:
+
+    ```
+    {
+      expect_compressed:{
+        value: true
+      }
+      response_size: 314159
+      payload:{
+        body: 271828 bytes of zeros
+      }
+    }
+    ```
+
+ 1. Client calls UnaryCall with the *uncompressed* message:
 
     ```
     {
-      request_compressed_response: bool
-      response_type: COMPRESSABLE
+      expect_compressed:{
+        value: false
+      }
       response_size: 314159
       payload:{
         body: 271828 bytes of zeros
       }
     }
     ```
+
     Client asserts:
-    * call was successful
-    * response payload type is COMPRESSABLE
-    * if `request_compressed_response` is false, the response MUST NOT have the
-      compressed message flag set.
-    * if `request_compressed_response` is true, the response MUST have the
-      compressed message flag set.
-    * response payload body is 314159 bytes in size
-    * clients are free to assert that the response payload body contents are
-      zero and comparing the entire response message against a golden response
+    * First call failed with `INVALID_ARGUMENT` status.
+    * Subsequent calls were successful.
+    * Response payload body is 314159 bytes in size.
+    * Clients are free to assert that the response payload body contents are
+      zeros and comparing the entire response message against a golden response.
 
 
- 2. Client calls UnaryCall with:
+### server_compressed_unary
+
+This test verifies the server can compress unary messages. It sends two unary
+requests, expecting the server's response to be compressed or not according to
+the `response_compressed` boolean.
+
+Whether compression was actually performed is determined by the compression bit
+in the response's message flags. *Note that some languages may not have access
+to the message flags*.
+
+
+Server features:
+* [UnaryCall][]
+* [CompressedResponse][]
+
+Procedure:
+ 1. Client calls UnaryCall with `SimpleRequest`:
+
+    ```
+    {
+      response_compressed:{
+        value: true
+      }
+      response_size: 314159
+      payload:{
+        body: 271828 bytes of zeros
+      }
+    }
+    ```
+
     ```
     {
-      request_compressed_response: bool
-      response_type: UNCOMPRESSABLE
+      response_compressed:{
+        value: false
+      }
       response_size: 314159
       payload:{
         body: 271828 bytes of zeros
@@ -143,11 +190,13 @@ Procedure:
     ```
     Client asserts:
     * call was successful
-    * response payload type is UNCOMPRESSABLE
-    * the response MAY have the compressed message flag set. Some
-      implementations will choose to compress the payload even when the output
-      size if larger than the input.
-    * response payload body is 314159 bytes in size
+    * when `response_compressed` is true, the response MUST have the
+      compressed message flag set.
+    * when `response_compressed` is false, the response MUST NOT have
+      the compressed message flag set.
+    * response payload body is 314159 bytes in size in both cases.
+    * clients are free to assert that the response payload body contents are
+      zero and comparing the entire response message against a golden response
 
 
 ### client_streaming
@@ -156,7 +205,6 @@ This test verifies that client-only streaming succeeds.
 
 Server features:
 * [StreamingInputCall][]
-* [Compressable Payload][]
 
 Procedure:
  1. Client calls StreamingInputCall
@@ -206,25 +254,81 @@ Client asserts:
 * call was successful
 * response aggregated_payload_size is 74922
 
+
+### client_compressed_streaming
+
+This test verifies the client can compress requests on a per-message basis by
+performing a two-request streaming call. It also sends an initial probing
+request to verify whether the server supports the [CompressedRequest][] feature
+by checking if the probing call fails with an `INVALID_ARGUMENT` status.
+
+Procedure:
+ 1. Client calls `StreamingInputCall` and sends the following feature-probing
+    *uncompressed* `StreamingInputCallRequest` message
+
+    ```
+    {
+      expect_compressed:{
+        value: true
+      }
+      payload:{
+        body: 27182 bytes of zeros
+      }
+    }
+    ```
+    If the call does not fail with `INVALID_ARGUMENT`, the server does not
+    support the [CompressedRequest][] feature and the test fails. Otherwise, we
+    continue.
+
+ 1. Client calls `StreamingInputCall` again, sending the *compressed* message
+
+    ```
+    {
+      expect_compressed:{
+        value: true
+      }
+      payload:{
+        body: 27182 bytes of zeros
+      }
+    }
+    ```
+
+ 1. And finally, the *uncompressed* message
+    ```
+    {
+      expect_compressed:{
+        value: false
+      }
+      payload:{
+        body: 45904 bytes of zeros
+      }
+    }
+    ```
+
+ 1. Client half-closes
+
+Client asserts:
+* First call fails with `INVALID_ARGUMENT`.
+* Subsequent calls succeed.
+* Response aggregated payload size is 73086.
+
+
 ### server_streaming
 
 This test verifies that server-only streaming succeeds.
 
 Server features:
 * [StreamingOutputCall][]
-* [Compressable Payload][]
 
 Procedure:
- 1. Client calls StreamingOutputCall with:
+ 1. Client calls StreamingOutputCall with `StreamingOutputCallRequest`:
 
     ```
     {
-      response_type:COMPRESSABLE
       response_parameters:{
         size: 31415
       }
       response_parameters:{
-        size: 59
+        size: 9
       }
       response_parameters:{
         size: 2653
@@ -238,103 +342,64 @@ Procedure:
 Client asserts:
 * call was successful
 * exactly four responses
-* response payloads are COMPRESSABLE
 * response payload bodies are sized (in order): 31415, 9, 2653, 58979
 * clients are free to assert that the response payload body contents are zero
   and comparing the entire response messages against golden responses
 
 ### server_compressed_streaming
 
-This test verifies that server-only compressed streaming succeeds.
+This test verifies that the server can compress streaming messages and disable
+compression on individual messages.
 
 Server features:
 * [StreamingOutputCall][]
-* [Compressable Payload][]
-* [Uncompressable Payload][]
+* [CompressedResponse][]
 
 
 Procedure:
- 1. Client calls StreamingOutputCall with:
+ 1. Client calls StreamingOutputCall with `StreamingOutputCallRequest`:
 
     ```
     {
-      request_compressed_response: bool
-      response_type:COMPRESSABLE
       response_parameters:{
+        compressed: {
+          value: true
+        }
         size: 31415
       }
       response_parameters:{
-        size: 59
-      }
-      response_parameters:{
-        size: 2653
-      }
-      response_parameters:{
-        size: 58979
+        compressed: {
+          value: false
+        }
+        size: 92653
       }
     }
     ```
 
     Client asserts:
     * call was successful
-    * exactly four responses
-    * response payloads are COMPRESSABLE
-    * if `request_compressed_response` is false, the response's messages MUST
+    * exactly two responses
+    * when `response_compressed` is false, the response's messages MUST
       NOT have the compressed message flag set.
-    * if `request_compressed_response` is true, the response's messages MUST
+    * when `response_compressed` is true, the response's messages MUST
       have the compressed message flag set.
-    * response payload bodies are sized (in order): 31415, 59, 2653, 58979
+    * response payload bodies are sized (in order): 31415, 92653
     * clients are free to assert that the response payload body contents are
       zero and comparing the entire response messages against golden responses
 
 
- 2. Client calls StreamingOutputCall with:
-
-    ```
-    {
-      request_compressed_response: bool
-      response_type:UNCOMPRESSABLE
-      response_parameters:{
-        size: 31415
-      }
-      response_parameters:{
-        size: 59
-      }
-      response_parameters:{
-        size: 2653
-      }
-      response_parameters:{
-        size: 58979
-      }
-    }
-    ```
-
-    Client asserts:
-    * call was successful
-    * exactly four responses
-    * response payloads are UNCOMPRESSABLE
-    * the response MAY have the compressed message flag set. Some
-      implementations will choose to compress the payload even when the output
-      size if larger than the input.
-    * response payload bodies are sized (in order): 31415, 59, 2653, 58979
-    * clients are free to assert that the body of the responses are identical to
-      the golden uncompressable data at `test/cpp/interop/rnd.dat`.
-
-
 ### ping_pong
 
 This test verifies that full duplex bidi is supported.
 
 Server features:
 * [FullDuplexCall][]
-* [Compressable Payload][]
 
 Procedure:
  1. Client calls FullDuplexCall with:
 
     ```
     {
-      response_type: COMPRESSABLE
       response_parameters:{
         size: 31415
       }
@@ -348,9 +413,8 @@ Procedure:
 
     ```
     {
-      response_type: COMPRESSABLE
       response_parameters:{
-        size: 59
+        size: 9
       }
       payload:{
         body: 8 bytes of zeros
@@ -362,7 +426,6 @@ Procedure:
 
     ```
     {
-      response_type: COMPRESSABLE
       response_parameters:{
         size: 2653
       }
@@ -376,7 +439,6 @@ Procedure:
 
     ```
     {
-      response_type: COMPRESSABLE
       response_parameters:{
         size: 58979
       }
@@ -391,7 +453,6 @@ Procedure:
 Client asserts:
 * call was successful
 * exactly four responses
-* response payloads are COMPRESSABLE
 * response payload bodies are sized (in order): 31415, 9, 2653, 58979
 * clients are free to assert that the response payload body contents are zero
   and comparing the entire response messages against golden responses
@@ -421,12 +482,12 @@ with desired oauth scope.
 
 The test uses `--default_service_account` with GCE service account email and
 `--oauth_scope` with the OAuth scope to use. For testing against
-grpc-test.sandbox.googleapis.com, "https://www.googleapis.com/auth/xapi.zoo" should
+grpc-test.sandbox.googleapis.com, "https://www.googleapis.com/auth/xapi.zoo"
+should
 be passed in as `--oauth_scope`.
 
 Server features:
 * [UnaryCall][]
-* [Compressable Payload][]
 * [Echo Authenticated Username][]
 * [Echo OAuth Scope][]
 
@@ -436,7 +497,6 @@ Procedure:
 
     ```
     {
-      response_type: COMPRESSABLE
       response_size: 314159
       payload:{
         body: 271828 bytes of zeros
@@ -448,7 +508,8 @@ Procedure:
 
 Client asserts:
 * call was successful
-* received SimpleResponse.username equals the value of `--default_service_account` flag
+* received SimpleResponse.username equals the value of
+  `--default_service_account` flag
 * received SimpleResponse.oauth_scope is in `--oauth_scope`
 * response payload body is 314159 bytes in size
 * clients are free to assert that the response payload body contents are zero
@@ -469,7 +530,6 @@ variable GOOGLE_APPLICATION_CREDENTIALS.
 
 Server features:
 * [UnaryCall][]
-* [Compressable Payload][]
 * [Echo Authenticated Username][]
 * [Echo OAuth Scope][]
 
@@ -479,7 +539,6 @@ Procedure:
 
     ```
     {
-      response_type: COMPRESSABLE
       response_size: 314159
       payload:{
         body: 271828 bytes of zeros
@@ -492,7 +551,8 @@ Client asserts:
 * call was successful
 * received SimpleResponse.username is not empty and is in the json key file used
 by the auth library. The client can optionally check the username matches the
-email address in the key file or equals the value of `--default_service_account` flag.
+email address in the key file or equals the value of `--default_service_account`
+flag.
 * response payload body is 314159 bytes in size
 * clients are free to assert that the response payload body contents are zero
   and comparing the entire response message against a golden response
@@ -518,18 +578,18 @@ variable GOOGLE_APPLICATION_CREDENTIALS, *OR* if GCE credentials is used to
 fetch the token, `--default_service_account` can be used to pass in GCE service
 account email.
 - uses the flag `--oauth_scope` for the oauth scope.  For testing against
-grpc-test.sandbox.googleapis.com, "https://www.googleapis.com/auth/xapi.zoo" should
-be passed as the `--oauth_scope`.
+grpc-test.sandbox.googleapis.com, "https://www.googleapis.com/auth/xapi.zoo"
+should be passed as the `--oauth_scope`.
 
 Server features:
 * [UnaryCall][]
-* [Compressable Payload][]
 * [Echo Authenticated Username][]
 * [Echo OAuth Scope][]
 
 Procedure:
  1. Client uses the auth library to obtain an authorization token
- 2. Client configures the channel to use AccessTokenCredentials with the access token obtained in step 1
+ 2. Client configures the channel to use AccessTokenCredentials with the access
+    token obtained in step 1
  3. Client calls UnaryCall with the following message
 
     ```
@@ -550,22 +610,21 @@ json key file or GCE default service account email.
 
 Similar to the other auth tests, this test is only for cloud-to-prod path.
 
-This test verifies unary calls succeed in sending messages using a JWT or a service account
-credentials set on the RPC.
+This test verifies unary calls succeed in sending messages using a JWT or a
+service account credentials set on the RPC.
 
 The test
 - uses the flag `--service_account_key_file` with the path to a json key file
 downloaded from https://console.developers.google.com. Alternately, if using a
 usable auth implementation, it may specify the file location in the environment
 variable GOOGLE_APPLICATION_CREDENTIALS
-- optionally uses the flag `--oauth_scope` for the oauth scope if implementator 
+- optionally uses the flag `--oauth_scope` for the oauth scope if the implementer
 wishes to use service account credential instead of JWT credential. For testing
-against grpc-test.sandbox.googleapis.com, oauth scope 
+against grpc-test.sandbox.googleapis.com, oauth scope
 "https://www.googleapis.com/auth/xapi.zoo" should be used.
 
 Server features:
 * [UnaryCall][]
-* [Compressable Payload][]
 * [Echo Authenticated Username][]
 * [Echo OAuth Scope][]
 
@@ -596,7 +655,6 @@ by the server.
 Server features:
 * [UnaryCall][]
 * [FullDuplexCall][]
-* [Compressable Payload][]
 * [Echo Metadata][]
 
 Procedure:
@@ -611,7 +669,6 @@ Procedure:
 
     ```
     {
-      response_type: COMPRESSABLE
       response_size: 314159
       payload:{
         body: 271828 bytes of zeros
@@ -630,7 +687,6 @@ Procedure:
 
     ```
     {
-      response_type: COMPRESSABLE
       response_size: 314159
       payload:{
         body: 271828 bytes of zeros
@@ -736,14 +792,12 @@ from the server.
 
 Server features:
 * [FullDuplexCall][]
-* [Compressable Payload][]
 
 Procedure:
  1. Client starts FullDuplexCall with
 
     ```
     {
-      response_type: COMPRESSABLE
       response_parameters:{
         size: 31415
       }
@@ -887,6 +941,21 @@ payload body of size `SimpleRequest.response_size` bytes and type as appropriate
 for the `SimpleRequest.response_type`. If the server does not support the
 `response_type`, then it should fail the RPC with `INVALID_ARGUMENT`.
 
+### CompressedResponse
+[CompressedResponse]: #compressedresponse
+
+When the client sets `response_compressed` to true, the server's response is
+sent back compressed. Note that `response_compressed` is present on both
+`SimpleRequest` (unary) and `StreamingOutputCallRequest` (streaming).
+
+### CompressedRequest
+[CompressedRequest]: #compressedrequest
+
+When the client sets `expect_compressed` to true, the server expects the client
+request to be compressed. If it's not, it fails the RPC with `INVALID_ARGUMENT`.
+Note that `expect_compressed` is present on both `SimpleRequest` (unary) and
+`StreamingInputCallRequest` (streaming).
+
 ### StreamingInputCall
 [StreamingInputCall]: #streaminginputcall
 
@@ -898,8 +967,8 @@ request payload bodies received.
 [StreamingOutputCall]: #streamingoutputcall
 
 Server implements StreamingOutputCall by replying, in order, with one
-StreamingOutputCallResponses for each ResponseParameters in
-StreamingOutputCallRequest. Each StreamingOutputCallResponses should have a
+StreamingOutputCallResponse for each ResponseParameters in
+StreamingOutputCallRequest. Each StreamingOutputCallResponse should have a
 payload body of size ResponseParameters.size bytes, as specified by its
 respective ResponseParameters. After sending all responses, it closes with OK.
 
@@ -907,26 +976,12 @@ respective ResponseParameters. After sending all responses, it closes with OK.
 [FullDuplexCall]: #fullduplexcall
 
 Server implements FullDuplexCall by replying, in order, with one
-StreamingOutputCallResponses for each ResponseParameters in each
-StreamingOutputCallRequest. Each StreamingOutputCallResponses should have a
+StreamingOutputCallResponse for each ResponseParameters in each
+StreamingOutputCallRequest. Each StreamingOutputCallResponse should have a
 payload body of size ResponseParameters.size bytes, as specified by its
 respective ResponseParameters. After receiving half close and sending all
 responses, it closes with OK.
 
-### Compressable Payload
-[Compressable Payload]: #compressable-payload
-
-When the client requests COMPRESSABLE payload, the response includes a payload
-of the size requested containing all zeros and the payload type is
-COMPRESSABLE.
-
-### Uncompressable Payload
-[Uncompressable Payload]: #uncompressable-payload
-
-When the client requests UNCOMPRESSABLE payload, the response includes a payload
-of the size requested containing uncompressable data and the payload type is
-UNCOMPRESSABLE.
-
 ### Echo Status
 [Echo Status]: #echo-status
 When the client sends a response_status in the request payload, the server closes
diff --git a/doc/naming.md b/doc/naming.md
index 5ad7e6622eddaad60c5ea1dd5a1d10a920c42b6a..d0c892e8d924ef53e6b5a876fbf0cb41bf353b69 100644
--- a/doc/naming.md
+++ b/doc/naming.md
@@ -16,8 +16,6 @@ Here, scheme indicates the name-system to be used. Example schemes to be support
 
 * `dns`
 
-* `zookeeper`
-
 * `etcd`
 
 Authority indicates some scheme-specific bootstrap information, e.g., for DNS, the authority may include the IP[:port] of the DNS server to use. Often, a DNS name may used as the authority, since the ability to resolve DNS names is already built into all gRPC client libraries.
@@ -30,23 +28,3 @@ The gRPC client library will switch on the scheme to pick the right resolver plu
 
 Resolvers should be able to contact the authority and get a resolution that they return back to the gRPC client library. The returned contents include a list of IP:port, an optional config and optional auth config data to be used for channel authentication. The plugin API allows the resolvers to continuously watch an endpoint_name and return updated resolutions as needed. 
 
-## Zookeeper
-
-Apache [ZooKeeper](https://zookeeper.apache.org/) is a popular solution for building name-systems. Curator is a service discovery system built on to of ZooKeeper. We propose to organize names hierarchically as `/path/service/instance` similar to Apache Curator.
-
-A fully-qualified ZooKeeper name used to construct a gRPC channel will look as follows:
-
-```
-zookeeper://host:port/path/service/instance
-```
-Here `zookeeper` is the scheme identifying the name-system. `host:port` identifies an authoritative name-server for this scheme (i.e., a  Zookeeper server). The host can be an IP address or a DNS name. 
-Finally `/path/service/instance` is the Zookeeper name to be resolved. 
-
-## Service Registration
-
-
-Service providers can register their services in Zookeeper by using a Zookeeper client.  
-
-Each service is a zookeeper node, and each instance is a child node of the corresponding service. For example, a MySQL service may have multiple instances, `/mysql/1`, `/mysql/2`, `/mysql/3`. The name of the service or instance, as well as an optional path is specified by the service provider.
-
-The data in service nodes is empty. Each instance node stores its address in the format of `host:port`, where host can be either hostname or IP address.
diff --git a/examples/cpp/README.md b/examples/cpp/README.md
index d93cbacf7b276c673f17595e62b64821c22b4236..3fa7ad4c7847addee712d183b900d85bd288fa86 100644
--- a/examples/cpp/README.md
+++ b/examples/cpp/README.md
@@ -14,7 +14,7 @@ following command:
 
 
 ```sh
-$ git clone https://github.com/grpc/grpc.git
+$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
 ```
 
 Change your current directory to examples/cpp/helloworld
diff --git a/examples/cpp/cpptutorial.md b/examples/cpp/cpptutorial.md
index ef9ca99c0fef6ccec3a399e930f5186e0a1e2518..80fef07192e332e8b233cf9e8221429c4fb9a5cf 100644
--- a/examples/cpp/cpptutorial.md
+++ b/examples/cpp/cpptutorial.md
@@ -20,7 +20,7 @@ With gRPC we can define our service once in a .proto file and implement clients
 
 The example code for our tutorial is in [examples/cpp/route_guide](route_guide). To download the example, clone this repository by running the following command:
 ```shell
-$ git clone https://github.com/grpc/grpc.git
+$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
 ```
 
 Then change your current directory to `examples/cpp/route_guide`:
diff --git a/examples/cpp/helloworld/Makefile b/examples/cpp/helloworld/Makefile
index 780e5e427a7677db21aacfc3f6976ca363e12f1d..b45b3c7ee0c3d0482b499910f54cae6923fb0b13 100644
--- a/examples/cpp/helloworld/Makefile
+++ b/examples/cpp/helloworld/Makefile
@@ -97,7 +97,7 @@ ifneq ($(HAS_VALID_PROTOC),true)
 	@echo "Please install Google protocol buffers 3.0.0 and its compiler."
 	@echo "You can find it here:"
 	@echo
-	@echo "   https://github.com/google/protobuf/releases/tag/v3.0.0-beta-2"
+	@echo "   https://github.com/google/protobuf/releases/tag/v3.0.0-beta-3.3"
 	@echo
 	@echo "Here is what I get when trying to evaluate your version of protoc:"
 	@echo
diff --git a/examples/cpp/helloworld/README.md b/examples/cpp/helloworld/README.md
index 04283eabc350f3529a2350d52dcee3202fc651ed..db953f53628bbbf6adfbe4e3b0003ada9f977aba 100644
--- a/examples/cpp/helloworld/README.md
+++ b/examples/cpp/helloworld/README.md
@@ -12,7 +12,7 @@ following command:
 
 
 ```sh
-$ git clone https://github.com/grpc/grpc.git
+$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
 ```
 
 Change your current directory to examples/cpp/helloworld
diff --git a/examples/cpp/helloworld/greeter_client.cc b/examples/cpp/helloworld/greeter_client.cc
index bf3b63cb5774752914d28e4662973db549c11466..12209f37dfd750267b025a6f7d8eec8c2621a0dc 100644
--- a/examples/cpp/helloworld/greeter_client.cc
+++ b/examples/cpp/helloworld/greeter_client.cc
@@ -72,6 +72,8 @@ class GreeterClient {
     if (status.ok()) {
       return reply.message();
     } else {
+      std::cout << status.error_code() << ": " << status.error_message()
+                << std::endl;
       return "RPC failed";
     }
   }
diff --git a/examples/cpp/route_guide/Makefile b/examples/cpp/route_guide/Makefile
index 11f2a00cc8909b2d4701bdd62e647f81e08cb280..50ecf041f56b6bbdab9da30496ad9344e62d895d 100644
--- a/examples/cpp/route_guide/Makefile
+++ b/examples/cpp/route_guide/Makefile
@@ -86,7 +86,7 @@ ifneq ($(HAS_VALID_PROTOC),true)
 	@echo "Please install Google protocol buffers 3.0.0 and its compiler."
 	@echo "You can find it here:"
 	@echo
-	@echo "   https://github.com/google/protobuf/releases/tag/v3.0.0-beta-2"
+	@echo "   https://github.com/google/protobuf/releases/tag/v3.0.0-beta-3.3"
 	@echo
 	@echo "Here is what I get when trying to evaluate your version of protoc:"
 	@echo
diff --git a/examples/csharp/helloworld/.nuget/packages.config b/examples/csharp/helloworld/.nuget/packages.config
index bfd6c6723d95020b03c1bc04531110b8a0e2cb96..aa060800c19d52a86e1ab820058cb56073772e23 100644
--- a/examples/csharp/helloworld/.nuget/packages.config
+++ b/examples/csharp/helloworld/.nuget/packages.config
@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Grpc.Tools" version="0.14.0" />
+  <package id="Grpc.Tools" version="0.15.0" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/Greeter/Greeter.csproj b/examples/csharp/helloworld/Greeter/Greeter.csproj
index 0270cc25f7a1e2d0f0040cc132fca6dd1d7a1e70..20b85db8b602140135f3357e05da88b616bb7ae6 100644
--- a/examples/csharp/helloworld/Greeter/Greeter.csproj
+++ b/examples/csharp/helloworld/Greeter/Greeter.csproj
@@ -10,7 +10,7 @@
     <RootNamespace>Greeter</RootNamespace>
     <AssemblyName>Greeter</AssemblyName>
     <TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
-    <NuGetPackageImportStamp>745ac60f</NuGetPackageImportStamp>
+    <NuGetPackageImportStamp>2669b4f2</NuGetPackageImportStamp>
   </PropertyGroup>
   <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
     <DebugSymbols>true</DebugSymbols>
@@ -33,11 +33,11 @@
   <ItemGroup>
     <Reference Include="Google.Protobuf, Version=3.0.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Google.Protobuf.3.0.0-beta2\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
+      <HintPath>..\packages\Google.Protobuf.3.0.0-beta3\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Grpc.Core.0.14.0\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.0.15.0\lib\net45\Grpc.Core.dll</HintPath>
     </Reference>
     <Reference Include="System" />
     <Reference Include="System.Interactive.Async, Version=1.2.0.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35, processorArchitecture=MSIL">
@@ -61,11 +61,11 @@
     <None Include="packages.config" />
   </ItemGroup>
   <ItemGroup />
-  <Import Project="..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets'))" />
   </Target>
 </Project>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/Greeter/Helloworld.cs b/examples/csharp/helloworld/Greeter/Helloworld.cs
index 3cacdebe27ebf3855c5366eabfc5e7624a66c23b..6477b4f35be82ac49c60d2c9c661533143d56a06 100644
--- a/examples/csharp/helloworld/Greeter/Helloworld.cs
+++ b/examples/csharp/helloworld/Greeter/Helloworld.cs
@@ -31,9 +31,9 @@ namespace Helloworld {
             "cm90bzM="));
       descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
           new pbr::FileDescriptor[] { },
-          new pbr::GeneratedCodeInfo(null, new pbr::GeneratedCodeInfo[] {
-            new pbr::GeneratedCodeInfo(typeof(global::Helloworld.HelloRequest), global::Helloworld.HelloRequest.Parser, new[]{ "Name" }, null, null, null),
-            new pbr::GeneratedCodeInfo(typeof(global::Helloworld.HelloReply), global::Helloworld.HelloReply.Parser, new[]{ "Message" }, null, null, null)
+          new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+            new pbr::GeneratedClrTypeInfo(typeof(global::Helloworld.HelloRequest), global::Helloworld.HelloRequest.Parser, new[]{ "Name" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Helloworld.HelloReply), global::Helloworld.HelloReply.Parser, new[]{ "Message" }, null, null, null)
           }));
     }
     #endregion
@@ -76,7 +76,7 @@ namespace Helloworld {
     public string Name {
       get { return name_; }
       set {
-        name_ = pb::Preconditions.CheckNotNull(value, "value");
+        name_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
       }
     }
 
@@ -182,7 +182,7 @@ namespace Helloworld {
     public string Message {
       get { return message_; }
       set {
-        message_ = pb::Preconditions.CheckNotNull(value, "value");
+        message_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
       }
     }
 
diff --git a/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs b/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs
index 405f3bd72481043eb75c9d01e28c7980b656926c..041f5a78d758f9d4e3653ff013ea77a4a4f36415 100644
--- a/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs
+++ b/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs
@@ -61,38 +61,6 @@ namespace Helloworld {
       get { return global::Helloworld.HelloworldReflection.Descriptor.Services[0]; }
     }
 
-    /// <summary>Client for Greeter</summary>
-    [System.Obsolete("Client side interfaced will be removed in the next release. Use client class directly.")]
-    public interface IGreeterClient
-    {
-      /// <summary>
-      ///  Sends a greeting
-      /// </summary>
-      global::Helloworld.HelloReply SayHello(global::Helloworld.HelloRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
-      /// <summary>
-      ///  Sends a greeting
-      /// </summary>
-      global::Helloworld.HelloReply SayHello(global::Helloworld.HelloRequest request, CallOptions options);
-      /// <summary>
-      ///  Sends a greeting
-      /// </summary>
-      AsyncUnaryCall<global::Helloworld.HelloReply> SayHelloAsync(global::Helloworld.HelloRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
-      /// <summary>
-      ///  Sends a greeting
-      /// </summary>
-      AsyncUnaryCall<global::Helloworld.HelloReply> SayHelloAsync(global::Helloworld.HelloRequest request, CallOptions options);
-    }
-
-    /// <summary>Interface of server-side implementations of Greeter</summary>
-    [System.Obsolete("Service implementations should inherit from the generated abstract base class instead.")]
-    public interface IGreeter
-    {
-      /// <summary>
-      ///  Sends a greeting
-      /// </summary>
-      global::System.Threading.Tasks.Task<global::Helloworld.HelloReply> SayHello(global::Helloworld.HelloRequest request, ServerCallContext context);
-    }
-
     /// <summary>Base class for server-side implementations of Greeter</summary>
     public abstract class GreeterBase
     {
@@ -107,21 +75,24 @@ namespace Helloworld {
     }
 
     /// <summary>Client for Greeter</summary>
-    #pragma warning disable 0618
-    public class GreeterClient : ClientBase<GreeterClient>, IGreeterClient
-    #pragma warning restore 0618
+    public class GreeterClient : ClientBase<GreeterClient>
     {
+      /// <summary>Creates a new client for Greeter</summary>
+      /// <param name="channel">The channel to use to make remote calls.</param>
       public GreeterClient(Channel channel) : base(channel)
       {
       }
+      /// <summary>Creates a new client for Greeter that uses a custom <c>CallInvoker</c>.</summary>
+      /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
       public GreeterClient(CallInvoker callInvoker) : base(callInvoker)
       {
       }
-      ///<summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+      /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
       protected GreeterClient() : base()
       {
       }
-      ///<summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <param name="configuration">The client configuration.</param>
       protected GreeterClient(ClientBaseConfiguration configuration) : base(configuration)
       {
       }
@@ -160,27 +131,10 @@ namespace Helloworld {
       }
     }
 
-    /// <summary>Creates a new client for Greeter</summary>
-    public static GreeterClient NewClient(Channel channel)
-    {
-      return new GreeterClient(channel);
-    }
-
-    /// <summary>Creates service definition that can be registered with a server</summary>
-    #pragma warning disable 0618
-    public static ServerServiceDefinition BindService(IGreeter serviceImpl)
-    #pragma warning restore 0618
-    {
-      return ServerServiceDefinition.CreateBuilder(__ServiceName)
-          .AddMethod(__Method_SayHello, serviceImpl.SayHello).Build();
-    }
-
     /// <summary>Creates service definition that can be registered with a server</summary>
-    #pragma warning disable 0618
     public static ServerServiceDefinition BindService(GreeterBase serviceImpl)
-    #pragma warning restore 0618
     {
-      return ServerServiceDefinition.CreateBuilder(__ServiceName)
+      return ServerServiceDefinition.CreateBuilder()
           .AddMethod(__Method_SayHello, serviceImpl.SayHello).Build();
     }
 
diff --git a/examples/csharp/helloworld/Greeter/packages.config b/examples/csharp/helloworld/Greeter/packages.config
index 617fe6da7beb3b90c109e843d7ecd1ac94046067..ff9d6bbf73f4dd8dd8f5be0e8fd722dbf2ea8e06 100644
--- a/examples/csharp/helloworld/Greeter/packages.config
+++ b/examples/csharp/helloworld/Greeter/packages.config
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.0.0-beta2" targetFramework="net45" />
-  <package id="Grpc" version="0.14.0" targetFramework="net45" />
-  <package id="Grpc.Core" version="0.14.0" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.0.0-beta3" targetFramework="net45" />
+  <package id="Grpc" version="0.15.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="0.15.0" targetFramework="net45" />
   <package id="Ix-Async" version="1.2.5" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj b/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj
index 877c450a50e584e4afed95d47508979799e44f0a..2b38ce290e9466db2ee9c6d1ae5328fd3697c20b 100644
--- a/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj
+++ b/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj
@@ -10,7 +10,7 @@
     <RootNamespace>GreeterClient</RootNamespace>
     <AssemblyName>GreeterClient</AssemblyName>
     <TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
-    <NuGetPackageImportStamp>63b59176</NuGetPackageImportStamp>
+    <NuGetPackageImportStamp>5e942a7d</NuGetPackageImportStamp>
   </PropertyGroup>
   <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
     <DebugSymbols>true</DebugSymbols>
@@ -33,11 +33,11 @@
   <ItemGroup>
     <Reference Include="Google.Protobuf, Version=3.0.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Google.Protobuf.3.0.0-beta2\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
+      <HintPath>..\packages\Google.Protobuf.3.0.0-beta3\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Grpc.Core.0.14.0\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.0.15.0\lib\net45\Grpc.Core.dll</HintPath>
     </Reference>
     <Reference Include="System" />
     <Reference Include="System.Interactive.Async, Version=1.2.0.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35, processorArchitecture=MSIL">
@@ -59,11 +59,11 @@
   <ItemGroup>
     <None Include="packages.config" />
   </ItemGroup>
-  <Import Project="..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets'))" />
   </Target>
 </Project>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterClient/Program.cs b/examples/csharp/helloworld/GreeterClient/Program.cs
index 4393f2f3c2916a03a0702cb331e0d7463196d441..444d47350954b4cb2c006197e584bb04105762d4 100644
--- a/examples/csharp/helloworld/GreeterClient/Program.cs
+++ b/examples/csharp/helloworld/GreeterClient/Program.cs
@@ -39,7 +39,7 @@ namespace GreeterClient
         {
             Channel channel = new Channel("127.0.0.1:50051", ChannelCredentials.Insecure);
 
-            var client = Greeter.NewClient(channel);
+            var client = new Greeter.GreeterClient(channel);
             String user = "you";
 
             var reply = client.SayHello(new HelloRequest { Name = user });
diff --git a/examples/csharp/helloworld/GreeterClient/packages.config b/examples/csharp/helloworld/GreeterClient/packages.config
index 617fe6da7beb3b90c109e843d7ecd1ac94046067..ff9d6bbf73f4dd8dd8f5be0e8fd722dbf2ea8e06 100644
--- a/examples/csharp/helloworld/GreeterClient/packages.config
+++ b/examples/csharp/helloworld/GreeterClient/packages.config
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.0.0-beta2" targetFramework="net45" />
-  <package id="Grpc" version="0.14.0" targetFramework="net45" />
-  <package id="Grpc.Core" version="0.14.0" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.0.0-beta3" targetFramework="net45" />
+  <package id="Grpc" version="0.15.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="0.15.0" targetFramework="net45" />
   <package id="Ix-Async" version="1.2.5" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj b/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj
index 4d792dcf32e042af49e673d497a0f15f38e8c627..43c633678b1eda2d97d8cb230e0e9eff8a08a961 100644
--- a/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj
+++ b/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj
@@ -10,7 +10,7 @@
     <RootNamespace>GreeterServer</RootNamespace>
     <AssemblyName>GreeterServer</AssemblyName>
     <TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
-    <NuGetPackageImportStamp>25ac2e80</NuGetPackageImportStamp>
+    <NuGetPackageImportStamp>9c7b2963</NuGetPackageImportStamp>
   </PropertyGroup>
   <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
     <DebugSymbols>true</DebugSymbols>
@@ -33,11 +33,11 @@
   <ItemGroup>
     <Reference Include="Google.Protobuf, Version=3.0.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Google.Protobuf.3.0.0-beta2\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
+      <HintPath>..\packages\Google.Protobuf.3.0.0-beta3\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Grpc.Core.0.14.0\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.0.15.0\lib\net45\Grpc.Core.dll</HintPath>
     </Reference>
     <Reference Include="System" />
     <Reference Include="System.Interactive.Async, Version=1.2.0.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35, processorArchitecture=MSIL">
@@ -59,11 +59,11 @@
   <ItemGroup>
     <None Include="packages.config" />
   </ItemGroup>
-  <Import Project="..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets'))" />
   </Target>
 </Project>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterServer/packages.config b/examples/csharp/helloworld/GreeterServer/packages.config
index 617fe6da7beb3b90c109e843d7ecd1ac94046067..ff9d6bbf73f4dd8dd8f5be0e8fd722dbf2ea8e06 100644
--- a/examples/csharp/helloworld/GreeterServer/packages.config
+++ b/examples/csharp/helloworld/GreeterServer/packages.config
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.0.0-beta2" targetFramework="net45" />
-  <package id="Grpc" version="0.14.0" targetFramework="net45" />
-  <package id="Grpc.Core" version="0.14.0" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.0.0-beta3" targetFramework="net45" />
+  <package id="Grpc" version="0.15.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="0.15.0" targetFramework="net45" />
   <package id="Ix-Async" version="1.2.5" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/README.md b/examples/csharp/helloworld/README.md
index 63131ed98c0004ff464c36e91a1549318890e557..d13c9ac9db418377c6822054a16ab6f6e94b6c71 100644
--- a/examples/csharp/helloworld/README.md
+++ b/examples/csharp/helloworld/README.md
@@ -5,23 +5,16 @@ BACKGROUND
 -------------
 For this sample, we've already generated the server and client stubs from [helloworld.proto][].
 
-Example projects depend on the [Grpc](https://www.nuget.org/packages/Grpc/)
+Example projects depend on the [Grpc](https://www.nuget.org/packages/Grpc/), [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/)
 and [Google.Protobuf](https://www.nuget.org/packages/Google.Protobuf/) NuGet packages
 which have been already added to the project for you.
 
 PREREQUISITES
 -------------
-**Windows**
-- .NET 4.5+
-- Visual Studio 2013 or 2015
 
-**Linux**
-- Mono 4.0+
-- Monodevelop 5.9+ (with NuGet plugin installed)
-
-**Mac OS X**
-- Xamarin Studio 5.9+
-- [homebrew][]
+- Windows: .NET Framework 4.5+, Visual Studio 2013 or 2015
+- Linux: Mono 4+, MonoDevelop 5.9+ (with NuGet add-in installed)
+- Mac OS X: Xamarin Studio 5.9+
 
 BUILD
 -------
@@ -56,6 +49,5 @@ Tutorial
 
 You can find a more detailed tutorial in [gRPC Basics: C#][]
 
-[homebrew]:http://brew.sh
 [helloworld.proto]:../../protos/helloworld.proto
 [gRPC Basics: C#]:http://www.grpc.io/docs/tutorials/basic/csharp.html
diff --git a/examples/csharp/helloworld/generate_protos.bat b/examples/csharp/helloworld/generate_protos.bat
index b9f68d6745abd7c881d0b779e07dcea12829cccd..a952bb46cdcc5144422bb570c319d40db4b7fe4a 100644
--- a/examples/csharp/helloworld/generate_protos.bat
+++ b/examples/csharp/helloworld/generate_protos.bat
@@ -34,7 +34,7 @@ setlocal
 @rem enter this directory
 cd /d %~dp0
 
-set TOOLS_PATH=packages\Grpc.Tools.0.14.0\tools\windows_x86
+set TOOLS_PATH=packages\Grpc.Tools.0.15.0\tools\windows_x86
 
 %TOOLS_PATH%\protoc.exe -I../../protos --csharp_out Greeter  ../../protos/helloworld.proto --grpc_out Greeter --plugin=protoc-gen-grpc=%TOOLS_PATH%\grpc_csharp_plugin.exe
 
diff --git a/examples/csharp/route_guide/.nuget/packages.config b/examples/csharp/route_guide/.nuget/packages.config
index bfd6c6723d95020b03c1bc04531110b8a0e2cb96..aa060800c19d52a86e1ab820058cb56073772e23 100644
--- a/examples/csharp/route_guide/.nuget/packages.config
+++ b/examples/csharp/route_guide/.nuget/packages.config
@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Grpc.Tools" version="0.14.0" />
+  <package id="Grpc.Tools" version="0.15.0" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/route_guide/RouteGuide/RouteGuide.cs b/examples/csharp/route_guide/RouteGuide/RouteGuide.cs
index bcd77ec5f52f15adb19c6746345b5ece016d1b92..446113de9d5c387f67f106059f2a52031f5d0359 100644
--- a/examples/csharp/route_guide/RouteGuide/RouteGuide.cs
+++ b/examples/csharp/route_guide/RouteGuide/RouteGuide.cs
@@ -41,12 +41,12 @@ namespace Routeguide {
             "ZUIPUm91dGVHdWlkZVByb3RvUAGiAgNSVEdiBnByb3RvMw=="));
       descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
           new pbr::FileDescriptor[] { },
-          new pbr::GeneratedCodeInfo(null, new pbr::GeneratedCodeInfo[] {
-            new pbr::GeneratedCodeInfo(typeof(global::Routeguide.Point), global::Routeguide.Point.Parser, new[]{ "Latitude", "Longitude" }, null, null, null),
-            new pbr::GeneratedCodeInfo(typeof(global::Routeguide.Rectangle), global::Routeguide.Rectangle.Parser, new[]{ "Lo", "Hi" }, null, null, null),
-            new pbr::GeneratedCodeInfo(typeof(global::Routeguide.Feature), global::Routeguide.Feature.Parser, new[]{ "Name", "Location" }, null, null, null),
-            new pbr::GeneratedCodeInfo(typeof(global::Routeguide.RouteNote), global::Routeguide.RouteNote.Parser, new[]{ "Location", "Message" }, null, null, null),
-            new pbr::GeneratedCodeInfo(typeof(global::Routeguide.RouteSummary), global::Routeguide.RouteSummary.Parser, new[]{ "PointCount", "FeatureCount", "Distance", "ElapsedTime" }, null, null, null)
+          new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+            new pbr::GeneratedClrTypeInfo(typeof(global::Routeguide.Point), global::Routeguide.Point.Parser, new[]{ "Latitude", "Longitude" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Routeguide.Rectangle), global::Routeguide.Rectangle.Parser, new[]{ "Lo", "Hi" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Routeguide.Feature), global::Routeguide.Feature.Parser, new[]{ "Name", "Location" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Routeguide.RouteNote), global::Routeguide.RouteNote.Parser, new[]{ "Location", "Message" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Routeguide.RouteSummary), global::Routeguide.RouteSummary.Parser, new[]{ "PointCount", "FeatureCount", "Distance", "ElapsedTime" }, null, null, null)
           }));
     }
     #endregion
@@ -383,7 +383,7 @@ namespace Routeguide {
     public string Name {
       get { return name_; }
       set {
-        name_ = pb::Preconditions.CheckNotNull(value, "value");
+        name_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
       }
     }
 
@@ -541,7 +541,7 @@ namespace Routeguide {
     public string Message {
       get { return message_; }
       set {
-        message_ = pb::Preconditions.CheckNotNull(value, "value");
+        message_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
       }
     }
 
diff --git a/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj b/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj
index 4f7222ebba33febe7d92480e91ccd84887696ff0..601d16ba24d8214252b0884a5069d102183f8f3f 100644
--- a/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj
+++ b/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj
@@ -11,7 +11,7 @@
     <AssemblyName>RouteGuide</AssemblyName>
     <TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
     <FileAlignment>512</FileAlignment>
-    <NuGetPackageImportStamp>0a9fcb7a</NuGetPackageImportStamp>
+    <NuGetPackageImportStamp>de2137f9</NuGetPackageImportStamp>
   </PropertyGroup>
   <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
     <DebugSymbols>true</DebugSymbols>
@@ -33,11 +33,11 @@
   <ItemGroup>
     <Reference Include="Google.Protobuf, Version=3.0.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Google.Protobuf.3.0.0-beta2\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
+      <HintPath>..\packages\Google.Protobuf.3.0.0-beta3\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Grpc.Core.0.14.0\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.0.15.0\lib\net45\Grpc.Core.dll</HintPath>
     </Reference>
     <Reference Include="Newtonsoft.Json, Version=7.0.0.0, Culture=neutral, PublicKeyToken=30ad4fe6b2a6aeed, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
@@ -74,12 +74,12 @@
     </None>
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets'))" />
   </Target>
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.
diff --git a/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs b/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs
index a59906d9c13654ccf7b0367ba20ecf147a4be6ed..eb70c8e2db5b81cce75a8f1314261f87a0790e86 100644
--- a/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs
+++ b/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs
@@ -85,132 +85,6 @@ namespace Routeguide {
       get { return global::Routeguide.RouteGuideReflection.Descriptor.Services[0]; }
     }
 
-    /// <summary>Client for RouteGuide</summary>
-    [System.Obsolete("Client side interfaced will be removed in the next release. Use client class directly.")]
-    public interface IRouteGuideClient
-    {
-      /// <summary>
-      ///  A simple RPC.
-      ///
-      ///  Obtains the feature at a given position.
-      ///
-      ///  A feature with an empty name is returned if there's no feature at the given
-      ///  position.
-      /// </summary>
-      global::Routeguide.Feature GetFeature(global::Routeguide.Point request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
-      /// <summary>
-      ///  A simple RPC.
-      ///
-      ///  Obtains the feature at a given position.
-      ///
-      ///  A feature with an empty name is returned if there's no feature at the given
-      ///  position.
-      /// </summary>
-      global::Routeguide.Feature GetFeature(global::Routeguide.Point request, CallOptions options);
-      /// <summary>
-      ///  A simple RPC.
-      ///
-      ///  Obtains the feature at a given position.
-      ///
-      ///  A feature with an empty name is returned if there's no feature at the given
-      ///  position.
-      /// </summary>
-      AsyncUnaryCall<global::Routeguide.Feature> GetFeatureAsync(global::Routeguide.Point request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
-      /// <summary>
-      ///  A simple RPC.
-      ///
-      ///  Obtains the feature at a given position.
-      ///
-      ///  A feature with an empty name is returned if there's no feature at the given
-      ///  position.
-      /// </summary>
-      AsyncUnaryCall<global::Routeguide.Feature> GetFeatureAsync(global::Routeguide.Point request, CallOptions options);
-      /// <summary>
-      ///  A server-to-client streaming RPC.
-      ///
-      ///  Obtains the Features available within the given Rectangle.  Results are
-      ///  streamed rather than returned at once (e.g. in a response message with a
-      ///  repeated field), as the rectangle may cover a large area and contain a
-      ///  huge number of features.
-      /// </summary>
-      AsyncServerStreamingCall<global::Routeguide.Feature> ListFeatures(global::Routeguide.Rectangle request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
-      /// <summary>
-      ///  A server-to-client streaming RPC.
-      ///
-      ///  Obtains the Features available within the given Rectangle.  Results are
-      ///  streamed rather than returned at once (e.g. in a response message with a
-      ///  repeated field), as the rectangle may cover a large area and contain a
-      ///  huge number of features.
-      /// </summary>
-      AsyncServerStreamingCall<global::Routeguide.Feature> ListFeatures(global::Routeguide.Rectangle request, CallOptions options);
-      /// <summary>
-      ///  A client-to-server streaming RPC.
-      ///
-      ///  Accepts a stream of Points on a route being traversed, returning a
-      ///  RouteSummary when traversal is completed.
-      /// </summary>
-      AsyncClientStreamingCall<global::Routeguide.Point, global::Routeguide.RouteSummary> RecordRoute(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
-      /// <summary>
-      ///  A client-to-server streaming RPC.
-      ///
-      ///  Accepts a stream of Points on a route being traversed, returning a
-      ///  RouteSummary when traversal is completed.
-      /// </summary>
-      AsyncClientStreamingCall<global::Routeguide.Point, global::Routeguide.RouteSummary> RecordRoute(CallOptions options);
-      /// <summary>
-      ///  A Bidirectional streaming RPC.
-      ///
-      ///  Accepts a stream of RouteNotes sent while a route is being traversed,
-      ///  while receiving other RouteNotes (e.g. from other users).
-      /// </summary>
-      AsyncDuplexStreamingCall<global::Routeguide.RouteNote, global::Routeguide.RouteNote> RouteChat(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
-      /// <summary>
-      ///  A Bidirectional streaming RPC.
-      ///
-      ///  Accepts a stream of RouteNotes sent while a route is being traversed,
-      ///  while receiving other RouteNotes (e.g. from other users).
-      /// </summary>
-      AsyncDuplexStreamingCall<global::Routeguide.RouteNote, global::Routeguide.RouteNote> RouteChat(CallOptions options);
-    }
-
-    /// <summary>Interface of server-side implementations of RouteGuide</summary>
-    [System.Obsolete("Service implementations should inherit from the generated abstract base class instead.")]
-    public interface IRouteGuide
-    {
-      /// <summary>
-      ///  A simple RPC.
-      ///
-      ///  Obtains the feature at a given position.
-      ///
-      ///  A feature with an empty name is returned if there's no feature at the given
-      ///  position.
-      /// </summary>
-      global::System.Threading.Tasks.Task<global::Routeguide.Feature> GetFeature(global::Routeguide.Point request, ServerCallContext context);
-      /// <summary>
-      ///  A server-to-client streaming RPC.
-      ///
-      ///  Obtains the Features available within the given Rectangle.  Results are
-      ///  streamed rather than returned at once (e.g. in a response message with a
-      ///  repeated field), as the rectangle may cover a large area and contain a
-      ///  huge number of features.
-      /// </summary>
-      global::System.Threading.Tasks.Task ListFeatures(global::Routeguide.Rectangle request, IServerStreamWriter<global::Routeguide.Feature> responseStream, ServerCallContext context);
-      /// <summary>
-      ///  A client-to-server streaming RPC.
-      ///
-      ///  Accepts a stream of Points on a route being traversed, returning a
-      ///  RouteSummary when traversal is completed.
-      /// </summary>
-      global::System.Threading.Tasks.Task<global::Routeguide.RouteSummary> RecordRoute(IAsyncStreamReader<global::Routeguide.Point> requestStream, ServerCallContext context);
-      /// <summary>
-      ///  A Bidirectional streaming RPC.
-      ///
-      ///  Accepts a stream of RouteNotes sent while a route is being traversed,
-      ///  while receiving other RouteNotes (e.g. from other users).
-      /// </summary>
-      global::System.Threading.Tasks.Task RouteChat(IAsyncStreamReader<global::Routeguide.RouteNote> requestStream, IServerStreamWriter<global::Routeguide.RouteNote> responseStream, ServerCallContext context);
-    }
-
     /// <summary>Base class for server-side implementations of RouteGuide</summary>
     public abstract class RouteGuideBase
     {
@@ -265,21 +139,24 @@ namespace Routeguide {
     }
 
     /// <summary>Client for RouteGuide</summary>
-    #pragma warning disable 0618
-    public class RouteGuideClient : ClientBase<RouteGuideClient>, IRouteGuideClient
-    #pragma warning restore 0618
+    public class RouteGuideClient : ClientBase<RouteGuideClient>
     {
+      /// <summary>Creates a new client for RouteGuide</summary>
+      /// <param name="channel">The channel to use to make remote calls.</param>
       public RouteGuideClient(Channel channel) : base(channel)
       {
       }
+      /// <summary>Creates a new client for RouteGuide that uses a custom <c>CallInvoker</c>.</summary>
+      /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
       public RouteGuideClient(CallInvoker callInvoker) : base(callInvoker)
       {
       }
-      ///<summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+      /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
       protected RouteGuideClient() : base()
       {
       }
-      ///<summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <param name="configuration">The client configuration.</param>
       protected RouteGuideClient(ClientBaseConfiguration configuration) : base(configuration)
       {
       }
@@ -402,30 +279,10 @@ namespace Routeguide {
       }
     }
 
-    /// <summary>Creates a new client for RouteGuide</summary>
-    public static RouteGuideClient NewClient(Channel channel)
-    {
-      return new RouteGuideClient(channel);
-    }
-
-    /// <summary>Creates service definition that can be registered with a server</summary>
-    #pragma warning disable 0618
-    public static ServerServiceDefinition BindService(IRouteGuide serviceImpl)
-    #pragma warning restore 0618
-    {
-      return ServerServiceDefinition.CreateBuilder(__ServiceName)
-          .AddMethod(__Method_GetFeature, serviceImpl.GetFeature)
-          .AddMethod(__Method_ListFeatures, serviceImpl.ListFeatures)
-          .AddMethod(__Method_RecordRoute, serviceImpl.RecordRoute)
-          .AddMethod(__Method_RouteChat, serviceImpl.RouteChat).Build();
-    }
-
     /// <summary>Creates service definition that can be registered with a server</summary>
-    #pragma warning disable 0618
     public static ServerServiceDefinition BindService(RouteGuideBase serviceImpl)
-    #pragma warning restore 0618
     {
-      return ServerServiceDefinition.CreateBuilder(__ServiceName)
+      return ServerServiceDefinition.CreateBuilder()
           .AddMethod(__Method_GetFeature, serviceImpl.GetFeature)
           .AddMethod(__Method_ListFeatures, serviceImpl.ListFeatures)
           .AddMethod(__Method_RecordRoute, serviceImpl.RecordRoute)
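
The regenerated `RouteGuideGrpc.cs` above drops the obsolete `IRouteGuide`/`IRouteGuideClient` interfaces and the `NewClient()` factory, so server code now subclasses the generated `RouteGuide.RouteGuideBase` and registers it with `RouteGuide.BindService(...)`. The sketch below illustrates that hosting pattern under a few assumptions: `RouteGuideImpl` and the port are illustrative only, only `GetFeature` is overridden, and non-overridden RPCs are assumed to fall back to the generated base class's default (unimplemented) behavior.

```csharp
// Hypothetical hosting sketch for the regenerated 0.15 RouteGuide service.
using System.Threading.Tasks;
using Grpc.Core;
using Routeguide;

class RouteGuideImpl : RouteGuide.RouteGuideBase
{
    // Only GetFeature is overridden here; the other RPCs are left to the
    // generated base class (assumed to report them as unimplemented).
    public override Task<Feature> GetFeature(Point request, ServerCallContext context)
    {
        // Return an empty-named feature at the requested location.
        return Task.FromResult(new Feature { Name = "", Location = request });
    }
}

class ServerSketch
{
    static void Main()
    {
        var server = new Server
        {
            // BindService now takes the generated abstract base class.
            Services = { RouteGuide.BindService(new RouteGuideImpl()) },
            Ports = { new ServerPort("localhost", 50052, ServerCredentials.Insecure) }
        };
        server.Start();
        server.ShutdownAsync().Wait();
    }
}
```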
diff --git a/examples/csharp/route_guide/RouteGuide/packages.config b/examples/csharp/route_guide/RouteGuide/packages.config
index b16bfedd310422355de704a7cadd4a363f682e4c..b962a7232a93c4283d675b79631d2b234b07565c 100644
--- a/examples/csharp/route_guide/RouteGuide/packages.config
+++ b/examples/csharp/route_guide/RouteGuide/packages.config
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.0.0-beta2" targetFramework="net45" />
-  <package id="Grpc" version="0.14.0" targetFramework="net45" />
-  <package id="Grpc.Core" version="0.14.0" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.0.0-beta3" targetFramework="net45" />
+  <package id="Grpc" version="0.15.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="0.15.0" targetFramework="net45" />
   <package id="Ix-Async" version="1.2.5" targetFramework="net45" />
   <package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/route_guide/RouteGuideClient/Program.cs b/examples/csharp/route_guide/RouteGuideClient/Program.cs
index ee51fbe8e04776ee67e1b54249257fdd7b3561a9..8a173dcc3b3365f600b54beaee8e5e0595e85604 100644
--- a/examples/csharp/route_guide/RouteGuideClient/Program.cs
+++ b/examples/csharp/route_guide/RouteGuideClient/Program.cs
@@ -231,7 +231,7 @@ namespace Routeguide
         static void Main(string[] args)
         {
             var channel = new Channel("127.0.0.1:50052", ChannelCredentials.Insecure);
-            var client = new RouteGuideClient(RouteGuide.NewClient(channel));
+            var client = new RouteGuideClient(new RouteGuide.RouteGuideClient(channel));
 
             // Looking for a valid feature
             client.GetFeature(409146138, -746188906);
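
The `Program.cs` hunk above wraps the generated client in the example's own `RouteGuideClient` helper; for reference, this is roughly how the regenerated client is used on its own after this change. The message fields (`Point.Latitude`/`Longitude`, `Feature.Name`) come from `route_guide.proto` as regenerated above; the port matches the example, and the running server is assumed.

```csharp
// Sketch of calling the regenerated client directly (no wrapper class).
using System;
using Grpc.Core;
using Routeguide;

class DirectClientSketch
{
    static void Main()
    {
        var channel = new Channel("127.0.0.1:50052", ChannelCredentials.Insecure);

        // Replaces the removed RouteGuide.NewClient(channel) factory.
        var client = new RouteGuide.RouteGuideClient(channel);

        Feature feature = client.GetFeature(
            new Point { Latitude = 409146138, Longitude = -746188906 });
        Console.WriteLine("Found feature: " + feature.Name);

        channel.ShutdownAsync().Wait();
    }
}
```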
diff --git a/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj b/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj
index c3700f65c6de1eb9abb4ad5d7befcde3197cdc04..6f5c0a50dd41349153eaa5b832a65b3bd876e4b1 100644
--- a/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj
+++ b/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj
@@ -11,7 +11,7 @@
     <AssemblyName>RouteGuideClient</AssemblyName>
     <TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
     <FileAlignment>512</FileAlignment>
-    <NuGetPackageImportStamp>8ef088f0</NuGetPackageImportStamp>
+    <NuGetPackageImportStamp>b880049a</NuGetPackageImportStamp>
   </PropertyGroup>
   <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
     <PlatformTarget>AnyCPU</PlatformTarget>
@@ -35,11 +35,11 @@
   <ItemGroup>
     <Reference Include="Google.Protobuf, Version=3.0.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Google.Protobuf.3.0.0-beta2\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
+      <HintPath>..\packages\Google.Protobuf.3.0.0-beta3\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Grpc.Core.0.14.0\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.0.15.0\lib\net45\Grpc.Core.dll</HintPath>
     </Reference>
     <Reference Include="Newtonsoft.Json, Version=7.0.0.0, Culture=neutral, PublicKeyToken=30ad4fe6b2a6aeed, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
@@ -71,12 +71,12 @@
     </ProjectReference>
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets'))" />
   </Target>
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.
diff --git a/examples/csharp/route_guide/RouteGuideClient/packages.config b/examples/csharp/route_guide/RouteGuideClient/packages.config
index b16bfedd310422355de704a7cadd4a363f682e4c..b962a7232a93c4283d675b79631d2b234b07565c 100644
--- a/examples/csharp/route_guide/RouteGuideClient/packages.config
+++ b/examples/csharp/route_guide/RouteGuideClient/packages.config
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.0.0-beta2" targetFramework="net45" />
-  <package id="Grpc" version="0.14.0" targetFramework="net45" />
-  <package id="Grpc.Core" version="0.14.0" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.0.0-beta3" targetFramework="net45" />
+  <package id="Grpc" version="0.15.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="0.15.0" targetFramework="net45" />
   <package id="Ix-Async" version="1.2.5" targetFramework="net45" />
   <package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj b/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj
index e7ecbc150f1d7509099ecad31312621f012fab88..5bf46b05b89f94976f5e86053b67d759055a8883 100644
--- a/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj
+++ b/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj
@@ -11,7 +11,7 @@
     <AssemblyName>RouteGuideServer</AssemblyName>
     <TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
     <FileAlignment>512</FileAlignment>
-    <NuGetPackageImportStamp>d5246467</NuGetPackageImportStamp>
+    <NuGetPackageImportStamp>946ecc79</NuGetPackageImportStamp>
   </PropertyGroup>
   <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
     <PlatformTarget>AnyCPU</PlatformTarget>
@@ -35,11 +35,11 @@
   <ItemGroup>
     <Reference Include="Google.Protobuf, Version=3.0.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Google.Protobuf.3.0.0-beta2\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
+      <HintPath>..\packages\Google.Protobuf.3.0.0-beta3\lib\portable-net45+netcore45+wpa81+wp8\Google.Protobuf.dll</HintPath>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
-      <HintPath>..\packages\Grpc.Core.0.14.0\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.0.15.0\lib\net45\Grpc.Core.dll</HintPath>
     </Reference>
     <Reference Include="Newtonsoft.Json, Version=7.0.0.0, Culture=neutral, PublicKeyToken=30ad4fe6b2a6aeed, processorArchitecture=MSIL">
       <SpecificVersion>False</SpecificVersion>
@@ -72,12 +72,12 @@
     </ProjectReference>
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.14.0\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.0.15.0\build\net45\Grpc.Core.targets'))" />
   </Target>
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.
diff --git a/examples/csharp/route_guide/RouteGuideServer/packages.config b/examples/csharp/route_guide/RouteGuideServer/packages.config
index b16bfedd310422355de704a7cadd4a363f682e4c..b962a7232a93c4283d675b79631d2b234b07565c 100644
--- a/examples/csharp/route_guide/RouteGuideServer/packages.config
+++ b/examples/csharp/route_guide/RouteGuideServer/packages.config
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.0.0-beta2" targetFramework="net45" />
-  <package id="Grpc" version="0.14.0" targetFramework="net45" />
-  <package id="Grpc.Core" version="0.14.0" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.0.0-beta3" targetFramework="net45" />
+  <package id="Grpc" version="0.15.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="0.15.0" targetFramework="net45" />
   <package id="Ix-Async" version="1.2.5" targetFramework="net45" />
   <package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/route_guide/generate_protos.bat b/examples/csharp/route_guide/generate_protos.bat
index 72c5ba3e99f9f8e7cf45f0b9c0910013cf5e58a5..dbdbd1f172fe817e4855e4a0ca87e128046569f1 100644
--- a/examples/csharp/route_guide/generate_protos.bat
+++ b/examples/csharp/route_guide/generate_protos.bat
@@ -34,7 +34,7 @@ setlocal
 @rem enter this directory
 cd /d %~dp0
 
-set TOOLS_PATH=packages\Grpc.Tools.0.14.0\tools\windows_x86
+set TOOLS_PATH=packages\Grpc.Tools.0.15.0\tools\windows_x86
 
 %TOOLS_PATH%\protoc.exe -I../../protos --csharp_out RouteGuide  ../../protos/route_guide.proto --grpc_out RouteGuide --plugin=protoc-gen-grpc=%TOOLS_PATH%\grpc_csharp_plugin.exe
 
diff --git a/examples/node/README.md b/examples/node/README.md
index 59fb4a17f59139b3ea117f15f9e9213b1815e3f4..b92ca383c4718f56fd463b2d1a9e0bd835da57bb 100644
--- a/examples/node/README.md
+++ b/examples/node/README.md
@@ -12,7 +12,7 @@ INSTALL
    ```sh
    $ # Get the gRPC repository
    $ export REPO_ROOT=grpc # REPO root can be any directory of your choice
-   $ git clone https://github.com/grpc/grpc.git $REPO_ROOT
+   $ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc $REPO_ROOT
    $ cd $REPO_ROOT
 
    $ cd examples/node
diff --git a/examples/objective-c/auth_sample/AuthSample.xcodeproj/project.pbxproj b/examples/objective-c/auth_sample/AuthSample.xcodeproj/project.pbxproj
index 51a39c578c743194bcf2e510c3bd9fd9c05ad212..ab7419c9bcd3568432b59dc8477704d566308fc5 100644
--- a/examples/objective-c/auth_sample/AuthSample.xcodeproj/project.pbxproj
+++ b/examples/objective-c/auth_sample/AuthSample.xcodeproj/project.pbxproj
@@ -116,11 +116,12 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 63E1E9A21B28CB2100EF0978 /* Build configuration list for PBXNativeTarget "AuthSample" */;
 			buildPhases = (
-				DAABBA7B5788A39108D7CA83 /* Check Pods Manifest.lock */,
+				DAABBA7B5788A39108D7CA83 /* [CP] Check Pods Manifest.lock */,
 				63E1E9781B28CB2000EF0978 /* Sources */,
 				63E1E9791B28CB2000EF0978 /* Frameworks */,
 				63E1E97A1B28CB2000EF0978 /* Resources */,
-				AEFCCC69DD59CE8F6EB769D7 /* Copy Pods Resources */,
+				AEFCCC69DD59CE8F6EB769D7 /* [CP] Copy Pods Resources */,
+				D24F6598302C412D4B863D6F /* [CP] Embed Pods Frameworks */,
 			);
 			buildRules = (
 			);
@@ -177,14 +178,14 @@
 /* End PBXResourcesBuildPhase section */
 
 /* Begin PBXShellScriptBuildPhase section */
-		AEFCCC69DD59CE8F6EB769D7 /* Copy Pods Resources */ = {
+		AEFCCC69DD59CE8F6EB769D7 /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -192,14 +193,29 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-AuthSample/Pods-AuthSample-resources.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		DAABBA7B5788A39108D7CA83 /* Check Pods Manifest.lock */ = {
+		D24F6598302C412D4B863D6F /* [CP] Embed Pods Frameworks */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Check Pods Manifest.lock";
+			name = "[CP] Embed Pods Frameworks";
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-AuthSample/Pods-AuthSample-frameworks.sh\"\n";
+			showEnvVarsInLog = 0;
+		};
+		DAABBA7B5788A39108D7CA83 /* [CP] Check Pods Manifest.lock */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
diff --git a/examples/objective-c/auth_sample/AuthSample.xcodeproj/xcshareddata/xcschemes/AuthSample.xcscheme b/examples/objective-c/auth_sample/AuthSample.xcodeproj/xcshareddata/xcschemes/AuthSample.xcscheme
new file mode 100644
index 0000000000000000000000000000000000000000..0a210712a53d59b80c965b535991702430deaf9e
--- /dev/null
+++ b/examples/objective-c/auth_sample/AuthSample.xcodeproj/xcshareddata/xcschemes/AuthSample.xcscheme
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Scheme
+   LastUpgradeVersion = "0730"
+   version = "1.3">
+   <BuildAction
+      parallelizeBuildables = "YES"
+      buildImplicitDependencies = "YES">
+      <BuildActionEntries>
+         <BuildActionEntry
+            buildForTesting = "YES"
+            buildForRunning = "YES"
+            buildForProfiling = "YES"
+            buildForArchiving = "YES"
+            buildForAnalyzing = "YES">
+            <BuildableReference
+               BuildableIdentifier = "primary"
+               BlueprintIdentifier = "63E1E97B1B28CB2000EF0978"
+               BuildableName = "AuthSample.app"
+               BlueprintName = "AuthSample"
+               ReferencedContainer = "container:AuthSample.xcodeproj">
+            </BuildableReference>
+         </BuildActionEntry>
+      </BuildActionEntries>
+   </BuildAction>
+   <TestAction
+      buildConfiguration = "Debug"
+      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+      shouldUseLaunchSchemeArgsEnv = "YES">
+      <Testables>
+      </Testables>
+      <MacroExpansion>
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "63E1E97B1B28CB2000EF0978"
+            BuildableName = "AuthSample.app"
+            BlueprintName = "AuthSample"
+            ReferencedContainer = "container:AuthSample.xcodeproj">
+         </BuildableReference>
+      </MacroExpansion>
+      <AdditionalOptions>
+      </AdditionalOptions>
+   </TestAction>
+   <LaunchAction
+      buildConfiguration = "Debug"
+      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+      launchStyle = "0"
+      useCustomWorkingDirectory = "NO"
+      ignoresPersistentStateOnLaunch = "NO"
+      debugDocumentVersioning = "YES"
+      debugServiceExtension = "internal"
+      allowLocationSimulation = "YES">
+      <BuildableProductRunnable
+         runnableDebuggingMode = "0">
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "63E1E97B1B28CB2000EF0978"
+            BuildableName = "AuthSample.app"
+            BlueprintName = "AuthSample"
+            ReferencedContainer = "container:AuthSample.xcodeproj">
+         </BuildableReference>
+      </BuildableProductRunnable>
+      <AdditionalOptions>
+      </AdditionalOptions>
+   </LaunchAction>
+   <ProfileAction
+      buildConfiguration = "Release"
+      shouldUseLaunchSchemeArgsEnv = "YES"
+      savedToolIdentifier = ""
+      useCustomWorkingDirectory = "NO"
+      debugDocumentVersioning = "YES">
+      <BuildableProductRunnable
+         runnableDebuggingMode = "0">
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "63E1E97B1B28CB2000EF0978"
+            BuildableName = "AuthSample.app"
+            BlueprintName = "AuthSample"
+            ReferencedContainer = "container:AuthSample.xcodeproj">
+         </BuildableReference>
+      </BuildableProductRunnable>
+   </ProfileAction>
+   <AnalyzeAction
+      buildConfiguration = "Debug">
+   </AnalyzeAction>
+   <ArchiveAction
+      buildConfiguration = "Release"
+      revealArchiveInOrganizer = "YES">
+   </ArchiveAction>
+</Scheme>
diff --git a/examples/objective-c/auth_sample/AuthTestService.podspec b/examples/objective-c/auth_sample/AuthTestService.podspec
index e9356260534f139be177a0932e13fa4f7952aa85..af5ef2894648feb9e6ba20276edc540dff69805d 100644
--- a/examples/objective-c/auth_sample/AuthTestService.podspec
+++ b/examples/objective-c/auth_sample/AuthTestService.podspec
@@ -2,6 +2,10 @@ Pod::Spec.new do |s|
   s.name     = "AuthTestService"
   s.version  = "0.0.1"
   s.license  = "New BSD"
+  s.authors  = { 'gRPC contributors' => 'grpc-io@googlegroups.com' }
+  s.homepage = "http://www.grpc.io/"
+  s.summary = "AuthTestService example"
+  s.source = { :git => 'https://github.com/grpc/grpc.git' }
 
   s.ios.deployment_target = "7.1"
   s.osx.deployment_target = "10.9"
@@ -9,27 +13,51 @@ Pod::Spec.new do |s|
   # Base directory where the .proto files are.
   src = "../../protos"
 
+  # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
+  s.dependency "!ProtoCompiler-gRPCPlugin", "~> 0.14"
+
+  # Pods directory corresponding to this app's Podfile, relative to the location of this podspec.
+  pods_root = 'Pods'
+
+  # Path where Cocoapods downloads protoc and the gRPC plugin.
+  protoc_dir = "#{pods_root}/!ProtoCompiler"
+  protoc = "#{protoc_dir}/protoc"
+  plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin"
+
   # Directory where the generated files will be placed.
-  dir = "Pods/" + s.name
+  dir = "#{pods_root}/#{s.name}"
 
-  # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
   s.prepare_command = <<-CMD
     mkdir -p #{dir}
-    protoc -I #{src} --objc_out=#{dir} --objcgrpc_out=#{dir} #{src}/auth_sample.proto
+    #{protoc} \
+        --plugin=protoc-gen-grpc=#{plugin} \
+        --objc_out=#{dir} \
+        --grpc_out=#{dir} \
+        -I #{src} \
+        -I #{protoc_dir} \
+        #{src}/auth_sample.proto
   CMD
 
+  # Files generated by protoc
   s.subspec "Messages" do |ms|
     ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}"
     ms.header_mappings_dir = dir
     ms.requires_arc = false
-    ms.dependency "Protobuf", "~> 3.0.0-alpha-4"
+    # The generated files depend on the protobuf runtime.
+    ms.dependency "Protobuf"
+    # This is needed by all pods that depend on Protobuf:
+    ms.pod_target_xcconfig = {
+      'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+    }
   end
 
+  # Files generated by the gRPC plugin
   s.subspec "Services" do |ss|
     ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}"
     ss.header_mappings_dir = dir
     ss.requires_arc = true
-    ss.dependency "gRPC", "~> 0.12"
+    # The generated files depend on the gRPC runtime, and on the files generated by protoc.
+    ss.dependency "gRPC-ProtoRPC"
     ss.dependency "#{s.name}/Messages"
   end
 end
diff --git a/examples/objective-c/auth_sample/Podfile b/examples/objective-c/auth_sample/Podfile
index 7affe08743cb8e80f451c5ad2858bf7558096f63..a25d20f477ddf6ce38a5bd7f0eb34a218ea73e0b 100644
--- a/examples/objective-c/auth_sample/Podfile
+++ b/examples/objective-c/auth_sample/Podfile
@@ -1,9 +1,7 @@
 source 'https://github.com/CocoaPods/Specs.git'
 platform :ios, '8.0'
 
-pod 'Protobuf', :path => "../../../third_party/protobuf"
-pod 'BoringSSL', :podspec => "../../../src/objective-c"
-pod 'gRPC', :path => "../../.."
+install! 'cocoapods', :deterministic_uuids => false
 
 target 'AuthSample' do
   # Depend on the generated AuthTestService library.
diff --git a/examples/objective-c/helloworld/HelloWorld.podspec b/examples/objective-c/helloworld/HelloWorld.podspec
index bdf782f6eaffcac64734bcc7b56d7d543f5d15d0..bce6cd517248485a9eccd84c99028c8d0616293e 100644
--- a/examples/objective-c/helloworld/HelloWorld.podspec
+++ b/examples/objective-c/helloworld/HelloWorld.podspec
@@ -2,6 +2,10 @@ Pod::Spec.new do |s|
   s.name     = "HelloWorld"
   s.version  = "0.0.1"
   s.license  = "New BSD"
+  s.authors  = { 'gRPC contributors' => 'grpc-io@googlegroups.com' }
+  s.homepage = "http://www.grpc.io/"
+  s.summary = "HelloWorld example"
+  s.source = { :git => 'https://github.com/grpc/grpc.git' }
 
   s.ios.deployment_target = "7.1"
   s.osx.deployment_target = "10.9"
@@ -9,27 +13,51 @@ Pod::Spec.new do |s|
   # Base directory where the .proto files are.
   src = "../../protos"
 
+  # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
+  s.dependency "!ProtoCompiler-gRPCPlugin", "~> 0.14"
+
+  # Pods directory corresponding to this app's Podfile, relative to the location of this podspec.
+  pods_root = 'Pods'
+
+  # Path where Cocoapods downloads protoc and the gRPC plugin.
+  protoc_dir = "#{pods_root}/!ProtoCompiler"
+  protoc = "#{protoc_dir}/protoc"
+  plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin"
+
   # Directory where the generated files will be placed.
-  dir = "Pods/" + s.name
+  dir = "#{pods_root}/#{s.name}"
 
-  # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
   s.prepare_command = <<-CMD
     mkdir -p #{dir}
-    protoc -I #{src} --objc_out=#{dir} --objcgrpc_out=#{dir} #{src}/helloworld.proto
+    #{protoc} \
+        --plugin=protoc-gen-grpc=#{plugin} \
+        --objc_out=#{dir} \
+        --grpc_out=#{dir} \
+        -I #{src} \
+        -I #{protoc_dir} \
+        #{src}/helloworld.proto
   CMD
 
+  # Files generated by protoc
   s.subspec "Messages" do |ms|
     ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}"
     ms.header_mappings_dir = dir
     ms.requires_arc = false
-    ms.dependency "Protobuf", "~> 3.0.0-alpha-4"
+    # The generated files depend on the protobuf runtime.
+    ms.dependency "Protobuf"
+    # This is needed by all pods that depend on Protobuf:
+    ms.pod_target_xcconfig = {
+      'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+    }
   end
 
+  # Files generated by the gRPC plugin
   s.subspec "Services" do |ss|
     ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}"
     ss.header_mappings_dir = dir
     ss.requires_arc = true
-    ss.dependency "gRPC", "~> 0.12"
+    # The generated files depend on the gRPC runtime, and on the files generated by protoc.
+    ss.dependency "gRPC-ProtoRPC"
     ss.dependency "#{s.name}/Messages"
   end
 end
diff --git a/examples/objective-c/helloworld/HelloWorld.xcodeproj/project.pbxproj b/examples/objective-c/helloworld/HelloWorld.xcodeproj/project.pbxproj
index 250f009996f72447f2a7c2d7dbfd1a2b4cb363e4..df5c40cda209c781c9299214c70d46be1d42dec5 100644
--- a/examples/objective-c/helloworld/HelloWorld.xcodeproj/project.pbxproj
+++ b/examples/objective-c/helloworld/HelloWorld.xcodeproj/project.pbxproj
@@ -7,7 +7,6 @@
 	objects = {
 
 /* Begin PBXBuildFile section */
-		3EF35C14BDC2B65E21837F02 /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 43AB08B32839A6700EA00DD4 /* libPods.a */; };
 		5E3690661B2A23800040F884 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 5E3690651B2A23800040F884 /* main.m */; };
 		5E3690691B2A23800040F884 /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 5E3690681B2A23800040F884 /* AppDelegate.m */; };
 		5E36906C1B2A23800040F884 /* ViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = 5E36906B1B2A23800040F884 /* ViewController.m */; };
@@ -18,7 +17,6 @@
 
 /* Begin PBXFileReference section */
 		0C432EF610DB15C0F47A66BB /* Pods-HelloWorld.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-HelloWorld.release.xcconfig"; path = "Pods/Target Support Files/Pods-HelloWorld/Pods-HelloWorld.release.xcconfig"; sourceTree = "<group>"; };
-		43AB08B32839A6700EA00DD4 /* libPods.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libPods.a; sourceTree = BUILT_PRODUCTS_DIR; };
 		5E3690601B2A23800040F884 /* HelloWorld.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = HelloWorld.app; sourceTree = BUILT_PRODUCTS_DIR; };
 		5E3690641B2A23800040F884 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
 		5E3690651B2A23800040F884 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
@@ -37,7 +35,6 @@
 			buildActionMask = 2147483647;
 			files = (
 				EF61CF6AE2536A31D47F0E63 /* libPods-HelloWorld.a in Frameworks */,
-				3EF35C14BDC2B65E21837F02 /* libPods.a in Frameworks */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -88,7 +85,6 @@
 			isa = PBXGroup;
 			children = (
 				6B4E1F55F8A2EC95A0E7EE88 /* libPods-HelloWorld.a */,
-				43AB08B32839A6700EA00DD4 /* libPods.a */,
 			);
 			name = Frameworks;
 			sourceTree = "<group>";
@@ -109,12 +105,12 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 5E3690831B2A23810040F884 /* Build configuration list for PBXNativeTarget "HelloWorld" */;
 			buildPhases = (
-				ACF9162361FB8F24C70657DE /* Check Pods Manifest.lock */,
+				ACF9162361FB8F24C70657DE /* [CP] Check Pods Manifest.lock */,
 				5E36905C1B2A23800040F884 /* Sources */,
 				5E36905D1B2A23800040F884 /* Frameworks */,
 				5E36905E1B2A23800040F884 /* Resources */,
-				4C7D815378D98AB3BFC1A7D5 /* Copy Pods Resources */,
-				BB76529986A8BFAF19A385B1 /* Embed Pods Frameworks */,
+				4C7D815378D98AB3BFC1A7D5 /* [CP] Copy Pods Resources */,
+				BB76529986A8BFAF19A385B1 /* [CP] Embed Pods Frameworks */,
 			);
 			buildRules = (
 			);
@@ -170,14 +166,14 @@
 /* End PBXResourcesBuildPhase section */
 
 /* Begin PBXShellScriptBuildPhase section */
-		4C7D815378D98AB3BFC1A7D5 /* Copy Pods Resources */ = {
+		4C7D815378D98AB3BFC1A7D5 /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -185,14 +181,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-HelloWorld/Pods-HelloWorld-resources.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		ACF9162361FB8F24C70657DE /* Check Pods Manifest.lock */ = {
+		ACF9162361FB8F24C70657DE /* [CP] Check Pods Manifest.lock */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Check Pods Manifest.lock";
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -200,19 +196,19 @@
 			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
 			showEnvVarsInLog = 0;
 		};
-		BB76529986A8BFAF19A385B1 /* Embed Pods Frameworks */ = {
+		BB76529986A8BFAF19A385B1 /* [CP] Embed Pods Frameworks */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Embed Pods Frameworks";
+			name = "[CP] Embed Pods Frameworks";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 			shellPath = /bin/sh;
-			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-frameworks.sh\"\n";
+			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-HelloWorld/Pods-HelloWorld-frameworks.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
 /* End PBXShellScriptBuildPhase section */
diff --git a/examples/objective-c/helloworld/HelloWorld.xcodeproj/xcshareddata/xcschemes/HelloWorld.xcscheme b/examples/objective-c/helloworld/HelloWorld.xcodeproj/xcshareddata/xcschemes/HelloWorld.xcscheme
new file mode 100644
index 0000000000000000000000000000000000000000..be7b99030c1313492e5a9c090d54c3916594f99f
--- /dev/null
+++ b/examples/objective-c/helloworld/HelloWorld.xcodeproj/xcshareddata/xcschemes/HelloWorld.xcscheme
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Scheme
+   LastUpgradeVersion = "0730"
+   version = "1.3">
+   <BuildAction
+      parallelizeBuildables = "YES"
+      buildImplicitDependencies = "YES">
+      <BuildActionEntries>
+         <BuildActionEntry
+            buildForTesting = "YES"
+            buildForRunning = "YES"
+            buildForProfiling = "YES"
+            buildForArchiving = "YES"
+            buildForAnalyzing = "YES">
+            <BuildableReference
+               BuildableIdentifier = "primary"
+               BlueprintIdentifier = "5E36905F1B2A23800040F884"
+               BuildableName = "HelloWorld.app"
+               BlueprintName = "HelloWorld"
+               ReferencedContainer = "container:HelloWorld.xcodeproj">
+            </BuildableReference>
+         </BuildActionEntry>
+      </BuildActionEntries>
+   </BuildAction>
+   <TestAction
+      buildConfiguration = "Debug"
+      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+      shouldUseLaunchSchemeArgsEnv = "YES">
+      <Testables>
+      </Testables>
+      <MacroExpansion>
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "5E36905F1B2A23800040F884"
+            BuildableName = "HelloWorld.app"
+            BlueprintName = "HelloWorld"
+            ReferencedContainer = "container:HelloWorld.xcodeproj">
+         </BuildableReference>
+      </MacroExpansion>
+      <AdditionalOptions>
+      </AdditionalOptions>
+   </TestAction>
+   <LaunchAction
+      buildConfiguration = "Debug"
+      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+      launchStyle = "0"
+      useCustomWorkingDirectory = "NO"
+      ignoresPersistentStateOnLaunch = "NO"
+      debugDocumentVersioning = "YES"
+      debugServiceExtension = "internal"
+      allowLocationSimulation = "YES">
+      <BuildableProductRunnable
+         runnableDebuggingMode = "0">
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "5E36905F1B2A23800040F884"
+            BuildableName = "HelloWorld.app"
+            BlueprintName = "HelloWorld"
+            ReferencedContainer = "container:HelloWorld.xcodeproj">
+         </BuildableReference>
+      </BuildableProductRunnable>
+      <AdditionalOptions>
+      </AdditionalOptions>
+   </LaunchAction>
+   <ProfileAction
+      buildConfiguration = "Release"
+      shouldUseLaunchSchemeArgsEnv = "YES"
+      savedToolIdentifier = ""
+      useCustomWorkingDirectory = "NO"
+      debugDocumentVersioning = "YES">
+      <BuildableProductRunnable
+         runnableDebuggingMode = "0">
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "5E36905F1B2A23800040F884"
+            BuildableName = "HelloWorld.app"
+            BlueprintName = "HelloWorld"
+            ReferencedContainer = "container:HelloWorld.xcodeproj">
+         </BuildableReference>
+      </BuildableProductRunnable>
+   </ProfileAction>
+   <AnalyzeAction
+      buildConfiguration = "Debug">
+   </AnalyzeAction>
+   <ArchiveAction
+      buildConfiguration = "Release"
+      revealArchiveInOrganizer = "YES">
+   </ArchiveAction>
+</Scheme>
diff --git a/examples/objective-c/helloworld/Podfile b/examples/objective-c/helloworld/Podfile
index eebf05470d110cb5a9b2cbadbd0d0a02665094ce..0c3feaa47eb84e8d0b552c102614d58ca678b990 100644
--- a/examples/objective-c/helloworld/Podfile
+++ b/examples/objective-c/helloworld/Podfile
@@ -1,9 +1,7 @@
 source 'https://github.com/CocoaPods/Specs.git'
 platform :ios, '8.0'
 
-pod 'Protobuf', :path => "../../../third_party/protobuf"
-pod 'BoringSSL', :podspec => "../../../src/objective-c"
-pod 'gRPC', :path => "../../.."
+install! 'cocoapods', :deterministic_uuids => false
 
 target 'HelloWorld' do
   # Depend on the generated HelloWorld library.
diff --git a/examples/objective-c/helloworld/README.md b/examples/objective-c/helloworld/README.md
index 81c5aaa7bcb4e665433d41bc924ab20ce3d52558..fdff938a9784ff617e9c4b167b2fe450f1a80f97 100644
--- a/examples/objective-c/helloworld/README.md
+++ b/examples/objective-c/helloworld/README.md
@@ -16,7 +16,7 @@ this repository to your local machine by running the following commands:
 
 
 ```sh
-$ git clone https://github.com/grpc/grpc.git
+$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
 $ cd grpc
 $ git submodule update --init
 ```
diff --git a/examples/objective-c/route_guide/Misc/Base.lproj/Main.storyboard b/examples/objective-c/route_guide/Misc/Base.lproj/Main.storyboard
index 9bf9498d62ce5fe74da6ed9addc62e356953d40d..5ca9f4642b9ffbcebd6eb9718193da678148227c 100644
--- a/examples/objective-c/route_guide/Misc/Base.lproj/Main.storyboard
+++ b/examples/objective-c/route_guide/Misc/Base.lproj/Main.storyboard
@@ -1,7 +1,8 @@
 <?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="7702" systemVersion="14D131" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" initialViewController="49e-Tb-3d3">
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="10116" systemVersion="15F34" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" initialViewController="49e-Tb-3d3">
     <dependencies>
-        <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="7701"/>
+        <deployment identifier="iOS"/>
+        <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="10085"/>
     </dependencies>
     <scenes>
         <!--Get Feature-->
@@ -16,33 +17,35 @@
                         <rect key="frame" x="0.0" y="0.0" width="600" height="600"/>
                         <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
                         <subviews>
-                            <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="scaleToFill" text="Get Feature Demo" textAlignment="center" lineBreakMode="tailTruncation" minimumFontSize="10" translatesAutoresizingMaskIntoConstraints="NO" id="KQZ-1w-vlD">
-                                <rect key="frame" x="150" y="279" width="299" height="42"/>
-                                <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
-                                <fontDescription key="fontDescription" name="Helvetica" family="Helvetica" pointSize="36"/>
+                            <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" ambiguous="YES" misplaced="YES" text="Execution log:" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="au7-AW-5ov">
+                                <rect key="frame" x="16" y="0.0" width="257" height="61"/>
+                                <fontDescription key="fontDescription" type="system" pointSize="17"/>
                                 <color key="textColor" red="0.0" green="0.0" blue="0.0" alpha="1" colorSpace="calibratedRGB"/>
                                 <nil key="highlightedColor"/>
                             </label>
-                            <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" misplaced="YES" text="See ViewControllers.m and this app's log in XCode" textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="A5M-7J-77L">
-                                <rect key="frame" x="136" y="329" width="329" height="17"/>
-                                <fontDescription key="fontDescription" type="system" pointSize="14"/>
+                            <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" fixedFrame="YES" text="Label" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="2ga-Gd-X9q">
+                                <rect key="frame" x="20" y="82" width="560" height="437"/>
+                                <accessibility key="accessibilityConfiguration">
+                                    <accessibilityTraits key="traits" link="YES"/>
+                                </accessibility>
+                                <fontDescription key="fontDescription" type="system" pointSize="17"/>
                                 <color key="textColor" red="0.0" green="0.0" blue="0.0" alpha="1" colorSpace="calibratedRGB"/>
                                 <nil key="highlightedColor"/>
                             </label>
                         </subviews>
                         <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
                         <constraints>
-                            <constraint firstAttribute="centerX" secondItem="KQZ-1w-vlD" secondAttribute="centerX" id="6BV-lF-sBN"/>
-                            <constraint firstItem="A5M-7J-77L" firstAttribute="top" secondItem="KQZ-1w-vlD" secondAttribute="bottom" constant="8" symbolic="YES" id="cfb-er-3JN"/>
-                            <constraint firstItem="A5M-7J-77L" firstAttribute="centerX" secondItem="KQZ-1w-vlD" secondAttribute="centerX" id="e1l-AV-tCB"/>
-                            <constraint firstAttribute="centerY" secondItem="KQZ-1w-vlD" secondAttribute="centerY" id="exm-UA-ej4"/>
+                            <constraint firstItem="au7-AW-5ov" firstAttribute="centerX" secondItem="tsR-hK-woN" secondAttribute="centerX" constant="20" id="JAX-zf-Z1I"/>
                         </constraints>
                     </view>
                     <tabBarItem key="tabBarItem" title="Get Feature" image="first" id="acW-dT-cKf"/>
+                    <connections>
+                        <outlet property="outputLabel" destination="2ga-Gd-X9q" id="yXF-xa-kbD"/>
+                    </connections>
                 </viewController>
                 <placeholder placeholderIdentifier="IBFirstResponder" id="W5J-7L-Pyd" sceneMemberID="firstResponder"/>
             </objects>
-            <point key="canvasLocation" x="718" y="-660"/>
+            <point key="canvasLocation" x="733" y="-653"/>
         </scene>
         <!--List Features-->
         <scene sceneID="wg7-f3-ORb">
@@ -56,29 +59,35 @@
                         <rect key="frame" x="0.0" y="0.0" width="600" height="600"/>
                         <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
                         <subviews>
-                            <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="scaleToFill" text="List Features Demo" textAlignment="center" lineBreakMode="tailTruncation" minimumFontSize="10" translatesAutoresizingMaskIntoConstraints="NO" id="zEq-FU-wV5">
-                                <rect key="frame" x="143" y="279" width="315" height="42"/>
-                                <color key="backgroundColor" white="1" alpha="1" colorSpace="calibratedWhite"/>
-                                <fontDescription key="fontDescription" name="Helvetica" family="Helvetica" pointSize="36"/>
+                            <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" fixedFrame="YES" text="Label" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="8mE-gq-NQc">
+                                <rect key="frame" x="20" y="114" width="560" height="437"/>
+                                <accessibility key="accessibilityConfiguration">
+                                    <accessibilityTraits key="traits" link="YES"/>
+                                </accessibility>
+                                <fontDescription key="fontDescription" type="system" pointSize="17"/>
                                 <color key="textColor" red="0.0" green="0.0" blue="0.0" alpha="1" colorSpace="calibratedRGB"/>
                                 <nil key="highlightedColor"/>
                             </label>
-                            <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="See ViewControllers.m and this app's log in XCode" textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="NDk-cv-Gan">
-                                <rect key="frame" x="136" y="329" width="329" height="17"/>
-                                <fontDescription key="fontDescription" type="system" pointSize="14"/>
+                            <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" ambiguous="YES" misplaced="YES" text="Execution log:" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="DbB-M0-xs2">
+                                <rect key="frame" x="50" y="12" width="257" height="61"/>
+                                <fontDescription key="fontDescription" type="system" pointSize="17"/>
                                 <color key="textColor" red="0.0" green="0.0" blue="0.0" alpha="1" colorSpace="calibratedRGB"/>
                                 <nil key="highlightedColor"/>
                             </label>
                         </subviews>
                         <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
+                        <accessibility key="accessibilityConfiguration">
+                            <accessibilityTraits key="traits" staticText="YES"/>
+                        </accessibility>
                         <constraints>
-                            <constraint firstItem="NDk-cv-Gan" firstAttribute="top" secondItem="zEq-FU-wV5" secondAttribute="bottom" constant="8" symbolic="YES" id="Day-4N-Vmt"/>
-                            <constraint firstItem="NDk-cv-Gan" firstAttribute="centerX" secondItem="zEq-FU-wV5" secondAttribute="centerX" id="JgO-Fn-dHn"/>
-                            <constraint firstAttribute="centerX" secondItem="zEq-FU-wV5" secondAttribute="centerX" id="qqM-NS-xev"/>
-                            <constraint firstAttribute="centerY" secondItem="zEq-FU-wV5" secondAttribute="centerY" id="qzY-Ky-pLD"/>
+                            <constraint firstItem="DbB-M0-xs2" firstAttribute="centerX" secondItem="QS5-Rx-YEW" secondAttribute="centerX" constant="20" id="UDo-WG-7i3"/>
+                            <constraint firstItem="DbB-M0-xs2" firstAttribute="centerX" secondItem="QS5-Rx-YEW" secondAttribute="centerX" constant="20" id="W7v-LC-HjP"/>
                         </constraints>
                     </view>
                     <tabBarItem key="tabBarItem" title="List Features" image="second" id="cPa-gy-q4n"/>
+                    <connections>
+                        <outlet property="outputLabel" destination="8mE-gq-NQc" id="6rw-Kd-21X"/>
+                    </connections>
                 </viewController>
                 <placeholder placeholderIdentifier="IBFirstResponder" id="4Nw-L8-lE0" sceneMemberID="firstResponder"/>
             </objects>
@@ -117,29 +126,32 @@
                         <rect key="frame" x="0.0" y="0.0" width="600" height="600"/>
                         <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
                         <subviews>
-                            <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="scaleToFill" text="Record Route Demo" textAlignment="center" lineBreakMode="tailTruncation" minimumFontSize="10" translatesAutoresizingMaskIntoConstraints="NO" id="Nqv-Vr-o8P">
-                                <rect key="frame" x="136" y="279" width="329" height="42"/>
-                                <color key="backgroundColor" white="1" alpha="1" colorSpace="calibratedWhite"/>
-                                <fontDescription key="fontDescription" name="Helvetica" family="Helvetica" pointSize="36"/>
+                            <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" ambiguous="YES" misplaced="YES" text="Label" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="9wL-iS-tp8">
+                                <rect key="frame" x="20" y="114" width="560" height="437"/>
+                                <accessibility key="accessibilityConfiguration">
+                                    <accessibilityTraits key="traits" link="YES"/>
+                                </accessibility>
+                                <fontDescription key="fontDescription" type="system" pointSize="17"/>
                                 <color key="textColor" red="0.0" green="0.0" blue="0.0" alpha="1" colorSpace="calibratedRGB"/>
                                 <nil key="highlightedColor"/>
                             </label>
-                            <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="See ViewControllers.m and this app's log in XCode" textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="xjS-0N-tLe">
-                                <rect key="frame" x="136" y="329" width="329" height="17"/>
-                                <fontDescription key="fontDescription" type="system" pointSize="14"/>
+                            <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" ambiguous="YES" misplaced="YES" text="Execution log:" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="5qv-tY-qxl">
+                                <rect key="frame" x="30" y="10" width="257" height="61"/>
+                                <fontDescription key="fontDescription" type="system" pointSize="17"/>
                                 <color key="textColor" red="0.0" green="0.0" blue="0.0" alpha="1" colorSpace="calibratedRGB"/>
                                 <nil key="highlightedColor"/>
                             </label>
                         </subviews>
                         <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
                         <constraints>
-                            <constraint firstAttribute="centerX" secondItem="Nqv-Vr-o8P" secondAttribute="centerX" id="1wf-uc-57y"/>
-                            <constraint firstItem="xjS-0N-tLe" firstAttribute="centerX" secondItem="Nqv-Vr-o8P" secondAttribute="centerX" id="Gnh-rN-EQ3"/>
-                            <constraint firstItem="xjS-0N-tLe" firstAttribute="top" secondItem="Nqv-Vr-o8P" secondAttribute="bottom" constant="8" symbolic="YES" id="Xhj-u3-th9"/>
-                            <constraint firstAttribute="centerY" secondItem="Nqv-Vr-o8P" secondAttribute="centerY" id="xqU-v8-Bb3"/>
+                            <constraint firstItem="9wL-iS-tp8" firstAttribute="centerX" secondItem="Wvj-mg-YnO" secondAttribute="centerX" constant="20" id="7TX-Jm-662"/>
+                            <constraint firstItem="5qv-tY-qxl" firstAttribute="centerX" secondItem="Wvj-mg-YnO" secondAttribute="centerX" id="mRS-9u-c2a"/>
                         </constraints>
                     </view>
                     <tabBarItem key="tabBarItem" title="Record Route" image="first" id="PLK-Jm-UyM"/>
+                    <connections>
+                        <outlet property="outputLabel" destination="9wL-iS-tp8" id="xhd-zm-66g"/>
+                    </connections>
                 </viewController>
                 <placeholder placeholderIdentifier="IBFirstResponder" id="9RW-dt-a4q" sceneMemberID="firstResponder"/>
             </objects>
@@ -157,29 +169,31 @@
                         <rect key="frame" x="0.0" y="0.0" width="600" height="600"/>
                         <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
                         <subviews>
-                            <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="scaleToFill" text="Route Chat Demo" textAlignment="center" lineBreakMode="tailTruncation" minimumFontSize="10" translatesAutoresizingMaskIntoConstraints="NO" id="zUL-Bo-wJt">
-                                <rect key="frame" x="156" y="279" width="289" height="42"/>
-                                <color key="backgroundColor" white="1" alpha="1" colorSpace="calibratedWhite"/>
-                                <fontDescription key="fontDescription" name="Helvetica" family="Helvetica" pointSize="36"/>
+                            <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" ambiguous="YES" misplaced="YES" text="Execution log:" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="BxD-G9-xhU">
+                                <rect key="frame" x="20" y="10" width="257" height="61"/>
+                                <fontDescription key="fontDescription" type="system" pointSize="17"/>
                                 <color key="textColor" red="0.0" green="0.0" blue="0.0" alpha="1" colorSpace="calibratedRGB"/>
                                 <nil key="highlightedColor"/>
                             </label>
-                            <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="See ViewControllers.m and this app's log in XCode" textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="CgS-1q-Od9">
-                                <rect key="frame" x="136" y="329" width="329" height="17"/>
-                                <fontDescription key="fontDescription" type="system" pointSize="14"/>
+                            <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" fixedFrame="YES" text="Label" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="131-U2-Ogk">
+                                <rect key="frame" x="20" y="114" width="560" height="437"/>
+                                <accessibility key="accessibilityConfiguration">
+                                    <accessibilityTraits key="traits" link="YES"/>
+                                </accessibility>
+                                <fontDescription key="fontDescription" type="system" pointSize="17"/>
                                 <color key="textColor" red="0.0" green="0.0" blue="0.0" alpha="1" colorSpace="calibratedRGB"/>
                                 <nil key="highlightedColor"/>
                             </label>
                         </subviews>
                         <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
                         <constraints>
-                            <constraint firstAttribute="centerY" secondItem="zUL-Bo-wJt" secondAttribute="centerY" id="5hM-q1-ZjM"/>
-                            <constraint firstItem="CgS-1q-Od9" firstAttribute="top" secondItem="zUL-Bo-wJt" secondAttribute="bottom" constant="8" symbolic="YES" id="AqI-Ra-a5O"/>
-                            <constraint firstItem="CgS-1q-Od9" firstAttribute="centerX" secondItem="zUL-Bo-wJt" secondAttribute="centerX" id="K8f-KI-bc6"/>
-                            <constraint firstAttribute="centerX" secondItem="zUL-Bo-wJt" secondAttribute="centerX" id="n8b-x8-Yze"/>
+                            <constraint firstItem="BxD-G9-xhU" firstAttribute="centerX" secondItem="c9d-af-OMP" secondAttribute="centerX" id="wSw-7t-wxX"/>
                         </constraints>
                     </view>
                     <tabBarItem key="tabBarItem" title="Route Chat" image="second" id="p2G-IC-yAR"/>
+                    <connections>
+                        <outlet property="outputLabel" destination="131-U2-Ogk" id="fNw-M5-x1D"/>
+                    </connections>
                 </viewController>
                 <placeholder placeholderIdentifier="IBFirstResponder" id="yUz-se-Cfi" sceneMemberID="firstResponder"/>
             </objects>
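For readers less familiar with Interface Builder: the new `<connections>` blocks above bind the large label added to each scene to an `outputLabel` outlet on its view controller. A minimal sketch of the code side of that binding, mirroring the property declarations that appear in the ViewControllers.m hunk later in this diff:

```objectivec
#import <UIKit/UIKit.h>

// One outlet per tab's view controller; Interface Builder populates it when
// the storyboard loads, using the destination ids recorded above
// (e.g. destination="2ga-Gd-X9q" for the Get Feature scene).
@interface GetFeatureViewController : UIViewController
@property (weak, nonatomic) IBOutlet UILabel *outputLabel;
@end
```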
diff --git a/examples/objective-c/route_guide/Podfile b/examples/objective-c/route_guide/Podfile
index b9f2fefd6d2a0702ab43c80b45a7fc2b3451f506..b77eb1b11d1df8799bc02625d360dedeb95c039d 100644
--- a/examples/objective-c/route_guide/Podfile
+++ b/examples/objective-c/route_guide/Podfile
@@ -1,10 +1,9 @@
 source 'https://github.com/CocoaPods/Specs.git'
 platform :ios, '8.0'
 
+install! 'cocoapods', :deterministic_uuids => false
+
 target 'RouteGuideClient' do
-  pod 'Protobuf', :path => "../../../third_party/protobuf"
-  pod 'BoringSSL', :podspec => "../../../src/objective-c"
-  pod 'gRPC', :path => "../../.."
   # Depend on the generated RouteGuide library.
   pod 'RouteGuide', :path => '.'
 end
diff --git a/examples/objective-c/route_guide/RouteGuide.podspec b/examples/objective-c/route_guide/RouteGuide.podspec
index 4bc2c42cbb829f228ff10232016aa40734135ac6..e2132507514a43ceb0002480fb4e0c84f4526663 100644
--- a/examples/objective-c/route_guide/RouteGuide.podspec
+++ b/examples/objective-c/route_guide/RouteGuide.podspec
@@ -2,6 +2,10 @@ Pod::Spec.new do |s|
   s.name     = "RouteGuide"
   s.version  = "0.0.1"
   s.license  = "New BSD"
+  s.authors  = { 'gRPC contributors' => 'grpc-io@googlegroups.com' }
+  s.homepage = "http://www.grpc.io/"
+  s.summary = "RouteGuide example"
+  s.source = { :git => 'https://github.com/grpc/grpc.git' }
 
   s.ios.deployment_target = "7.1"
   s.osx.deployment_target = "10.9"
@@ -9,27 +13,51 @@ Pod::Spec.new do |s|
   # Base directory where the .proto files are.
   src = "../../protos"
 
+  # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
+  s.dependency "!ProtoCompiler-gRPCPlugin", "~> 0.14"
+
+  # Pods directory corresponding to this app's Podfile, relative to the location of this podspec.
+  pods_root = 'Pods'
+
+  # Path where Cocoapods downloads protoc and the gRPC plugin.
+  protoc_dir = "#{pods_root}/!ProtoCompiler"
+  protoc = "#{protoc_dir}/protoc"
+  plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin"
+
   # Directory where the generated files will be placed.
-  dir = "Pods/" + s.name
+  dir = "#{pods_root}/#{s.name}"
 
-  # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
   s.prepare_command = <<-CMD
     mkdir -p #{dir}
-    protoc -I #{src} --objc_out=#{dir} --objcgrpc_out=#{dir} #{src}/route_guide.proto
+    #{protoc} \
+        --plugin=protoc-gen-grpc=#{plugin} \
+        --objc_out=#{dir} \
+        --grpc_out=#{dir} \
+        -I #{src} \
+        -I #{protoc_dir} \
+        #{src}/route_guide.proto
   CMD
 
+  # Files generated by protoc
   s.subspec "Messages" do |ms|
     ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}"
     ms.header_mappings_dir = dir
     ms.requires_arc = false
-    ms.dependency "Protobuf", "~> 3.0.0-alpha-4"
+    # The generated files depend on the protobuf runtime.
+    ms.dependency "Protobuf"
+    # This is needed by all pods that depend on Protobuf:
+    ms.pod_target_xcconfig = {
+      'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+    }
   end
 
+  # Files generated by the gRPC plugin
   s.subspec "Services" do |ss|
     ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}"
     ss.header_mappings_dir = dir
     ss.requires_arc = true
-    ss.dependency "gRPC", "~> 0.12"
+    # The generated files depend on the gRPC runtime, and on the files generated by protoc.
+    ss.dependency "gRPC-ProtoRPC"
     ss.dependency "#{s.name}/Messages"
   end
 end
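With the podspec above, `pod install` runs protoc once as a `prepare_command` and exposes the generated messages and service stubs as the two subspecs. A brief usage sketch of the resulting library follows; the header name `RouteGuide.pbrpc.h` is assumed from the `*.pbrpc.{h,m}` glob above, and the `RTG`-prefixed classes and calls are the same ones exercised in the ViewControllers.m changes later in this diff.

```objectivec
#import <RouteGuide/RouteGuide.pbrpc.h>  // assumed generated header name

static NSString * const kHostAddress = @"localhost:50051";

// Illustrative sketch (not part of this patch): issue a single GetFeature RPC
// against a locally running route_guide server.
static void GetOneFeature(void) {
  RTGRouteGuide *service = [[RTGRouteGuide alloc] initWithHost:kHostAddress];
  RTGPoint *point = [RTGPoint message];
  point.latitude = 409146138;
  point.longitude = -746188906;
  [service getFeatureWithRequest:point
                         handler:^(RTGFeature *response, NSError *error) {
    NSLog(@"%@", response ? response.name : error);
  }];
}
```

As in the example app, the host would also need to be configured with `[GRPCCall useInsecureConnectionsForHost:kHostAddress]` before the first call when the local test server isn't serving TLS.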
diff --git a/examples/objective-c/route_guide/RouteGuideClient.xcodeproj/project.pbxproj b/examples/objective-c/route_guide/RouteGuideClient.xcodeproj/project.pbxproj
index f99775562c0630139f58050dad391788d0de4f5a..0bb84b3b9055a57d66c2a17e5d841896624c078e 100644
--- a/examples/objective-c/route_guide/RouteGuideClient.xcodeproj/project.pbxproj
+++ b/examples/objective-c/route_guide/RouteGuideClient.xcodeproj/project.pbxproj
@@ -116,12 +116,12 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 632527A31B1D0396003073D9 /* Build configuration list for PBXNativeTarget "RouteGuideClient" */;
 			buildPhases = (
-				C6FC30AD2376EC04317237C5 /* Check Pods Manifest.lock */,
+				C6FC30AD2376EC04317237C5 /* [CP] Check Pods Manifest.lock */,
 				632527791B1D0395003073D9 /* Sources */,
 				6325277A1B1D0395003073D9 /* Frameworks */,
 				6325277B1B1D0395003073D9 /* Resources */,
-				FFE0BCF30339E7A50A989EAB /* Copy Pods Resources */,
-				B5388EC5A25E89021740B916 /* Embed Pods Frameworks */,
+				FFE0BCF30339E7A50A989EAB /* [CP] Copy Pods Resources */,
+				B5388EC5A25E89021740B916 /* [CP] Embed Pods Frameworks */,
 			);
 			buildRules = (
 			);
@@ -178,14 +178,14 @@
 /* End PBXResourcesBuildPhase section */
 
 /* Begin PBXShellScriptBuildPhase section */
-		B5388EC5A25E89021740B916 /* Embed Pods Frameworks */ = {
+		B5388EC5A25E89021740B916 /* [CP] Embed Pods Frameworks */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Embed Pods Frameworks";
+			name = "[CP] Embed Pods Frameworks";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -193,14 +193,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-RouteGuideClient/Pods-RouteGuideClient-frameworks.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		C6FC30AD2376EC04317237C5 /* Check Pods Manifest.lock */ = {
+		C6FC30AD2376EC04317237C5 /* [CP] Check Pods Manifest.lock */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Check Pods Manifest.lock";
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -208,14 +208,14 @@
 			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
 			showEnvVarsInLog = 0;
 		};
-		FFE0BCF30339E7A50A989EAB /* Copy Pods Resources */ = {
+		FFE0BCF30339E7A50A989EAB /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
diff --git a/examples/objective-c/route_guide/RouteGuideClient.xcodeproj/xcshareddata/xcschemes/RouteGuideClient.xcscheme b/examples/objective-c/route_guide/RouteGuideClient.xcodeproj/xcshareddata/xcschemes/RouteGuideClient.xcscheme
new file mode 100644
index 0000000000000000000000000000000000000000..42806a738ff497b3321c4175bd7a5290ef8d68f8
--- /dev/null
+++ b/examples/objective-c/route_guide/RouteGuideClient.xcodeproj/xcshareddata/xcschemes/RouteGuideClient.xcscheme
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Scheme
+   LastUpgradeVersion = "0730"
+   version = "1.3">
+   <BuildAction
+      parallelizeBuildables = "YES"
+      buildImplicitDependencies = "YES">
+      <BuildActionEntries>
+         <BuildActionEntry
+            buildForTesting = "YES"
+            buildForRunning = "YES"
+            buildForProfiling = "YES"
+            buildForArchiving = "YES"
+            buildForAnalyzing = "YES">
+            <BuildableReference
+               BuildableIdentifier = "primary"
+               BlueprintIdentifier = "6325277C1B1D0395003073D9"
+               BuildableName = "RouteGuideClient.app"
+               BlueprintName = "RouteGuideClient"
+               ReferencedContainer = "container:RouteGuideClient.xcodeproj">
+            </BuildableReference>
+         </BuildActionEntry>
+      </BuildActionEntries>
+   </BuildAction>
+   <TestAction
+      buildConfiguration = "Debug"
+      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+      shouldUseLaunchSchemeArgsEnv = "YES">
+      <Testables>
+      </Testables>
+      <MacroExpansion>
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "6325277C1B1D0395003073D9"
+            BuildableName = "RouteGuideClient.app"
+            BlueprintName = "RouteGuideClient"
+            ReferencedContainer = "container:RouteGuideClient.xcodeproj">
+         </BuildableReference>
+      </MacroExpansion>
+      <AdditionalOptions>
+      </AdditionalOptions>
+   </TestAction>
+   <LaunchAction
+      buildConfiguration = "Debug"
+      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+      launchStyle = "0"
+      useCustomWorkingDirectory = "NO"
+      ignoresPersistentStateOnLaunch = "NO"
+      debugDocumentVersioning = "YES"
+      debugServiceExtension = "internal"
+      allowLocationSimulation = "YES">
+      <BuildableProductRunnable
+         runnableDebuggingMode = "0">
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "6325277C1B1D0395003073D9"
+            BuildableName = "RouteGuideClient.app"
+            BlueprintName = "RouteGuideClient"
+            ReferencedContainer = "container:RouteGuideClient.xcodeproj">
+         </BuildableReference>
+      </BuildableProductRunnable>
+      <AdditionalOptions>
+      </AdditionalOptions>
+   </LaunchAction>
+   <ProfileAction
+      buildConfiguration = "Release"
+      shouldUseLaunchSchemeArgsEnv = "YES"
+      savedToolIdentifier = ""
+      useCustomWorkingDirectory = "NO"
+      debugDocumentVersioning = "YES">
+      <BuildableProductRunnable
+         runnableDebuggingMode = "0">
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "6325277C1B1D0395003073D9"
+            BuildableName = "RouteGuideClient.app"
+            BlueprintName = "RouteGuideClient"
+            ReferencedContainer = "container:RouteGuideClient.xcodeproj">
+         </BuildableReference>
+      </BuildableProductRunnable>
+   </ProfileAction>
+   <AnalyzeAction
+      buildConfiguration = "Debug">
+   </AnalyzeAction>
+   <ArchiveAction
+      buildConfiguration = "Release"
+      revealArchiveInOrganizer = "YES">
+   </ArchiveAction>
+</Scheme>
diff --git a/examples/objective-c/route_guide/ViewControllers.m b/examples/objective-c/route_guide/ViewControllers.m
index e32978240b6b868fefdbdf5bbe8d45307d5d0bce..b2f99c437e701f676cffa52dd1ba6afb55514057 100644
--- a/examples/objective-c/route_guide/ViewControllers.m
+++ b/examples/objective-c/route_guide/ViewControllers.m
@@ -80,20 +80,30 @@ static NSString * const kHostAddress = @"localhost:50051";
  * Run the getFeature demo. Calls getFeature with a point known to have a feature and a point known
  * not to have a feature.
  */
-@interface GetFeatureViewController : UIViewController {
-  RTGRouteGuide *service;
-}
+@interface GetFeatureViewController : UIViewController
+
+@property (weak, nonatomic) IBOutlet UILabel *outputLabel;
+
 @end
 
-@implementation GetFeatureViewController
+@implementation GetFeatureViewController {
+  RTGRouteGuide *_service;
+}
 
 - (void)execRequest {
   void (^handler)(RTGFeature *response, NSError *error) = ^(RTGFeature *response, NSError *error) {
+    // TODO(makdharma): Remove boilerplate by consolidating into one log function.
     if (response.name.length) {
+      NSString *str =[NSString stringWithFormat:@"%@\nFound feature called %@ at %@.", self.outputLabel.text, response.location, response.name];
+      self.outputLabel.text = str;
       NSLog(@"Found feature called %@ at %@.", response.name, response.location);
     } else if (response) {
+      NSString *str =[NSString stringWithFormat:@"%@\nFound no features at %@",  self.outputLabel.text,response.location];
+      self.outputLabel.text = str;
       NSLog(@"Found no features at %@", response.location);
     } else {
+      NSString *str =[NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
+      self.outputLabel.text = str;
       NSLog(@"RPC error: %@", error);
     }
   };
@@ -102,8 +112,8 @@ static NSString * const kHostAddress = @"localhost:50051";
   point.latitude = 409146138;
   point.longitude = -746188906;
 
-  [service getFeatureWithRequest:point handler:handler];
-  [service getFeatureWithRequest:[RTGPoint message] handler:handler];
+  [_service getFeatureWithRequest:point handler:handler];
+  [_service getFeatureWithRequest:[RTGPoint message] handler:handler];
 }
 
 - (void)viewDidLoad {
@@ -112,10 +122,13 @@ static NSString * const kHostAddress = @"localhost:50051";
   // This only needs to be done once per host, before creating service objects for that host.
   [GRPCCall useInsecureConnectionsForHost:kHostAddress];
 
-  service = [[RTGRouteGuide alloc] initWithHost:kHostAddress];
+  _service = [[RTGRouteGuide alloc] initWithHost:kHostAddress];
 }
 
 - (void)viewDidAppear:(BOOL)animated {
+  self.outputLabel.text = @"RPC log:";
+  self.outputLabel.numberOfLines = 0;
+  self.outputLabel.font = [UIFont fontWithName:@"Helvetica Neue" size:8.0];
   [self execRequest];
 }
 
@@ -128,13 +141,15 @@ static NSString * const kHostAddress = @"localhost:50051";
  * Run the listFeatures demo. Calls listFeatures with a rectangle containing all of the features in
  * the pre-generated database. Prints each response as it comes in.
  */
-@interface ListFeaturesViewController : UIViewController {
-  RTGRouteGuide *service;
-}
+@interface ListFeaturesViewController : UIViewController
+
+@property (weak, nonatomic) IBOutlet UILabel *outputLabel;
 
 @end
 
-@implementation ListFeaturesViewController
+@implementation ListFeaturesViewController {
+  RTGRouteGuide *_service;
+}
 
 - (void)execRequest {
   RTGRectangle *rectangle = [RTGRectangle message];
@@ -144,11 +159,15 @@ static NSString * const kHostAddress = @"localhost:50051";
   rectangle.hi.longitude = -745E6;
 
   NSLog(@"Looking for features between %@ and %@", rectangle.lo, rectangle.hi);
-  [service listFeaturesWithRequest:rectangle
+  [_service listFeaturesWithRequest:rectangle
                       eventHandler:^(BOOL done, RTGFeature *response, NSError *error) {
     if (response) {
+      NSString *str =[NSString stringWithFormat:@"%@\nFound feature at %@ called %@.", self.outputLabel.text, response.location, response.name];
+      self.outputLabel.text = str;
       NSLog(@"Found feature at %@ called %@.", response.location, response.name);
     } else if (error) {
+      NSString *str =[NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
+      self.outputLabel.text = str;
       NSLog(@"RPC error: %@", error);
     }
   }];
@@ -157,10 +176,13 @@ static NSString * const kHostAddress = @"localhost:50051";
 - (void)viewDidLoad {
   [super viewDidLoad];
 
-  service = [[RTGRouteGuide alloc] initWithHost:kHostAddress];
+  _service = [[RTGRouteGuide alloc] initWithHost:kHostAddress];
 }
 
 - (void)viewDidAppear:(BOOL)animated {
+  self.outputLabel.text = @"RPC log:";
+  self.outputLabel.numberOfLines = 0;
+  self.outputLabel.font = [UIFont fontWithName:@"Helvetica Neue" size:8.0];
   [self execRequest];
 }
 
@@ -174,13 +196,15 @@ static NSString * const kHostAddress = @"localhost:50051";
  * database with a variable delay in between. Prints the statistics when they are sent from the
  * server.
  */
-@interface RecordRouteViewController : UIViewController {
-  RTGRouteGuide *service;
-}
+@interface RecordRouteViewController : UIViewController
+
+@property (weak, nonatomic) IBOutlet UILabel *outputLabel;
 
 @end
 
-@implementation RecordRouteViewController
+@implementation RecordRouteViewController {
+  RTGRouteGuide *_service;
+}
 
 - (void)execRequest {
   NSString *dataBasePath = [NSBundle.mainBundle pathForResource:@"route_guide_db"
@@ -192,18 +216,28 @@ static NSString * const kHostAddress = @"localhost:50051";
     RTGPoint *location = [RTGPoint message];
     location.longitude = [((NSNumber *) feature[@"location"][@"longitude"]) intValue];
     location.latitude = [((NSNumber *) feature[@"location"][@"latitude"]) intValue];
+    NSString *str =[NSString stringWithFormat:@"%@\nVisiting point %@", self.outputLabel.text, location];
+    self.outputLabel.text = str;
     NSLog(@"Visiting point %@", location);
     return location;
   }];
 
-  [service recordRouteWithRequestsWriter:locations
+  [_service recordRouteWithRequestsWriter:locations
                                  handler:^(RTGRouteSummary *response, NSError *error) {
     if (response) {
+      NSString *str = [NSString stringWithFormat:
+                      @"%@\nFinished trip with %i points\nPassed %i features\n"
+                      "Travelled %i meters\nIt took %i seconds",
+                      self.outputLabel.text, response.pointCount, response.featureCount,
+                      response.distance, response.elapsedTime];
+      self.outputLabel.text = str;
       NSLog(@"Finished trip with %i points", response.pointCount);
       NSLog(@"Passed %i features", response.featureCount);
       NSLog(@"Travelled %i meters", response.distance);
       NSLog(@"It took %i seconds", response.elapsedTime);
     } else {
+      NSString *str =[NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
+      self.outputLabel.text = str;
       NSLog(@"RPC error: %@", error);
     }
   }];
@@ -212,10 +246,13 @@ static NSString * const kHostAddress = @"localhost:50051";
 - (void)viewDidLoad {
   [super viewDidLoad];
 
-  service = [[RTGRouteGuide alloc] initWithHost:kHostAddress];
+  _service = [[RTGRouteGuide alloc] initWithHost:kHostAddress];
 }
 
 - (void)viewDidAppear:(BOOL)animated {
+  self.outputLabel.text = @"RPC log:";
+  self.outputLabel.numberOfLines = 0;
+  self.outputLabel.font = [UIFont fontWithName:@"Helvetica Neue" size:8.0];
   [self execRequest];
 }
 
@@ -228,13 +265,15 @@ static NSString * const kHostAddress = @"localhost:50051";
  * Run the routeChat demo. Send some chat messages, and print any chat messages that are sent from
  * the server.
  */
-@interface RouteChatViewController : UIViewController {
-  RTGRouteGuide *service;
-}
+@interface RouteChatViewController : UIViewController
+
+@property (weak, nonatomic) IBOutlet UILabel *outputLabel;
 
 @end
 
-@implementation RouteChatViewController
+@implementation RouteChatViewController {
+  RTGRouteGuide *_service;
+}
 
 - (void)execRequest {
   NSArray *notes = @[[RTGRouteNote noteWithMessage:@"First message" latitude:0 longitude:0],
@@ -246,11 +285,16 @@ static NSString * const kHostAddress = @"localhost:50051";
     return note;
   }];
 
-  [service routeChatWithRequestsWriter:notesWriter
+  [_service routeChatWithRequestsWriter:notesWriter
                           eventHandler:^(BOOL done, RTGRouteNote *note, NSError *error) {
     if (note) {
+      NSString *str =[NSString stringWithFormat:@"%@\nGot message %@ at %@",
+                      self.outputLabel.text, note.message, note.location];
+      self.outputLabel.text = str;
       NSLog(@"Got message %@ at %@", note.message, note.location);
     } else if (error) {
+      NSString *str =[NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
+      self.outputLabel.text = str;
       NSLog(@"RPC error: %@", error);
     }
     if (done) {
@@ -262,10 +306,14 @@ static NSString * const kHostAddress = @"localhost:50051";
 - (void)viewDidLoad {
   [super viewDidLoad];
 
-  service = [[RTGRouteGuide alloc] initWithHost:kHostAddress];
+  _service = [[RTGRouteGuide alloc] initWithHost:kHostAddress];
 }
 
 - (void)viewDidAppear:(BOOL)animated {
+  // TODO(makarandd): Set these properties through UI builder
+  self.outputLabel.text = @"RPC log:";
+  self.outputLabel.numberOfLines = 0;
+  self.outputLabel.font = [UIFont fontWithName:@"Helvetica Neue" size:8.0];
   [self execRequest];
 }
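The TODO in the GetFeature handler above notes that every callback repeats the same append-to-label-and-NSLog boilerplate. One way that consolidation could look, sketched here as a hypothetical category that is not part of this patch (`rtg_logText:toLabel:` is an invented name; `outputLabel` is the outlet each controller already declares):

```objectivec
#import <UIKit/UIKit.h>

// Hypothetical helper for the TODO above: append one line to the on-screen
// execution log and mirror it to the console.
@interface UIViewController (RTGOutputLog)
- (void)rtg_logText:(NSString *)text toLabel:(UILabel *)label;
@end

@implementation UIViewController (RTGOutputLog)
- (void)rtg_logText:(NSString *)text toLabel:(UILabel *)label {
  label.text = [NSString stringWithFormat:@"%@\n%@", label.text, text];
  NSLog(@"%@", text);
}
@end
```

Each handler body would then reduce to a single call, e.g. `[self rtg_logText:[NSString stringWithFormat:@"Found feature called %@ at %@.", response.name, response.location] toLabel:self.outputLabel];`.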
 
diff --git a/examples/php/README.md b/examples/php/README.md
index ea9ccb679086a53587f6c9ef908310a8216441e5..54cc97d8c2f8cca65b0fd65931c43b64d1b95664 100644
--- a/examples/php/README.md
+++ b/examples/php/README.md
@@ -11,13 +11,13 @@ INSTALL
  - Install the gRPC PHP extension
 
    ```sh
-   $ [sudo] pecl install grpc-beta
+   $ [sudo] pecl install grpc
    ```
 
  - Clone this repository
 
    ```sh
-   $ git clone https://github.com/grpc/grpc.git
+   $ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
    ```
 
  - Install composer
@@ -37,7 +37,8 @@ TRY IT!
    ```
    $ cd examples/node
    $ npm install
-   $ nodejs greeter_server.js
+   $ cd dynamic_codegen  # or: cd static_codegen
+   $ node greeter_server.js
    ```
 
  - Run the client
diff --git a/examples/php/composer.json b/examples/php/composer.json
index c837bf7ac0febe029f3fe41eb641884ff15ba13c..a8b790b1de16c4b8d232e0a03093b10f7265375b 100644
--- a/examples/php/composer.json
+++ b/examples/php/composer.json
@@ -2,13 +2,7 @@
   "name": "grpc/grpc-demo",
   "description": "gRPC example for PHP",
   "minimum-stability": "dev",
-  "repositories": [
-    {
-      "type": "vcs",
-      "url": "https://github.com/stanley-cheung/Protobuf-PHP"
-    }
-  ],
   "require": {
-    "grpc/grpc": "dev-release-0_13"
+    "grpc/grpc": "v0.15.0"
   }
 }
diff --git a/examples/python/helloworld/run_codegen.sh b/examples/python/helloworld/run_codegen.sh
index 42b58e50217d2710e29a0d210a2159947c56ed79..34224e5c418f58ab54ffb2d1f460d0682274d81b 100755
--- a/examples/python/helloworld/run_codegen.sh
+++ b/examples/python/helloworld/run_codegen.sh
@@ -29,4 +29,4 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 # Runs the protoc with gRPC plugin to generate protocol messages and gRPC stubs.
-protoc -I ../../protos --python_out=. --grpc_out=. --plugin=protoc-gen-grpc=`which grpc_python_plugin` ../../protos/helloworld.proto
+python -m grpc.tools.protoc -I../../protos --python_out=. --grpc_python_out=. ../../protos/helloworld.proto
diff --git a/examples/python/route_guide/run_codegen.sh b/examples/python/route_guide/run_codegen.sh
index d9d56c2d7ab2a7a98afeeafd4987adf3c8cf2dff..a377a1ab40903920aea3a6e92c7e51ca7166a818 100755
--- a/examples/python/route_guide/run_codegen.sh
+++ b/examples/python/route_guide/run_codegen.sh
@@ -29,4 +29,4 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 # Runs the protoc with gRPC plugin to generate protocol messages and gRPC stubs.
-protoc -I ../../protos --python_out=. --grpc_out=. --plugin=protoc-gen-grpc=`which grpc_python_plugin` ../../protos/route_guide.proto
+python -m grpc.tools.protoc -I../../protos --python_out=. --grpc_python_out=. ../../protos/route_guide.proto
diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec
new file mode 100644
index 0000000000000000000000000000000000000000..9f0d41573f5bb312a289d639f1a88edf009f60de
--- /dev/null
+++ b/gRPC-Core.podspec
@@ -0,0 +1,765 @@
+# GRPC CocoaPods podspec
+# This file has been automatically generated from a template file. Please make modifications to
+# `templates/gRPC-Core.podspec.template` instead. This file can be regenerated from the template by
+# running `tools/buildgen/generate_projects.sh`.
+
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Pod::Spec.new do |s|
+  s.name     = 'gRPC-Core'
+  version = '0.14.0'
+  s.version  = version
+  s.summary  = 'Core cross-platform gRPC library, written in C'
+  s.homepage = 'http://www.grpc.io'
+  s.license  = 'New BSD'
+  s.authors  = { 'The gRPC contributors' => 'grpc-packages@google.com' }
+
+  s.source = {
+    :git => 'https://github.com/grpc/grpc.git',
+    :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}",
+    # TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules.
+    :submodules => true,
+  }
+
+  s.ios.deployment_target = '7.1'
+  s.osx.deployment_target = '10.9'
+  s.requires_arc = false
+
+  name = 'grpc'
+
+  # When creating a dynamic framework, name it grpc.framework instead of gRPC-Core.framework.
+  # This lets users write their includes like `#include <grpc/grpc.h>` as opposed to `#include
+  # <gRPC-Core/grpc.h>`.
+  s.module_name = name
+
+  # When creating a dynamic framework, copy the headers under `include/grpc/` into the root of
+  # the `Headers/` directory of the framework (i.e., not under `Headers/include/grpc`).
+  #
+  # TODO(jcanizales): Debug why this doesn't work on macOS.
+  s.header_mappings_dir = 'include/grpc'
+
+  # The above has an undesired effect when creating a static library: It forces users to write
+  # includes like `#include <gRPC-Core/grpc.h>`. `s.header_dir` adds a path prefix to that, and
+  # because Cocoapods lets you omit the pod name when including headers of static libraries, the
+  # following lets users write `#include <grpc/grpc.h>`.
+  s.header_dir = name
+
+  # The module map created automatically by Cocoapods doesn't work for C libraries like gRPC-Core.
+  s.module_map = 'include/grpc/module.modulemap'
+
+  # To compile the library, we need the user headers search path (quoted includes) to point to the
+  # root of the repo, and the system headers search path (angled includes) to point to `include/`.
+  # Cocoapods effectively clones the repo under `<Podfile dir>/Pods/gRPC-Core/`, and sets a build
+  # variable called `$(PODS_ROOT)` to `<Podfile dir>/Pods/`, so we use that.
+  #
+  # Relying on the file structure under $(PODS_ROOT) isn't officially supported in Cocoapods, as it
+  # is taken as an implementation detail. We've asked for an alternative, and have been told that
+  # what we're doing should keep working: https://github.com/CocoaPods/CocoaPods/issues/4386
+  #
+  # The `src_root` value of `$(PODS_ROOT)/gRPC-Core` assumes Cocoapods is installing this pod from
+  # its remote repo. For local development of this library, enabled by using `:path` in the Podfile,
+  # that assumption is wrong. In such a case, the following settings need to be reset with the
+  # appropriate value of `src_root`. This can be accomplished in the `pre_install` hook of the
+  # Podfile; see `src/objective-c/tests/Podfile` for an example.
+  src_root = '$(PODS_ROOT)/gRPC-Core'
+  s.pod_target_xcconfig = {
+    'GRPC_SRC_ROOT' => src_root,
+    'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"',
+    'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"',
+    # If we don't set these two settings, `include/grpc/support/time.h` and
+    # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the
+    # build.
+    'USE_HEADERMAP' => 'NO',
+    'ALWAYS_SEARCH_USER_PATHS' => 'NO',
+  }
+
+  # Like many other C libraries, gRPC-Core has its public headers under `include/<libname>/` and its
+  # sources and private headers in other directories outside `include/`. Cocoapods' linter doesn't
+  # allow any header to be listed outside the `header_mappings_dir` (even though doing so works in
+  # practice). Because we need our `header_mappings_dir` to be `include/grpc/` for the reason
+  # mentioned above, we work around the linter limitation by dividing the pod into two subspecs, one
+  # for public headers and the other for implementation. Each gets its own `header_mappings_dir`,
+  # making the linter happy.
+  #
+  # The list of source files is generated by a template: `templates/gRPC-Core.podspec.template`. It
+  # can be regenerated from the template by running `tools/buildgen/generate_projects.sh`.
+  s.subspec 'Interface' do |ss|
+    ss.header_mappings_dir = 'include/grpc'
+
+    ss.source_files = 'include/grpc/support/alloc.h',
+                      'include/grpc/support/atm.h',
+                      'include/grpc/support/atm_gcc_atomic.h',
+                      'include/grpc/support/atm_gcc_sync.h',
+                      'include/grpc/support/atm_windows.h',
+                      'include/grpc/support/avl.h',
+                      'include/grpc/support/cmdline.h',
+                      'include/grpc/support/cpu.h',
+                      'include/grpc/support/histogram.h',
+                      'include/grpc/support/host_port.h',
+                      'include/grpc/support/log.h',
+                      'include/grpc/support/log_windows.h',
+                      'include/grpc/support/port_platform.h',
+                      'include/grpc/support/slice.h',
+                      'include/grpc/support/slice_buffer.h',
+                      'include/grpc/support/string_util.h',
+                      'include/grpc/support/subprocess.h',
+                      'include/grpc/support/sync.h',
+                      'include/grpc/support/sync_generic.h',
+                      'include/grpc/support/sync_posix.h',
+                      'include/grpc/support/sync_windows.h',
+                      'include/grpc/support/thd.h',
+                      'include/grpc/support/time.h',
+                      'include/grpc/support/tls.h',
+                      'include/grpc/support/tls_gcc.h',
+                      'include/grpc/support/tls_msvc.h',
+                      'include/grpc/support/tls_pthread.h',
+                      'include/grpc/support/useful.h',
+                      'include/grpc/impl/codegen/alloc.h',
+                      'include/grpc/impl/codegen/atm.h',
+                      'include/grpc/impl/codegen/atm_gcc_atomic.h',
+                      'include/grpc/impl/codegen/atm_gcc_sync.h',
+                      'include/grpc/impl/codegen/atm_windows.h',
+                      'include/grpc/impl/codegen/log.h',
+                      'include/grpc/impl/codegen/port_platform.h',
+                      'include/grpc/impl/codegen/slice.h',
+                      'include/grpc/impl/codegen/slice_buffer.h',
+                      'include/grpc/impl/codegen/sync.h',
+                      'include/grpc/impl/codegen/sync_generic.h',
+                      'include/grpc/impl/codegen/sync_posix.h',
+                      'include/grpc/impl/codegen/sync_windows.h',
+                      'include/grpc/impl/codegen/time.h',
+                      'include/grpc/byte_buffer.h',
+                      'include/grpc/byte_buffer_reader.h',
+                      'include/grpc/compression.h',
+                      'include/grpc/grpc.h',
+                      'include/grpc/grpc_posix.h',
+                      'include/grpc/status.h',
+                      'include/grpc/impl/codegen/byte_buffer.h',
+                      'include/grpc/impl/codegen/byte_buffer_reader.h',
+                      'include/grpc/impl/codegen/compression_types.h',
+                      'include/grpc/impl/codegen/connectivity_state.h',
+                      'include/grpc/impl/codegen/grpc_types.h',
+                      'include/grpc/impl/codegen/propagation_bits.h',
+                      'include/grpc/impl/codegen/status.h',
+                      'include/grpc/impl/codegen/alloc.h',
+                      'include/grpc/impl/codegen/atm.h',
+                      'include/grpc/impl/codegen/atm_gcc_atomic.h',
+                      'include/grpc/impl/codegen/atm_gcc_sync.h',
+                      'include/grpc/impl/codegen/atm_windows.h',
+                      'include/grpc/impl/codegen/log.h',
+                      'include/grpc/impl/codegen/port_platform.h',
+                      'include/grpc/impl/codegen/slice.h',
+                      'include/grpc/impl/codegen/slice_buffer.h',
+                      'include/grpc/impl/codegen/sync.h',
+                      'include/grpc/impl/codegen/sync_generic.h',
+                      'include/grpc/impl/codegen/sync_posix.h',
+                      'include/grpc/impl/codegen/sync_windows.h',
+                      'include/grpc/impl/codegen/time.h',
+                      'include/grpc/grpc_security.h',
+                      'include/grpc/grpc_security_constants.h',
+                      'include/grpc/census.h'
+  end
+  s.subspec 'Implementation' do |ss|
+    ss.header_mappings_dir = '.'
+    ss.libraries = 'z'
+    ss.dependency "#{s.name}/Interface", version
+    ss.dependency 'BoringSSL', '~> 4.0'
+
+    # To save you from scrolling, this is the last part of the podspec.
+    ss.source_files = 'src/core/lib/profiling/timers.h',
+                      'src/core/lib/support/backoff.h',
+                      'src/core/lib/support/block_annotate.h',
+                      'src/core/lib/support/env.h',
+                      'src/core/lib/support/murmur_hash.h',
+                      'src/core/lib/support/stack_lockfree.h',
+                      'src/core/lib/support/string.h',
+                      'src/core/lib/support/string_windows.h',
+                      'src/core/lib/support/thd_internal.h',
+                      'src/core/lib/support/time_precise.h',
+                      'src/core/lib/support/tmpfile.h',
+                      'src/core/lib/profiling/basic_timers.c',
+                      'src/core/lib/profiling/stap_timers.c',
+                      'src/core/lib/support/alloc.c',
+                      'src/core/lib/support/avl.c',
+                      'src/core/lib/support/backoff.c',
+                      'src/core/lib/support/cmdline.c',
+                      'src/core/lib/support/cpu_iphone.c',
+                      'src/core/lib/support/cpu_linux.c',
+                      'src/core/lib/support/cpu_posix.c',
+                      'src/core/lib/support/cpu_windows.c',
+                      'src/core/lib/support/env_linux.c',
+                      'src/core/lib/support/env_posix.c',
+                      'src/core/lib/support/env_windows.c',
+                      'src/core/lib/support/histogram.c',
+                      'src/core/lib/support/host_port.c',
+                      'src/core/lib/support/log.c',
+                      'src/core/lib/support/log_android.c',
+                      'src/core/lib/support/log_linux.c',
+                      'src/core/lib/support/log_posix.c',
+                      'src/core/lib/support/log_windows.c',
+                      'src/core/lib/support/murmur_hash.c',
+                      'src/core/lib/support/slice.c',
+                      'src/core/lib/support/slice_buffer.c',
+                      'src/core/lib/support/stack_lockfree.c',
+                      'src/core/lib/support/string.c',
+                      'src/core/lib/support/string_posix.c',
+                      'src/core/lib/support/string_util_windows.c',
+                      'src/core/lib/support/string_windows.c',
+                      'src/core/lib/support/subprocess_posix.c',
+                      'src/core/lib/support/subprocess_windows.c',
+                      'src/core/lib/support/sync.c',
+                      'src/core/lib/support/sync_posix.c',
+                      'src/core/lib/support/sync_windows.c',
+                      'src/core/lib/support/thd.c',
+                      'src/core/lib/support/thd_posix.c',
+                      'src/core/lib/support/thd_windows.c',
+                      'src/core/lib/support/time.c',
+                      'src/core/lib/support/time_posix.c',
+                      'src/core/lib/support/time_precise.c',
+                      'src/core/lib/support/time_windows.c',
+                      'src/core/lib/support/tls_pthread.c',
+                      'src/core/lib/support/tmpfile_msys.c',
+                      'src/core/lib/support/tmpfile_posix.c',
+                      'src/core/lib/support/tmpfile_windows.c',
+                      'src/core/lib/support/wrap_memcpy.c',
+                      'src/core/lib/channel/channel_args.h',
+                      'src/core/lib/channel/channel_stack.h',
+                      'src/core/lib/channel/channel_stack_builder.h',
+                      'src/core/lib/channel/compress_filter.h',
+                      'src/core/lib/channel/connected_channel.h',
+                      'src/core/lib/channel/context.h',
+                      'src/core/lib/channel/http_client_filter.h',
+                      'src/core/lib/channel/http_server_filter.h',
+                      'src/core/lib/compression/algorithm_metadata.h',
+                      'src/core/lib/compression/message_compress.h',
+                      'src/core/lib/debug/trace.h',
+                      'src/core/lib/http/format_request.h',
+                      'src/core/lib/http/httpcli.h',
+                      'src/core/lib/http/parser.h',
+                      'src/core/lib/iomgr/closure.h',
+                      'src/core/lib/iomgr/endpoint.h',
+                      'src/core/lib/iomgr/endpoint_pair.h',
+                      'src/core/lib/iomgr/error.h',
+                      'src/core/lib/iomgr/ev_epoll_linux.h',
+                      'src/core/lib/iomgr/ev_poll_and_epoll_posix.h',
+                      'src/core/lib/iomgr/ev_poll_posix.h',
+                      'src/core/lib/iomgr/ev_posix.h',
+                      'src/core/lib/iomgr/exec_ctx.h',
+                      'src/core/lib/iomgr/executor.h',
+                      'src/core/lib/iomgr/iocp_windows.h',
+                      'src/core/lib/iomgr/iomgr.h',
+                      'src/core/lib/iomgr/iomgr_internal.h',
+                      'src/core/lib/iomgr/iomgr_posix.h',
+                      'src/core/lib/iomgr/load_file.h',
+                      'src/core/lib/iomgr/network_status_tracker.h',
+                      'src/core/lib/iomgr/polling_entity.h',
+                      'src/core/lib/iomgr/pollset.h',
+                      'src/core/lib/iomgr/pollset_set.h',
+                      'src/core/lib/iomgr/pollset_set_windows.h',
+                      'src/core/lib/iomgr/pollset_windows.h',
+                      'src/core/lib/iomgr/resolve_address.h',
+                      'src/core/lib/iomgr/sockaddr.h',
+                      'src/core/lib/iomgr/sockaddr_posix.h',
+                      'src/core/lib/iomgr/sockaddr_utils.h',
+                      'src/core/lib/iomgr/sockaddr_windows.h',
+                      'src/core/lib/iomgr/socket_utils_posix.h',
+                      'src/core/lib/iomgr/socket_windows.h',
+                      'src/core/lib/iomgr/tcp_client.h',
+                      'src/core/lib/iomgr/tcp_posix.h',
+                      'src/core/lib/iomgr/tcp_server.h',
+                      'src/core/lib/iomgr/tcp_windows.h',
+                      'src/core/lib/iomgr/time_averaged_stats.h',
+                      'src/core/lib/iomgr/timer.h',
+                      'src/core/lib/iomgr/timer_heap.h',
+                      'src/core/lib/iomgr/udp_server.h',
+                      'src/core/lib/iomgr/unix_sockets_posix.h',
+                      'src/core/lib/iomgr/wakeup_fd_pipe.h',
+                      'src/core/lib/iomgr/wakeup_fd_posix.h',
+                      'src/core/lib/iomgr/workqueue.h',
+                      'src/core/lib/iomgr/workqueue_posix.h',
+                      'src/core/lib/iomgr/workqueue_windows.h',
+                      'src/core/lib/json/json.h',
+                      'src/core/lib/json/json_common.h',
+                      'src/core/lib/json/json_reader.h',
+                      'src/core/lib/json/json_writer.h',
+                      'src/core/lib/surface/api_trace.h',
+                      'src/core/lib/surface/call.h',
+                      'src/core/lib/surface/call_test_only.h',
+                      'src/core/lib/surface/channel.h',
+                      'src/core/lib/surface/channel_init.h',
+                      'src/core/lib/surface/channel_stack_type.h',
+                      'src/core/lib/surface/completion_queue.h',
+                      'src/core/lib/surface/event_string.h',
+                      'src/core/lib/surface/init.h',
+                      'src/core/lib/surface/lame_client.h',
+                      'src/core/lib/surface/server.h',
+                      'src/core/lib/transport/byte_stream.h',
+                      'src/core/lib/transport/connectivity_state.h',
+                      'src/core/lib/transport/metadata.h',
+                      'src/core/lib/transport/metadata_batch.h',
+                      'src/core/lib/transport/static_metadata.h',
+                      'src/core/lib/transport/transport.h',
+                      'src/core/lib/transport/transport_impl.h',
+                      'src/core/ext/transport/chttp2/transport/bin_decoder.h',
+                      'src/core/ext/transport/chttp2/transport/bin_encoder.h',
+                      'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
+                      'src/core/ext/transport/chttp2/transport/frame.h',
+                      'src/core/ext/transport/chttp2/transport/frame_data.h',
+                      'src/core/ext/transport/chttp2/transport/frame_goaway.h',
+                      'src/core/ext/transport/chttp2/transport/frame_ping.h',
+                      'src/core/ext/transport/chttp2/transport/frame_rst_stream.h',
+                      'src/core/ext/transport/chttp2/transport/frame_settings.h',
+                      'src/core/ext/transport/chttp2/transport/frame_window_update.h',
+                      'src/core/ext/transport/chttp2/transport/hpack_encoder.h',
+                      'src/core/ext/transport/chttp2/transport/hpack_parser.h',
+                      'src/core/ext/transport/chttp2/transport/hpack_table.h',
+                      'src/core/ext/transport/chttp2/transport/http2_errors.h',
+                      'src/core/ext/transport/chttp2/transport/huffsyms.h',
+                      'src/core/ext/transport/chttp2/transport/incoming_metadata.h',
+                      'src/core/ext/transport/chttp2/transport/internal.h',
+                      'src/core/ext/transport/chttp2/transport/status_conversion.h',
+                      'src/core/ext/transport/chttp2/transport/stream_map.h',
+                      'src/core/ext/transport/chttp2/transport/timeout_encoding.h',
+                      'src/core/ext/transport/chttp2/transport/varint.h',
+                      'src/core/ext/transport/chttp2/alpn/alpn.h',
+                      'src/core/lib/security/context/security_context.h',
+                      'src/core/lib/security/credentials/composite/composite_credentials.h',
+                      'src/core/lib/security/credentials/credentials.h',
+                      'src/core/lib/security/credentials/fake/fake_credentials.h',
+                      'src/core/lib/security/credentials/google_default/google_default_credentials.h',
+                      'src/core/lib/security/credentials/iam/iam_credentials.h',
+                      'src/core/lib/security/credentials/jwt/json_token.h',
+                      'src/core/lib/security/credentials/jwt/jwt_credentials.h',
+                      'src/core/lib/security/credentials/jwt/jwt_verifier.h',
+                      'src/core/lib/security/credentials/oauth2/oauth2_credentials.h',
+                      'src/core/lib/security/credentials/plugin/plugin_credentials.h',
+                      'src/core/lib/security/credentials/ssl/ssl_credentials.h',
+                      'src/core/lib/security/transport/auth_filters.h',
+                      'src/core/lib/security/transport/handshake.h',
+                      'src/core/lib/security/transport/secure_endpoint.h',
+                      'src/core/lib/security/transport/security_connector.h',
+                      'src/core/lib/security/transport/tsi_error.h',
+                      'src/core/lib/security/util/b64.h',
+                      'src/core/lib/security/util/json_util.h',
+                      'src/core/lib/tsi/fake_transport_security.h',
+                      'src/core/lib/tsi/ssl_transport_security.h',
+                      'src/core/lib/tsi/ssl_types.h',
+                      'src/core/lib/tsi/transport_security.h',
+                      'src/core/lib/tsi/transport_security_interface.h',
+                      'src/core/ext/client_config/client_channel.h',
+                      'src/core/ext/client_config/client_channel_factory.h',
+                      'src/core/ext/client_config/client_config.h',
+                      'src/core/ext/client_config/connector.h',
+                      'src/core/ext/client_config/initial_connect_string.h',
+                      'src/core/ext/client_config/lb_policy.h',
+                      'src/core/ext/client_config/lb_policy_factory.h',
+                      'src/core/ext/client_config/lb_policy_registry.h',
+                      'src/core/ext/client_config/parse_address.h',
+                      'src/core/ext/client_config/resolver.h',
+                      'src/core/ext/client_config/resolver_factory.h',
+                      'src/core/ext/client_config/resolver_registry.h',
+                      'src/core/ext/client_config/subchannel.h',
+                      'src/core/ext/client_config/subchannel_call_holder.h',
+                      'src/core/ext/client_config/subchannel_index.h',
+                      'src/core/ext/client_config/uri_parser.h',
+                      'src/core/ext/lb_policy/grpclb/grpclb.h',
+                      'src/core/ext/lb_policy/grpclb/load_balancer_api.h',
+                      'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
+                      'third_party/nanopb/pb.h',
+                      'third_party/nanopb/pb_common.h',
+                      'third_party/nanopb/pb_decode.h',
+                      'third_party/nanopb/pb_encode.h',
+                      'src/core/ext/load_reporting/load_reporting.h',
+                      'src/core/ext/load_reporting/load_reporting_filter.h',
+                      'src/core/ext/census/aggregation.h',
+                      'src/core/ext/census/census_interface.h',
+                      'src/core/ext/census/census_rpc_stats.h',
+                      'src/core/ext/census/gen/census.pb.h',
+                      'src/core/ext/census/grpc_filter.h',
+                      'src/core/ext/census/mlog.h',
+                      'src/core/ext/census/rpc_metric_id.h',
+                      'src/core/lib/surface/init.c',
+                      'src/core/lib/channel/channel_args.c',
+                      'src/core/lib/channel/channel_stack.c',
+                      'src/core/lib/channel/channel_stack_builder.c',
+                      'src/core/lib/channel/compress_filter.c',
+                      'src/core/lib/channel/connected_channel.c',
+                      'src/core/lib/channel/http_client_filter.c',
+                      'src/core/lib/channel/http_server_filter.c',
+                      'src/core/lib/compression/compression.c',
+                      'src/core/lib/compression/message_compress.c',
+                      'src/core/lib/debug/trace.c',
+                      'src/core/lib/http/format_request.c',
+                      'src/core/lib/http/httpcli.c',
+                      'src/core/lib/http/parser.c',
+                      'src/core/lib/iomgr/closure.c',
+                      'src/core/lib/iomgr/endpoint.c',
+                      'src/core/lib/iomgr/endpoint_pair_posix.c',
+                      'src/core/lib/iomgr/endpoint_pair_windows.c',
+                      'src/core/lib/iomgr/error.c',
+                      'src/core/lib/iomgr/ev_epoll_linux.c',
+                      'src/core/lib/iomgr/ev_poll_and_epoll_posix.c',
+                      'src/core/lib/iomgr/ev_poll_posix.c',
+                      'src/core/lib/iomgr/ev_posix.c',
+                      'src/core/lib/iomgr/exec_ctx.c',
+                      'src/core/lib/iomgr/executor.c',
+                      'src/core/lib/iomgr/iocp_windows.c',
+                      'src/core/lib/iomgr/iomgr.c',
+                      'src/core/lib/iomgr/iomgr_posix.c',
+                      'src/core/lib/iomgr/iomgr_windows.c',
+                      'src/core/lib/iomgr/load_file.c',
+                      'src/core/lib/iomgr/network_status_tracker.c',
+                      'src/core/lib/iomgr/polling_entity.c',
+                      'src/core/lib/iomgr/pollset_set_windows.c',
+                      'src/core/lib/iomgr/pollset_windows.c',
+                      'src/core/lib/iomgr/resolve_address_posix.c',
+                      'src/core/lib/iomgr/resolve_address_windows.c',
+                      'src/core/lib/iomgr/sockaddr_utils.c',
+                      'src/core/lib/iomgr/socket_utils_common_posix.c',
+                      'src/core/lib/iomgr/socket_utils_linux.c',
+                      'src/core/lib/iomgr/socket_utils_posix.c',
+                      'src/core/lib/iomgr/socket_windows.c',
+                      'src/core/lib/iomgr/tcp_client_posix.c',
+                      'src/core/lib/iomgr/tcp_client_windows.c',
+                      'src/core/lib/iomgr/tcp_posix.c',
+                      'src/core/lib/iomgr/tcp_server_posix.c',
+                      'src/core/lib/iomgr/tcp_server_windows.c',
+                      'src/core/lib/iomgr/tcp_windows.c',
+                      'src/core/lib/iomgr/time_averaged_stats.c',
+                      'src/core/lib/iomgr/timer.c',
+                      'src/core/lib/iomgr/timer_heap.c',
+                      'src/core/lib/iomgr/udp_server.c',
+                      'src/core/lib/iomgr/unix_sockets_posix.c',
+                      'src/core/lib/iomgr/unix_sockets_posix_noop.c',
+                      'src/core/lib/iomgr/wakeup_fd_eventfd.c',
+                      'src/core/lib/iomgr/wakeup_fd_nospecial.c',
+                      'src/core/lib/iomgr/wakeup_fd_pipe.c',
+                      'src/core/lib/iomgr/wakeup_fd_posix.c',
+                      'src/core/lib/iomgr/workqueue_posix.c',
+                      'src/core/lib/iomgr/workqueue_windows.c',
+                      'src/core/lib/json/json.c',
+                      'src/core/lib/json/json_reader.c',
+                      'src/core/lib/json/json_string.c',
+                      'src/core/lib/json/json_writer.c',
+                      'src/core/lib/surface/alarm.c',
+                      'src/core/lib/surface/api_trace.c',
+                      'src/core/lib/surface/byte_buffer.c',
+                      'src/core/lib/surface/byte_buffer_reader.c',
+                      'src/core/lib/surface/call.c',
+                      'src/core/lib/surface/call_details.c',
+                      'src/core/lib/surface/call_log_batch.c',
+                      'src/core/lib/surface/channel.c',
+                      'src/core/lib/surface/channel_init.c',
+                      'src/core/lib/surface/channel_ping.c',
+                      'src/core/lib/surface/channel_stack_type.c',
+                      'src/core/lib/surface/completion_queue.c',
+                      'src/core/lib/surface/event_string.c',
+                      'src/core/lib/surface/lame_client.c',
+                      'src/core/lib/surface/metadata_array.c',
+                      'src/core/lib/surface/server.c',
+                      'src/core/lib/surface/validate_metadata.c',
+                      'src/core/lib/surface/version.c',
+                      'src/core/lib/transport/byte_stream.c',
+                      'src/core/lib/transport/connectivity_state.c',
+                      'src/core/lib/transport/metadata.c',
+                      'src/core/lib/transport/metadata_batch.c',
+                      'src/core/lib/transport/static_metadata.c',
+                      'src/core/lib/transport/transport.c',
+                      'src/core/lib/transport/transport_op_string.c',
+                      'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c',
+                      'src/core/ext/transport/chttp2/transport/bin_decoder.c',
+                      'src/core/ext/transport/chttp2/transport/bin_encoder.c',
+                      'src/core/ext/transport/chttp2/transport/chttp2_plugin.c',
+                      'src/core/ext/transport/chttp2/transport/chttp2_transport.c',
+                      'src/core/ext/transport/chttp2/transport/frame_data.c',
+                      'src/core/ext/transport/chttp2/transport/frame_goaway.c',
+                      'src/core/ext/transport/chttp2/transport/frame_ping.c',
+                      'src/core/ext/transport/chttp2/transport/frame_rst_stream.c',
+                      'src/core/ext/transport/chttp2/transport/frame_settings.c',
+                      'src/core/ext/transport/chttp2/transport/frame_window_update.c',
+                      'src/core/ext/transport/chttp2/transport/hpack_encoder.c',
+                      'src/core/ext/transport/chttp2/transport/hpack_parser.c',
+                      'src/core/ext/transport/chttp2/transport/hpack_table.c',
+                      'src/core/ext/transport/chttp2/transport/huffsyms.c',
+                      'src/core/ext/transport/chttp2/transport/incoming_metadata.c',
+                      'src/core/ext/transport/chttp2/transport/parsing.c',
+                      'src/core/ext/transport/chttp2/transport/status_conversion.c',
+                      'src/core/ext/transport/chttp2/transport/stream_lists.c',
+                      'src/core/ext/transport/chttp2/transport/stream_map.c',
+                      'src/core/ext/transport/chttp2/transport/timeout_encoding.c',
+                      'src/core/ext/transport/chttp2/transport/varint.c',
+                      'src/core/ext/transport/chttp2/transport/writing.c',
+                      'src/core/ext/transport/chttp2/alpn/alpn.c',
+                      'src/core/lib/http/httpcli_security_connector.c',
+                      'src/core/lib/security/context/security_context.c',
+                      'src/core/lib/security/credentials/composite/composite_credentials.c',
+                      'src/core/lib/security/credentials/credentials.c',
+                      'src/core/lib/security/credentials/credentials_metadata.c',
+                      'src/core/lib/security/credentials/fake/fake_credentials.c',
+                      'src/core/lib/security/credentials/google_default/credentials_posix.c',
+                      'src/core/lib/security/credentials/google_default/credentials_windows.c',
+                      'src/core/lib/security/credentials/google_default/google_default_credentials.c',
+                      'src/core/lib/security/credentials/iam/iam_credentials.c',
+                      'src/core/lib/security/credentials/jwt/json_token.c',
+                      'src/core/lib/security/credentials/jwt/jwt_credentials.c',
+                      'src/core/lib/security/credentials/jwt/jwt_verifier.c',
+                      'src/core/lib/security/credentials/oauth2/oauth2_credentials.c',
+                      'src/core/lib/security/credentials/plugin/plugin_credentials.c',
+                      'src/core/lib/security/credentials/ssl/ssl_credentials.c',
+                      'src/core/lib/security/transport/client_auth_filter.c',
+                      'src/core/lib/security/transport/handshake.c',
+                      'src/core/lib/security/transport/secure_endpoint.c',
+                      'src/core/lib/security/transport/security_connector.c',
+                      'src/core/lib/security/transport/server_auth_filter.c',
+                      'src/core/lib/security/transport/tsi_error.c',
+                      'src/core/lib/security/util/b64.c',
+                      'src/core/lib/security/util/json_util.c',
+                      'src/core/lib/surface/init_secure.c',
+                      'src/core/lib/tsi/fake_transport_security.c',
+                      'src/core/lib/tsi/ssl_transport_security.c',
+                      'src/core/lib/tsi/transport_security.c',
+                      'src/core/ext/transport/chttp2/client/secure/secure_channel_create.c',
+                      'src/core/ext/client_config/channel_connectivity.c',
+                      'src/core/ext/client_config/client_channel.c',
+                      'src/core/ext/client_config/client_channel_factory.c',
+                      'src/core/ext/client_config/client_config.c',
+                      'src/core/ext/client_config/client_config_plugin.c',
+                      'src/core/ext/client_config/connector.c',
+                      'src/core/ext/client_config/default_initial_connect_string.c',
+                      'src/core/ext/client_config/initial_connect_string.c',
+                      'src/core/ext/client_config/lb_policy.c',
+                      'src/core/ext/client_config/lb_policy_factory.c',
+                      'src/core/ext/client_config/lb_policy_registry.c',
+                      'src/core/ext/client_config/parse_address.c',
+                      'src/core/ext/client_config/resolver.c',
+                      'src/core/ext/client_config/resolver_factory.c',
+                      'src/core/ext/client_config/resolver_registry.c',
+                      'src/core/ext/client_config/subchannel.c',
+                      'src/core/ext/client_config/subchannel_call_holder.c',
+                      'src/core/ext/client_config/subchannel_index.c',
+                      'src/core/ext/client_config/uri_parser.c',
+                      'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
+                      'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c',
+                      'src/core/ext/transport/chttp2/client/insecure/channel_create.c',
+                      'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c',
+                      'src/core/ext/lb_policy/grpclb/grpclb.c',
+                      'src/core/ext/lb_policy/grpclb/load_balancer_api.c',
+                      'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
+                      'third_party/nanopb/pb_common.c',
+                      'third_party/nanopb/pb_decode.c',
+                      'third_party/nanopb/pb_encode.c',
+                      'src/core/ext/lb_policy/pick_first/pick_first.c',
+                      'src/core/ext/lb_policy/round_robin/round_robin.c',
+                      'src/core/ext/resolver/dns/native/dns_resolver.c',
+                      'src/core/ext/resolver/sockaddr/sockaddr_resolver.c',
+                      'src/core/ext/load_reporting/load_reporting.c',
+                      'src/core/ext/load_reporting/load_reporting_filter.c',
+                      'src/core/ext/census/context.c',
+                      'src/core/ext/census/gen/census.pb.c',
+                      'src/core/ext/census/grpc_context.c',
+                      'src/core/ext/census/grpc_filter.c',
+                      'src/core/ext/census/grpc_plugin.c',
+                      'src/core/ext/census/initialize.c',
+                      'src/core/ext/census/mlog.c',
+                      'src/core/ext/census/operation.c',
+                      'src/core/ext/census/placeholders.c',
+                      'src/core/ext/census/tracing.c',
+                      'src/core/plugin_registry/grpc_plugin_registry.c'
+
+    ss.private_header_files = 'src/core/lib/profiling/timers.h',
+                              'src/core/lib/support/backoff.h',
+                              'src/core/lib/support/block_annotate.h',
+                              'src/core/lib/support/env.h',
+                              'src/core/lib/support/murmur_hash.h',
+                              'src/core/lib/support/stack_lockfree.h',
+                              'src/core/lib/support/string.h',
+                              'src/core/lib/support/string_windows.h',
+                              'src/core/lib/support/thd_internal.h',
+                              'src/core/lib/support/time_precise.h',
+                              'src/core/lib/support/tmpfile.h',
+                              'src/core/lib/channel/channel_args.h',
+                              'src/core/lib/channel/channel_stack.h',
+                              'src/core/lib/channel/channel_stack_builder.h',
+                              'src/core/lib/channel/compress_filter.h',
+                              'src/core/lib/channel/connected_channel.h',
+                              'src/core/lib/channel/context.h',
+                              'src/core/lib/channel/http_client_filter.h',
+                              'src/core/lib/channel/http_server_filter.h',
+                              'src/core/lib/compression/algorithm_metadata.h',
+                              'src/core/lib/compression/message_compress.h',
+                              'src/core/lib/debug/trace.h',
+                              'src/core/lib/http/format_request.h',
+                              'src/core/lib/http/httpcli.h',
+                              'src/core/lib/http/parser.h',
+                              'src/core/lib/iomgr/closure.h',
+                              'src/core/lib/iomgr/endpoint.h',
+                              'src/core/lib/iomgr/endpoint_pair.h',
+                              'src/core/lib/iomgr/error.h',
+                              'src/core/lib/iomgr/ev_epoll_linux.h',
+                              'src/core/lib/iomgr/ev_poll_and_epoll_posix.h',
+                              'src/core/lib/iomgr/ev_poll_posix.h',
+                              'src/core/lib/iomgr/ev_posix.h',
+                              'src/core/lib/iomgr/exec_ctx.h',
+                              'src/core/lib/iomgr/executor.h',
+                              'src/core/lib/iomgr/iocp_windows.h',
+                              'src/core/lib/iomgr/iomgr.h',
+                              'src/core/lib/iomgr/iomgr_internal.h',
+                              'src/core/lib/iomgr/iomgr_posix.h',
+                              'src/core/lib/iomgr/load_file.h',
+                              'src/core/lib/iomgr/network_status_tracker.h',
+                              'src/core/lib/iomgr/polling_entity.h',
+                              'src/core/lib/iomgr/pollset.h',
+                              'src/core/lib/iomgr/pollset_set.h',
+                              'src/core/lib/iomgr/pollset_set_windows.h',
+                              'src/core/lib/iomgr/pollset_windows.h',
+                              'src/core/lib/iomgr/resolve_address.h',
+                              'src/core/lib/iomgr/sockaddr.h',
+                              'src/core/lib/iomgr/sockaddr_posix.h',
+                              'src/core/lib/iomgr/sockaddr_utils.h',
+                              'src/core/lib/iomgr/sockaddr_windows.h',
+                              'src/core/lib/iomgr/socket_utils_posix.h',
+                              'src/core/lib/iomgr/socket_windows.h',
+                              'src/core/lib/iomgr/tcp_client.h',
+                              'src/core/lib/iomgr/tcp_posix.h',
+                              'src/core/lib/iomgr/tcp_server.h',
+                              'src/core/lib/iomgr/tcp_windows.h',
+                              'src/core/lib/iomgr/time_averaged_stats.h',
+                              'src/core/lib/iomgr/timer.h',
+                              'src/core/lib/iomgr/timer_heap.h',
+                              'src/core/lib/iomgr/udp_server.h',
+                              'src/core/lib/iomgr/unix_sockets_posix.h',
+                              'src/core/lib/iomgr/wakeup_fd_pipe.h',
+                              'src/core/lib/iomgr/wakeup_fd_posix.h',
+                              'src/core/lib/iomgr/workqueue.h',
+                              'src/core/lib/iomgr/workqueue_posix.h',
+                              'src/core/lib/iomgr/workqueue_windows.h',
+                              'src/core/lib/json/json.h',
+                              'src/core/lib/json/json_common.h',
+                              'src/core/lib/json/json_reader.h',
+                              'src/core/lib/json/json_writer.h',
+                              'src/core/lib/surface/api_trace.h',
+                              'src/core/lib/surface/call.h',
+                              'src/core/lib/surface/call_test_only.h',
+                              'src/core/lib/surface/channel.h',
+                              'src/core/lib/surface/channel_init.h',
+                              'src/core/lib/surface/channel_stack_type.h',
+                              'src/core/lib/surface/completion_queue.h',
+                              'src/core/lib/surface/event_string.h',
+                              'src/core/lib/surface/init.h',
+                              'src/core/lib/surface/lame_client.h',
+                              'src/core/lib/surface/server.h',
+                              'src/core/lib/transport/byte_stream.h',
+                              'src/core/lib/transport/connectivity_state.h',
+                              'src/core/lib/transport/metadata.h',
+                              'src/core/lib/transport/metadata_batch.h',
+                              'src/core/lib/transport/static_metadata.h',
+                              'src/core/lib/transport/transport.h',
+                              'src/core/lib/transport/transport_impl.h',
+                              'src/core/ext/transport/chttp2/transport/bin_decoder.h',
+                              'src/core/ext/transport/chttp2/transport/bin_encoder.h',
+                              'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
+                              'src/core/ext/transport/chttp2/transport/frame.h',
+                              'src/core/ext/transport/chttp2/transport/frame_data.h',
+                              'src/core/ext/transport/chttp2/transport/frame_goaway.h',
+                              'src/core/ext/transport/chttp2/transport/frame_ping.h',
+                              'src/core/ext/transport/chttp2/transport/frame_rst_stream.h',
+                              'src/core/ext/transport/chttp2/transport/frame_settings.h',
+                              'src/core/ext/transport/chttp2/transport/frame_window_update.h',
+                              'src/core/ext/transport/chttp2/transport/hpack_encoder.h',
+                              'src/core/ext/transport/chttp2/transport/hpack_parser.h',
+                              'src/core/ext/transport/chttp2/transport/hpack_table.h',
+                              'src/core/ext/transport/chttp2/transport/http2_errors.h',
+                              'src/core/ext/transport/chttp2/transport/huffsyms.h',
+                              'src/core/ext/transport/chttp2/transport/incoming_metadata.h',
+                              'src/core/ext/transport/chttp2/transport/internal.h',
+                              'src/core/ext/transport/chttp2/transport/status_conversion.h',
+                              'src/core/ext/transport/chttp2/transport/stream_map.h',
+                              'src/core/ext/transport/chttp2/transport/timeout_encoding.h',
+                              'src/core/ext/transport/chttp2/transport/varint.h',
+                              'src/core/ext/transport/chttp2/alpn/alpn.h',
+                              'src/core/lib/security/context/security_context.h',
+                              'src/core/lib/security/credentials/composite/composite_credentials.h',
+                              'src/core/lib/security/credentials/credentials.h',
+                              'src/core/lib/security/credentials/fake/fake_credentials.h',
+                              'src/core/lib/security/credentials/google_default/google_default_credentials.h',
+                              'src/core/lib/security/credentials/iam/iam_credentials.h',
+                              'src/core/lib/security/credentials/jwt/json_token.h',
+                              'src/core/lib/security/credentials/jwt/jwt_credentials.h',
+                              'src/core/lib/security/credentials/jwt/jwt_verifier.h',
+                              'src/core/lib/security/credentials/oauth2/oauth2_credentials.h',
+                              'src/core/lib/security/credentials/plugin/plugin_credentials.h',
+                              'src/core/lib/security/credentials/ssl/ssl_credentials.h',
+                              'src/core/lib/security/transport/auth_filters.h',
+                              'src/core/lib/security/transport/handshake.h',
+                              'src/core/lib/security/transport/secure_endpoint.h',
+                              'src/core/lib/security/transport/security_connector.h',
+                              'src/core/lib/security/transport/tsi_error.h',
+                              'src/core/lib/security/util/b64.h',
+                              'src/core/lib/security/util/json_util.h',
+                              'src/core/lib/tsi/fake_transport_security.h',
+                              'src/core/lib/tsi/ssl_transport_security.h',
+                              'src/core/lib/tsi/ssl_types.h',
+                              'src/core/lib/tsi/transport_security.h',
+                              'src/core/lib/tsi/transport_security_interface.h',
+                              'src/core/ext/client_config/client_channel.h',
+                              'src/core/ext/client_config/client_channel_factory.h',
+                              'src/core/ext/client_config/client_config.h',
+                              'src/core/ext/client_config/connector.h',
+                              'src/core/ext/client_config/initial_connect_string.h',
+                              'src/core/ext/client_config/lb_policy.h',
+                              'src/core/ext/client_config/lb_policy_factory.h',
+                              'src/core/ext/client_config/lb_policy_registry.h',
+                              'src/core/ext/client_config/parse_address.h',
+                              'src/core/ext/client_config/resolver.h',
+                              'src/core/ext/client_config/resolver_factory.h',
+                              'src/core/ext/client_config/resolver_registry.h',
+                              'src/core/ext/client_config/subchannel.h',
+                              'src/core/ext/client_config/subchannel_call_holder.h',
+                              'src/core/ext/client_config/subchannel_index.h',
+                              'src/core/ext/client_config/uri_parser.h',
+                              'src/core/ext/lb_policy/grpclb/grpclb.h',
+                              'src/core/ext/lb_policy/grpclb/load_balancer_api.h',
+                              'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
+                              'third_party/nanopb/pb.h',
+                              'third_party/nanopb/pb_common.h',
+                              'third_party/nanopb/pb_decode.h',
+                              'third_party/nanopb/pb_encode.h',
+                              'src/core/ext/load_reporting/load_reporting.h',
+                              'src/core/ext/load_reporting/load_reporting_filter.h',
+                              'src/core/ext/census/aggregation.h',
+                              'src/core/ext/census/census_interface.h',
+                              'src/core/ext/census/census_rpc_stats.h',
+                              'src/core/ext/census/gen/census.pb.h',
+                              'src/core/ext/census/grpc_filter.h',
+                              'src/core/ext/census/mlog.h',
+                              'src/core/ext/census/rpc_metric_id.h'
+  end
+end
diff --git a/src/python/grpcio/grpc/framework/foundation/later.py b/gRPC-ProtoRPC.podspec
similarity index 55%
rename from src/python/grpcio/grpc/framework/foundation/later.py
rename to gRPC-ProtoRPC.podspec
index 1d1e0650413da99f824707fe31010fdb24b18ad6..9cc33c7dbd0e76c2c318f7fa18052b25fae174a1 100644
--- a/src/python/grpcio/grpc/framework/foundation/later.py
+++ b/gRPC-ProtoRPC.podspec
@@ -1,3 +1,9 @@
+# GRPC CocoaPods podspec
+# This file has been automatically generated from a template file.
+# Please look at the templates directory instead.
+# This file can be regenerated from the template by running
+# tools/buildgen/generate_projects.sh
+
 # Copyright 2015, Google Inc.
 # All rights reserved.
 #
@@ -27,25 +33,37 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""Enables scheduling execution at a later time."""
 
-import time
+Pod::Spec.new do |s|
+  s.name     = 'gRPC-ProtoRPC'
+  version = '0.14.0'
+  s.version  = version
+  s.summary  = 'RPC library for Protocol Buffers, based on gRPC'
+  s.homepage = 'http://www.grpc.io'
+  s.license  = 'New BSD'
+  s.authors  = { 'The gRPC contributors' => 'grpc-packages@google.com' }
 
-from grpc.framework.foundation import _timer_future
+  s.source = {
+    :git => 'https://github.com/grpc/grpc.git',
+    :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}",
+  }
 
+  s.ios.deployment_target = '7.1'
+  s.osx.deployment_target = '10.9'
 
-def later(delay, computation):
-  """Schedules later execution of a callable.
+  name = 'ProtoRPC'
+  s.module_name = name
+  s.header_dir = name
 
-  Args:
-    delay: Any numeric value. Represents the minimum length of time in seconds
-      to allow to pass before beginning the computation. No guarantees are made
-      about the maximum length of time that will pass.
-    computation: A callable that accepts no arguments.
+  src_dir = 'src/objective-c/ProtoRPC'
+  s.source_files = "#{src_dir}/*.{h,m}"
+  s.header_mappings_dir = "#{src_dir}"
 
-  Returns:
-    A Future representing the scheduled computation.
-  """
-  timer_future = _timer_future.TimerFuture(time.time() + delay, computation)
-  timer_future.start()
-  return timer_future
+  s.dependency 'gRPC', version
+  s.dependency 'gRPC-RxLibrary', version
+  s.dependency 'Protobuf', '~> 3.0.0-beta-3.1'
+  # This is needed by all pods that depend on Protobuf:
+  s.pod_target_xcconfig = {
+    'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+  }
+end
diff --git a/src/python/grpcio/grpc/framework/foundation/activated.py b/gRPC-RxLibrary.podspec
similarity index 60%
rename from src/python/grpcio/grpc/framework/foundation/activated.py
rename to gRPC-RxLibrary.podspec
index 8b8e4f45b5cc5ed79c0ad556349bf4559965a9f9..6263878213e010a538d597aa01c5b9dcfcf803da 100644
--- a/src/python/grpcio/grpc/framework/foundation/activated.py
+++ b/gRPC-RxLibrary.podspec
@@ -1,3 +1,9 @@
+# GRPC CocoaPods podspec
+# This file has been automatically generated from a template file.
+# Please look at the templates directory instead.
+# This file can be regenerated from the template by running
+# tools/buildgen/generate_projects.sh
+
 # Copyright 2015, Google Inc.
 # All rights reserved.
 #
@@ -27,39 +33,30 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""Interfaces related to streams of values or objects."""
-
-import abc
-
-import six
-
-class Activated(six.with_metaclass(abc.ABCMeta)):
-  """Interface for objects that may be started and stopped.
-
-  Values implementing this type must also implement the context manager
-  protocol.
-  """
-
-  @abc.abstractmethod
-  def __enter__(self):
-    """See the context manager protocol for specification."""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def __exit__(self, exc_type, exc_val, exc_tb):
-    """See the context manager protocol for specification."""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def start(self):
-    """Activates this object.
-
-    Returns:
-      A value equal to the value returned by this object's __enter__ method.
-    """
-    raise NotImplementedError()
 
-  @abc.abstractmethod
-  def stop(self):
-    """Deactivates this object."""
-    raise NotImplementedError()
+Pod::Spec.new do |s|
+  s.name     = 'gRPC-RxLibrary'
+  version = '0.14.0'
+  s.version  = version
+  s.summary  = 'Reactive Extensions library for iOS/OSX.'
+  s.homepage = 'http://www.grpc.io'
+  s.license  = 'New BSD'
+  s.authors  = { 'The gRPC contributors' => 'grpc-packages@google.com' }
+
+  s.source = {
+    :git => 'https://github.com/grpc/grpc.git',
+    :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}",
+  }
+
+  s.ios.deployment_target = '7.1'
+  s.osx.deployment_target = '10.9'
+
+  name = 'RxLibrary'
+  s.module_name = name
+  s.header_dir = name
+
+  src_dir = 'src/objective-c/RxLibrary'
+  s.source_files = "#{src_dir}/*.{h,m}", "#{src_dir}/**/*.{h,m}"
+  s.private_header_files = "#{src_dir}/private/*.h"
+  s.header_mappings_dir = "#{src_dir}"
+end
diff --git a/gRPC.podspec b/gRPC.podspec
index 91999208cefdc873f5daa2e50d0df1eff1ab41ac..e5556cc5448f9be2f2145131b414433a1616e6a7 100644
--- a/gRPC.podspec
+++ b/gRPC.podspec
@@ -43,702 +43,26 @@ Pod::Spec.new do |s|
   s.license  = 'New BSD'
   s.authors  = { 'The gRPC contributors' => 'grpc-packages@google.com' }
 
-  s.source = { :git => 'https://github.com/grpc/grpc.git',
-               :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}",
-               :submodules => true }
-
+  s.source = {
+    :git => 'https://github.com/grpc/grpc.git',
+    :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}",
+  }
 
   s.ios.deployment_target = '7.1'
   s.osx.deployment_target = '10.9'
-  s.requires_arc = true
-
-  objc_dir = 'src/objective-c'
-
-  # Reactive Extensions library for iOS.
-  s.subspec 'RxLibrary' do |ss|
-    src_dir = "#{objc_dir}/RxLibrary"
-    ss.source_files = "#{src_dir}/*.{h,m}", "#{src_dir}/**/*.{h,m}"
-    ss.private_header_files = "#{src_dir}/private/*.h"
-    ss.header_mappings_dir = "#{objc_dir}"
-  end
-
-  # Core cross-platform gRPC library, written in C.
-  s.subspec 'C-Core' do |ss|
-    ss.source_files = 'src/core/lib/profiling/timers.h',
-                      'src/core/lib/support/backoff.h',
-                      'src/core/lib/support/block_annotate.h',
-                      'src/core/lib/support/env.h',
-                      'src/core/lib/support/murmur_hash.h',
-                      'src/core/lib/support/stack_lockfree.h',
-                      'src/core/lib/support/string.h',
-                      'src/core/lib/support/string_windows.h',
-                      'src/core/lib/support/thd_internal.h',
-                      'src/core/lib/support/time_precise.h',
-                      'src/core/lib/support/tmpfile.h',
-                      'include/grpc/support/alloc.h',
-                      'include/grpc/support/atm.h',
-                      'include/grpc/support/atm_gcc_atomic.h',
-                      'include/grpc/support/atm_gcc_sync.h',
-                      'include/grpc/support/atm_windows.h',
-                      'include/grpc/support/avl.h',
-                      'include/grpc/support/cmdline.h',
-                      'include/grpc/support/cpu.h',
-                      'include/grpc/support/histogram.h',
-                      'include/grpc/support/host_port.h',
-                      'include/grpc/support/log.h',
-                      'include/grpc/support/log_windows.h',
-                      'include/grpc/support/port_platform.h',
-                      'include/grpc/support/slice.h',
-                      'include/grpc/support/slice_buffer.h',
-                      'include/grpc/support/string_util.h',
-                      'include/grpc/support/subprocess.h',
-                      'include/grpc/support/sync.h',
-                      'include/grpc/support/sync_generic.h',
-                      'include/grpc/support/sync_posix.h',
-                      'include/grpc/support/sync_windows.h',
-                      'include/grpc/support/thd.h',
-                      'include/grpc/support/time.h',
-                      'include/grpc/support/tls.h',
-                      'include/grpc/support/tls_gcc.h',
-                      'include/grpc/support/tls_msvc.h',
-                      'include/grpc/support/tls_pthread.h',
-                      'include/grpc/support/useful.h',
-                      'include/grpc/impl/codegen/alloc.h',
-                      'include/grpc/impl/codegen/atm.h',
-                      'include/grpc/impl/codegen/atm_gcc_atomic.h',
-                      'include/grpc/impl/codegen/atm_gcc_sync.h',
-                      'include/grpc/impl/codegen/atm_windows.h',
-                      'include/grpc/impl/codegen/log.h',
-                      'include/grpc/impl/codegen/port_platform.h',
-                      'include/grpc/impl/codegen/slice.h',
-                      'include/grpc/impl/codegen/slice_buffer.h',
-                      'include/grpc/impl/codegen/sync.h',
-                      'include/grpc/impl/codegen/sync_generic.h',
-                      'include/grpc/impl/codegen/sync_posix.h',
-                      'include/grpc/impl/codegen/sync_windows.h',
-                      'include/grpc/impl/codegen/time.h',
-                      'src/core/lib/profiling/basic_timers.c',
-                      'src/core/lib/profiling/stap_timers.c',
-                      'src/core/lib/support/alloc.c',
-                      'src/core/lib/support/avl.c',
-                      'src/core/lib/support/backoff.c',
-                      'src/core/lib/support/cmdline.c',
-                      'src/core/lib/support/cpu_iphone.c',
-                      'src/core/lib/support/cpu_linux.c',
-                      'src/core/lib/support/cpu_posix.c',
-                      'src/core/lib/support/cpu_windows.c',
-                      'src/core/lib/support/env_linux.c',
-                      'src/core/lib/support/env_posix.c',
-                      'src/core/lib/support/env_windows.c',
-                      'src/core/lib/support/histogram.c',
-                      'src/core/lib/support/host_port.c',
-                      'src/core/lib/support/log.c',
-                      'src/core/lib/support/log_android.c',
-                      'src/core/lib/support/log_linux.c',
-                      'src/core/lib/support/log_posix.c',
-                      'src/core/lib/support/log_windows.c',
-                      'src/core/lib/support/murmur_hash.c',
-                      'src/core/lib/support/slice.c',
-                      'src/core/lib/support/slice_buffer.c',
-                      'src/core/lib/support/stack_lockfree.c',
-                      'src/core/lib/support/string.c',
-                      'src/core/lib/support/string_posix.c',
-                      'src/core/lib/support/string_util_windows.c',
-                      'src/core/lib/support/string_windows.c',
-                      'src/core/lib/support/subprocess_posix.c',
-                      'src/core/lib/support/subprocess_windows.c',
-                      'src/core/lib/support/sync.c',
-                      'src/core/lib/support/sync_posix.c',
-                      'src/core/lib/support/sync_windows.c',
-                      'src/core/lib/support/thd.c',
-                      'src/core/lib/support/thd_posix.c',
-                      'src/core/lib/support/thd_windows.c',
-                      'src/core/lib/support/time.c',
-                      'src/core/lib/support/time_posix.c',
-                      'src/core/lib/support/time_precise.c',
-                      'src/core/lib/support/time_windows.c',
-                      'src/core/lib/support/tls_pthread.c',
-                      'src/core/lib/support/tmpfile_msys.c',
-                      'src/core/lib/support/tmpfile_posix.c',
-                      'src/core/lib/support/tmpfile_windows.c',
-                      'src/core/lib/support/wrap_memcpy.c',
-                      'src/core/lib/channel/channel_args.h',
-                      'src/core/lib/channel/channel_stack.h',
-                      'src/core/lib/channel/channel_stack_builder.h',
-                      'src/core/lib/channel/compress_filter.h',
-                      'src/core/lib/channel/connected_channel.h',
-                      'src/core/lib/channel/context.h',
-                      'src/core/lib/channel/http_client_filter.h',
-                      'src/core/lib/channel/http_server_filter.h',
-                      'src/core/lib/compression/algorithm_metadata.h',
-                      'src/core/lib/compression/message_compress.h',
-                      'src/core/lib/debug/trace.h',
-                      'src/core/lib/http/format_request.h',
-                      'src/core/lib/http/httpcli.h',
-                      'src/core/lib/http/parser.h',
-                      'src/core/lib/iomgr/closure.h',
-                      'src/core/lib/iomgr/endpoint.h',
-                      'src/core/lib/iomgr/endpoint_pair.h',
-                      'src/core/lib/iomgr/error.h',
-                      'src/core/lib/iomgr/ev_poll_and_epoll_posix.h',
-                      'src/core/lib/iomgr/ev_poll_posix.h',
-                      'src/core/lib/iomgr/ev_posix.h',
-                      'src/core/lib/iomgr/exec_ctx.h',
-                      'src/core/lib/iomgr/executor.h',
-                      'src/core/lib/iomgr/iocp_windows.h',
-                      'src/core/lib/iomgr/iomgr.h',
-                      'src/core/lib/iomgr/iomgr_internal.h',
-                      'src/core/lib/iomgr/iomgr_posix.h',
-                      'src/core/lib/iomgr/load_file.h',
-                      'src/core/lib/iomgr/polling_entity.h',
-                      'src/core/lib/iomgr/pollset.h',
-                      'src/core/lib/iomgr/pollset_set.h',
-                      'src/core/lib/iomgr/pollset_set_windows.h',
-                      'src/core/lib/iomgr/pollset_windows.h',
-                      'src/core/lib/iomgr/resolve_address.h',
-                      'src/core/lib/iomgr/sockaddr.h',
-                      'src/core/lib/iomgr/sockaddr_posix.h',
-                      'src/core/lib/iomgr/sockaddr_utils.h',
-                      'src/core/lib/iomgr/sockaddr_windows.h',
-                      'src/core/lib/iomgr/socket_utils_posix.h',
-                      'src/core/lib/iomgr/socket_windows.h',
-                      'src/core/lib/iomgr/tcp_client.h',
-                      'src/core/lib/iomgr/tcp_posix.h',
-                      'src/core/lib/iomgr/tcp_server.h',
-                      'src/core/lib/iomgr/tcp_windows.h',
-                      'src/core/lib/iomgr/time_averaged_stats.h',
-                      'src/core/lib/iomgr/timer.h',
-                      'src/core/lib/iomgr/timer_heap.h',
-                      'src/core/lib/iomgr/udp_server.h',
-                      'src/core/lib/iomgr/unix_sockets_posix.h',
-                      'src/core/lib/iomgr/wakeup_fd_pipe.h',
-                      'src/core/lib/iomgr/wakeup_fd_posix.h',
-                      'src/core/lib/iomgr/workqueue.h',
-                      'src/core/lib/iomgr/workqueue_posix.h',
-                      'src/core/lib/iomgr/workqueue_windows.h',
-                      'src/core/lib/json/json.h',
-                      'src/core/lib/json/json_common.h',
-                      'src/core/lib/json/json_reader.h',
-                      'src/core/lib/json/json_writer.h',
-                      'src/core/lib/surface/api_trace.h',
-                      'src/core/lib/surface/call.h',
-                      'src/core/lib/surface/call_test_only.h',
-                      'src/core/lib/surface/channel.h',
-                      'src/core/lib/surface/channel_init.h',
-                      'src/core/lib/surface/channel_stack_type.h',
-                      'src/core/lib/surface/completion_queue.h',
-                      'src/core/lib/surface/event_string.h',
-                      'src/core/lib/surface/init.h',
-                      'src/core/lib/surface/lame_client.h',
-                      'src/core/lib/surface/server.h',
-                      'src/core/lib/surface/surface_trace.h',
-                      'src/core/lib/transport/byte_stream.h',
-                      'src/core/lib/transport/connectivity_state.h',
-                      'src/core/lib/transport/metadata.h',
-                      'src/core/lib/transport/metadata_batch.h',
-                      'src/core/lib/transport/static_metadata.h',
-                      'src/core/lib/transport/transport.h',
-                      'src/core/lib/transport/transport_impl.h',
-                      'src/core/ext/transport/chttp2/transport/bin_decoder.h',
-                      'src/core/ext/transport/chttp2/transport/bin_encoder.h',
-                      'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
-                      'src/core/ext/transport/chttp2/transport/frame.h',
-                      'src/core/ext/transport/chttp2/transport/frame_data.h',
-                      'src/core/ext/transport/chttp2/transport/frame_goaway.h',
-                      'src/core/ext/transport/chttp2/transport/frame_ping.h',
-                      'src/core/ext/transport/chttp2/transport/frame_rst_stream.h',
-                      'src/core/ext/transport/chttp2/transport/frame_settings.h',
-                      'src/core/ext/transport/chttp2/transport/frame_window_update.h',
-                      'src/core/ext/transport/chttp2/transport/hpack_encoder.h',
-                      'src/core/ext/transport/chttp2/transport/hpack_parser.h',
-                      'src/core/ext/transport/chttp2/transport/hpack_table.h',
-                      'src/core/ext/transport/chttp2/transport/http2_errors.h',
-                      'src/core/ext/transport/chttp2/transport/huffsyms.h',
-                      'src/core/ext/transport/chttp2/transport/incoming_metadata.h',
-                      'src/core/ext/transport/chttp2/transport/internal.h',
-                      'src/core/ext/transport/chttp2/transport/status_conversion.h',
-                      'src/core/ext/transport/chttp2/transport/stream_map.h',
-                      'src/core/ext/transport/chttp2/transport/timeout_encoding.h',
-                      'src/core/ext/transport/chttp2/transport/varint.h',
-                      'src/core/ext/transport/chttp2/alpn/alpn.h',
-                      'src/core/lib/security/context/security_context.h',
-                      'src/core/lib/security/credentials/composite/composite_credentials.h',
-                      'src/core/lib/security/credentials/credentials.h',
-                      'src/core/lib/security/credentials/fake/fake_credentials.h',
-                      'src/core/lib/security/credentials/google_default/google_default_credentials.h',
-                      'src/core/lib/security/credentials/iam/iam_credentials.h',
-                      'src/core/lib/security/credentials/jwt/json_token.h',
-                      'src/core/lib/security/credentials/jwt/jwt_credentials.h',
-                      'src/core/lib/security/credentials/jwt/jwt_verifier.h',
-                      'src/core/lib/security/credentials/oauth2/oauth2_credentials.h',
-                      'src/core/lib/security/credentials/plugin/plugin_credentials.h',
-                      'src/core/lib/security/credentials/ssl/ssl_credentials.h',
-                      'src/core/lib/security/transport/auth_filters.h',
-                      'src/core/lib/security/transport/handshake.h',
-                      'src/core/lib/security/transport/secure_endpoint.h',
-                      'src/core/lib/security/transport/security_connector.h',
-                      'src/core/lib/security/transport/tsi_error.h',
-                      'src/core/lib/security/util/b64.h',
-                      'src/core/lib/security/util/json_util.h',
-                      'src/core/lib/tsi/fake_transport_security.h',
-                      'src/core/lib/tsi/ssl_transport_security.h',
-                      'src/core/lib/tsi/ssl_types.h',
-                      'src/core/lib/tsi/transport_security.h',
-                      'src/core/lib/tsi/transport_security_interface.h',
-                      'src/core/ext/client_config/client_channel.h',
-                      'src/core/ext/client_config/client_channel_factory.h',
-                      'src/core/ext/client_config/client_config.h',
-                      'src/core/ext/client_config/connector.h',
-                      'src/core/ext/client_config/initial_connect_string.h',
-                      'src/core/ext/client_config/lb_policy.h',
-                      'src/core/ext/client_config/lb_policy_factory.h',
-                      'src/core/ext/client_config/lb_policy_registry.h',
-                      'src/core/ext/client_config/parse_address.h',
-                      'src/core/ext/client_config/resolver.h',
-                      'src/core/ext/client_config/resolver_factory.h',
-                      'src/core/ext/client_config/resolver_registry.h',
-                      'src/core/ext/client_config/subchannel.h',
-                      'src/core/ext/client_config/subchannel_call_holder.h',
-                      'src/core/ext/client_config/subchannel_index.h',
-                      'src/core/ext/client_config/uri_parser.h',
-                      'src/core/ext/lb_policy/grpclb/grpclb.h',
-                      'src/core/ext/lb_policy/grpclb/load_balancer_api.h',
-                      'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
-                      'third_party/nanopb/pb.h',
-                      'third_party/nanopb/pb_common.h',
-                      'third_party/nanopb/pb_decode.h',
-                      'third_party/nanopb/pb_encode.h',
-                      'src/core/ext/load_reporting/load_reporting.h',
-                      'src/core/ext/load_reporting/load_reporting_filter.h',
-                      'src/core/ext/census/aggregation.h',
-                      'src/core/ext/census/census_interface.h',
-                      'src/core/ext/census/census_rpc_stats.h',
-                      'src/core/ext/census/gen/census.pb.h',
-                      'src/core/ext/census/grpc_filter.h',
-                      'src/core/ext/census/mlog.h',
-                      'src/core/ext/census/rpc_metric_id.h',
-                      'include/grpc/byte_buffer.h',
-                      'include/grpc/byte_buffer_reader.h',
-                      'include/grpc/compression.h',
-                      'include/grpc/grpc.h',
-                      'include/grpc/grpc_posix.h',
-                      'include/grpc/status.h',
-                      'include/grpc/impl/codegen/byte_buffer.h',
-                      'include/grpc/impl/codegen/byte_buffer_reader.h',
-                      'include/grpc/impl/codegen/compression_types.h',
-                      'include/grpc/impl/codegen/connectivity_state.h',
-                      'include/grpc/impl/codegen/grpc_types.h',
-                      'include/grpc/impl/codegen/propagation_bits.h',
-                      'include/grpc/impl/codegen/status.h',
-                      'include/grpc/impl/codegen/alloc.h',
-                      'include/grpc/impl/codegen/atm.h',
-                      'include/grpc/impl/codegen/atm_gcc_atomic.h',
-                      'include/grpc/impl/codegen/atm_gcc_sync.h',
-                      'include/grpc/impl/codegen/atm_windows.h',
-                      'include/grpc/impl/codegen/log.h',
-                      'include/grpc/impl/codegen/port_platform.h',
-                      'include/grpc/impl/codegen/slice.h',
-                      'include/grpc/impl/codegen/slice_buffer.h',
-                      'include/grpc/impl/codegen/sync.h',
-                      'include/grpc/impl/codegen/sync_generic.h',
-                      'include/grpc/impl/codegen/sync_posix.h',
-                      'include/grpc/impl/codegen/sync_windows.h',
-                      'include/grpc/impl/codegen/time.h',
-                      'include/grpc/grpc_security.h',
-                      'include/grpc/grpc_security_constants.h',
-                      'include/grpc/census.h',
-                      'src/core/lib/surface/init.c',
-                      'src/core/lib/channel/channel_args.c',
-                      'src/core/lib/channel/channel_stack.c',
-                      'src/core/lib/channel/channel_stack_builder.c',
-                      'src/core/lib/channel/compress_filter.c',
-                      'src/core/lib/channel/connected_channel.c',
-                      'src/core/lib/channel/http_client_filter.c',
-                      'src/core/lib/channel/http_server_filter.c',
-                      'src/core/lib/compression/compression.c',
-                      'src/core/lib/compression/message_compress.c',
-                      'src/core/lib/debug/trace.c',
-                      'src/core/lib/http/format_request.c',
-                      'src/core/lib/http/httpcli.c',
-                      'src/core/lib/http/parser.c',
-                      'src/core/lib/iomgr/closure.c',
-                      'src/core/lib/iomgr/endpoint.c',
-                      'src/core/lib/iomgr/endpoint_pair_posix.c',
-                      'src/core/lib/iomgr/endpoint_pair_windows.c',
-                      'src/core/lib/iomgr/error.c',
-                      'src/core/lib/iomgr/ev_poll_and_epoll_posix.c',
-                      'src/core/lib/iomgr/ev_poll_posix.c',
-                      'src/core/lib/iomgr/ev_posix.c',
-                      'src/core/lib/iomgr/exec_ctx.c',
-                      'src/core/lib/iomgr/executor.c',
-                      'src/core/lib/iomgr/iocp_windows.c',
-                      'src/core/lib/iomgr/iomgr.c',
-                      'src/core/lib/iomgr/iomgr_posix.c',
-                      'src/core/lib/iomgr/iomgr_windows.c',
-                      'src/core/lib/iomgr/load_file.c',
-                      'src/core/lib/iomgr/polling_entity.c',
-                      'src/core/lib/iomgr/pollset_set_windows.c',
-                      'src/core/lib/iomgr/pollset_windows.c',
-                      'src/core/lib/iomgr/resolve_address_posix.c',
-                      'src/core/lib/iomgr/resolve_address_windows.c',
-                      'src/core/lib/iomgr/sockaddr_utils.c',
-                      'src/core/lib/iomgr/socket_utils_common_posix.c',
-                      'src/core/lib/iomgr/socket_utils_linux.c',
-                      'src/core/lib/iomgr/socket_utils_posix.c',
-                      'src/core/lib/iomgr/socket_windows.c',
-                      'src/core/lib/iomgr/tcp_client_posix.c',
-                      'src/core/lib/iomgr/tcp_client_windows.c',
-                      'src/core/lib/iomgr/tcp_posix.c',
-                      'src/core/lib/iomgr/tcp_server_posix.c',
-                      'src/core/lib/iomgr/tcp_server_windows.c',
-                      'src/core/lib/iomgr/tcp_windows.c',
-                      'src/core/lib/iomgr/time_averaged_stats.c',
-                      'src/core/lib/iomgr/timer.c',
-                      'src/core/lib/iomgr/timer_heap.c',
-                      'src/core/lib/iomgr/udp_server.c',
-                      'src/core/lib/iomgr/unix_sockets_posix.c',
-                      'src/core/lib/iomgr/unix_sockets_posix_noop.c',
-                      'src/core/lib/iomgr/wakeup_fd_eventfd.c',
-                      'src/core/lib/iomgr/wakeup_fd_nospecial.c',
-                      'src/core/lib/iomgr/wakeup_fd_pipe.c',
-                      'src/core/lib/iomgr/wakeup_fd_posix.c',
-                      'src/core/lib/iomgr/workqueue_posix.c',
-                      'src/core/lib/iomgr/workqueue_windows.c',
-                      'src/core/lib/json/json.c',
-                      'src/core/lib/json/json_reader.c',
-                      'src/core/lib/json/json_string.c',
-                      'src/core/lib/json/json_writer.c',
-                      'src/core/lib/surface/alarm.c',
-                      'src/core/lib/surface/api_trace.c',
-                      'src/core/lib/surface/byte_buffer.c',
-                      'src/core/lib/surface/byte_buffer_reader.c',
-                      'src/core/lib/surface/call.c',
-                      'src/core/lib/surface/call_details.c',
-                      'src/core/lib/surface/call_log_batch.c',
-                      'src/core/lib/surface/channel.c',
-                      'src/core/lib/surface/channel_init.c',
-                      'src/core/lib/surface/channel_ping.c',
-                      'src/core/lib/surface/channel_stack_type.c',
-                      'src/core/lib/surface/completion_queue.c',
-                      'src/core/lib/surface/event_string.c',
-                      'src/core/lib/surface/lame_client.c',
-                      'src/core/lib/surface/metadata_array.c',
-                      'src/core/lib/surface/server.c',
-                      'src/core/lib/surface/validate_metadata.c',
-                      'src/core/lib/surface/version.c',
-                      'src/core/lib/transport/byte_stream.c',
-                      'src/core/lib/transport/connectivity_state.c',
-                      'src/core/lib/transport/metadata.c',
-                      'src/core/lib/transport/metadata_batch.c',
-                      'src/core/lib/transport/static_metadata.c',
-                      'src/core/lib/transport/transport.c',
-                      'src/core/lib/transport/transport_op_string.c',
-                      'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c',
-                      'src/core/ext/transport/chttp2/transport/bin_decoder.c',
-                      'src/core/ext/transport/chttp2/transport/bin_encoder.c',
-                      'src/core/ext/transport/chttp2/transport/chttp2_plugin.c',
-                      'src/core/ext/transport/chttp2/transport/chttp2_transport.c',
-                      'src/core/ext/transport/chttp2/transport/frame_data.c',
-                      'src/core/ext/transport/chttp2/transport/frame_goaway.c',
-                      'src/core/ext/transport/chttp2/transport/frame_ping.c',
-                      'src/core/ext/transport/chttp2/transport/frame_rst_stream.c',
-                      'src/core/ext/transport/chttp2/transport/frame_settings.c',
-                      'src/core/ext/transport/chttp2/transport/frame_window_update.c',
-                      'src/core/ext/transport/chttp2/transport/hpack_encoder.c',
-                      'src/core/ext/transport/chttp2/transport/hpack_parser.c',
-                      'src/core/ext/transport/chttp2/transport/hpack_table.c',
-                      'src/core/ext/transport/chttp2/transport/huffsyms.c',
-                      'src/core/ext/transport/chttp2/transport/incoming_metadata.c',
-                      'src/core/ext/transport/chttp2/transport/parsing.c',
-                      'src/core/ext/transport/chttp2/transport/status_conversion.c',
-                      'src/core/ext/transport/chttp2/transport/stream_lists.c',
-                      'src/core/ext/transport/chttp2/transport/stream_map.c',
-                      'src/core/ext/transport/chttp2/transport/timeout_encoding.c',
-                      'src/core/ext/transport/chttp2/transport/varint.c',
-                      'src/core/ext/transport/chttp2/transport/writing.c',
-                      'src/core/ext/transport/chttp2/alpn/alpn.c',
-                      'src/core/lib/http/httpcli_security_connector.c',
-                      'src/core/lib/security/context/security_context.c',
-                      'src/core/lib/security/credentials/composite/composite_credentials.c',
-                      'src/core/lib/security/credentials/credentials.c',
-                      'src/core/lib/security/credentials/credentials_metadata.c',
-                      'src/core/lib/security/credentials/fake/fake_credentials.c',
-                      'src/core/lib/security/credentials/google_default/credentials_posix.c',
-                      'src/core/lib/security/credentials/google_default/credentials_windows.c',
-                      'src/core/lib/security/credentials/google_default/google_default_credentials.c',
-                      'src/core/lib/security/credentials/iam/iam_credentials.c',
-                      'src/core/lib/security/credentials/jwt/json_token.c',
-                      'src/core/lib/security/credentials/jwt/jwt_credentials.c',
-                      'src/core/lib/security/credentials/jwt/jwt_verifier.c',
-                      'src/core/lib/security/credentials/oauth2/oauth2_credentials.c',
-                      'src/core/lib/security/credentials/plugin/plugin_credentials.c',
-                      'src/core/lib/security/credentials/ssl/ssl_credentials.c',
-                      'src/core/lib/security/transport/client_auth_filter.c',
-                      'src/core/lib/security/transport/handshake.c',
-                      'src/core/lib/security/transport/secure_endpoint.c',
-                      'src/core/lib/security/transport/security_connector.c',
-                      'src/core/lib/security/transport/server_auth_filter.c',
-                      'src/core/lib/security/transport/tsi_error.c',
-                      'src/core/lib/security/util/b64.c',
-                      'src/core/lib/security/util/json_util.c',
-                      'src/core/lib/surface/init_secure.c',
-                      'src/core/lib/tsi/fake_transport_security.c',
-                      'src/core/lib/tsi/ssl_transport_security.c',
-                      'src/core/lib/tsi/transport_security.c',
-                      'src/core/ext/transport/chttp2/client/secure/secure_channel_create.c',
-                      'src/core/ext/client_config/channel_connectivity.c',
-                      'src/core/ext/client_config/client_channel.c',
-                      'src/core/ext/client_config/client_channel_factory.c',
-                      'src/core/ext/client_config/client_config.c',
-                      'src/core/ext/client_config/client_config_plugin.c',
-                      'src/core/ext/client_config/connector.c',
-                      'src/core/ext/client_config/default_initial_connect_string.c',
-                      'src/core/ext/client_config/initial_connect_string.c',
-                      'src/core/ext/client_config/lb_policy.c',
-                      'src/core/ext/client_config/lb_policy_factory.c',
-                      'src/core/ext/client_config/lb_policy_registry.c',
-                      'src/core/ext/client_config/parse_address.c',
-                      'src/core/ext/client_config/resolver.c',
-                      'src/core/ext/client_config/resolver_factory.c',
-                      'src/core/ext/client_config/resolver_registry.c',
-                      'src/core/ext/client_config/subchannel.c',
-                      'src/core/ext/client_config/subchannel_call_holder.c',
-                      'src/core/ext/client_config/subchannel_index.c',
-                      'src/core/ext/client_config/uri_parser.c',
-                      'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
-                      'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c',
-                      'src/core/ext/transport/chttp2/client/insecure/channel_create.c',
-                      'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c',
-                      'src/core/ext/lb_policy/grpclb/grpclb.c',
-                      'src/core/ext/lb_policy/grpclb/load_balancer_api.c',
-                      'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
-                      'third_party/nanopb/pb_common.c',
-                      'third_party/nanopb/pb_decode.c',
-                      'third_party/nanopb/pb_encode.c',
-                      'src/core/ext/lb_policy/pick_first/pick_first.c',
-                      'src/core/ext/lb_policy/round_robin/round_robin.c',
-                      'src/core/ext/resolver/dns/native/dns_resolver.c',
-                      'src/core/ext/resolver/sockaddr/sockaddr_resolver.c',
-                      'src/core/ext/load_reporting/load_reporting.c',
-                      'src/core/ext/load_reporting/load_reporting_filter.c',
-                      'src/core/ext/census/context.c',
-                      'src/core/ext/census/gen/census.pb.c',
-                      'src/core/ext/census/grpc_context.c',
-                      'src/core/ext/census/grpc_filter.c',
-                      'src/core/ext/census/grpc_plugin.c',
-                      'src/core/ext/census/initialize.c',
-                      'src/core/ext/census/mlog.c',
-                      'src/core/ext/census/operation.c',
-                      'src/core/ext/census/placeholders.c',
-                      'src/core/ext/census/tracing.c',
-                      'src/core/plugin_registry/grpc_plugin_registry.c'
-
-    ss.private_header_files = 'src/core/lib/profiling/timers.h',
-                              'src/core/lib/support/backoff.h',
-                              'src/core/lib/support/block_annotate.h',
-                              'src/core/lib/support/env.h',
-                              'src/core/lib/support/murmur_hash.h',
-                              'src/core/lib/support/stack_lockfree.h',
-                              'src/core/lib/support/string.h',
-                              'src/core/lib/support/string_windows.h',
-                              'src/core/lib/support/thd_internal.h',
-                              'src/core/lib/support/time_precise.h',
-                              'src/core/lib/support/tmpfile.h',
-                              'src/core/lib/channel/channel_args.h',
-                              'src/core/lib/channel/channel_stack.h',
-                              'src/core/lib/channel/channel_stack_builder.h',
-                              'src/core/lib/channel/compress_filter.h',
-                              'src/core/lib/channel/connected_channel.h',
-                              'src/core/lib/channel/context.h',
-                              'src/core/lib/channel/http_client_filter.h',
-                              'src/core/lib/channel/http_server_filter.h',
-                              'src/core/lib/compression/algorithm_metadata.h',
-                              'src/core/lib/compression/message_compress.h',
-                              'src/core/lib/debug/trace.h',
-                              'src/core/lib/http/format_request.h',
-                              'src/core/lib/http/httpcli.h',
-                              'src/core/lib/http/parser.h',
-                              'src/core/lib/iomgr/closure.h',
-                              'src/core/lib/iomgr/endpoint.h',
-                              'src/core/lib/iomgr/endpoint_pair.h',
-                              'src/core/lib/iomgr/error.h',
-                              'src/core/lib/iomgr/ev_poll_and_epoll_posix.h',
-                              'src/core/lib/iomgr/ev_poll_posix.h',
-                              'src/core/lib/iomgr/ev_posix.h',
-                              'src/core/lib/iomgr/exec_ctx.h',
-                              'src/core/lib/iomgr/executor.h',
-                              'src/core/lib/iomgr/iocp_windows.h',
-                              'src/core/lib/iomgr/iomgr.h',
-                              'src/core/lib/iomgr/iomgr_internal.h',
-                              'src/core/lib/iomgr/iomgr_posix.h',
-                              'src/core/lib/iomgr/load_file.h',
-                              'src/core/lib/iomgr/polling_entity.h',
-                              'src/core/lib/iomgr/pollset.h',
-                              'src/core/lib/iomgr/pollset_set.h',
-                              'src/core/lib/iomgr/pollset_set_windows.h',
-                              'src/core/lib/iomgr/pollset_windows.h',
-                              'src/core/lib/iomgr/resolve_address.h',
-                              'src/core/lib/iomgr/sockaddr.h',
-                              'src/core/lib/iomgr/sockaddr_posix.h',
-                              'src/core/lib/iomgr/sockaddr_utils.h',
-                              'src/core/lib/iomgr/sockaddr_windows.h',
-                              'src/core/lib/iomgr/socket_utils_posix.h',
-                              'src/core/lib/iomgr/socket_windows.h',
-                              'src/core/lib/iomgr/tcp_client.h',
-                              'src/core/lib/iomgr/tcp_posix.h',
-                              'src/core/lib/iomgr/tcp_server.h',
-                              'src/core/lib/iomgr/tcp_windows.h',
-                              'src/core/lib/iomgr/time_averaged_stats.h',
-                              'src/core/lib/iomgr/timer.h',
-                              'src/core/lib/iomgr/timer_heap.h',
-                              'src/core/lib/iomgr/udp_server.h',
-                              'src/core/lib/iomgr/unix_sockets_posix.h',
-                              'src/core/lib/iomgr/wakeup_fd_pipe.h',
-                              'src/core/lib/iomgr/wakeup_fd_posix.h',
-                              'src/core/lib/iomgr/workqueue.h',
-                              'src/core/lib/iomgr/workqueue_posix.h',
-                              'src/core/lib/iomgr/workqueue_windows.h',
-                              'src/core/lib/json/json.h',
-                              'src/core/lib/json/json_common.h',
-                              'src/core/lib/json/json_reader.h',
-                              'src/core/lib/json/json_writer.h',
-                              'src/core/lib/surface/api_trace.h',
-                              'src/core/lib/surface/call.h',
-                              'src/core/lib/surface/call_test_only.h',
-                              'src/core/lib/surface/channel.h',
-                              'src/core/lib/surface/channel_init.h',
-                              'src/core/lib/surface/channel_stack_type.h',
-                              'src/core/lib/surface/completion_queue.h',
-                              'src/core/lib/surface/event_string.h',
-                              'src/core/lib/surface/init.h',
-                              'src/core/lib/surface/lame_client.h',
-                              'src/core/lib/surface/server.h',
-                              'src/core/lib/surface/surface_trace.h',
-                              'src/core/lib/transport/byte_stream.h',
-                              'src/core/lib/transport/connectivity_state.h',
-                              'src/core/lib/transport/metadata.h',
-                              'src/core/lib/transport/metadata_batch.h',
-                              'src/core/lib/transport/static_metadata.h',
-                              'src/core/lib/transport/transport.h',
-                              'src/core/lib/transport/transport_impl.h',
-                              'src/core/ext/transport/chttp2/transport/bin_decoder.h',
-                              'src/core/ext/transport/chttp2/transport/bin_encoder.h',
-                              'src/core/ext/transport/chttp2/transport/chttp2_transport.h',
-                              'src/core/ext/transport/chttp2/transport/frame.h',
-                              'src/core/ext/transport/chttp2/transport/frame_data.h',
-                              'src/core/ext/transport/chttp2/transport/frame_goaway.h',
-                              'src/core/ext/transport/chttp2/transport/frame_ping.h',
-                              'src/core/ext/transport/chttp2/transport/frame_rst_stream.h',
-                              'src/core/ext/transport/chttp2/transport/frame_settings.h',
-                              'src/core/ext/transport/chttp2/transport/frame_window_update.h',
-                              'src/core/ext/transport/chttp2/transport/hpack_encoder.h',
-                              'src/core/ext/transport/chttp2/transport/hpack_parser.h',
-                              'src/core/ext/transport/chttp2/transport/hpack_table.h',
-                              'src/core/ext/transport/chttp2/transport/http2_errors.h',
-                              'src/core/ext/transport/chttp2/transport/huffsyms.h',
-                              'src/core/ext/transport/chttp2/transport/incoming_metadata.h',
-                              'src/core/ext/transport/chttp2/transport/internal.h',
-                              'src/core/ext/transport/chttp2/transport/status_conversion.h',
-                              'src/core/ext/transport/chttp2/transport/stream_map.h',
-                              'src/core/ext/transport/chttp2/transport/timeout_encoding.h',
-                              'src/core/ext/transport/chttp2/transport/varint.h',
-                              'src/core/ext/transport/chttp2/alpn/alpn.h',
-                              'src/core/lib/security/context/security_context.h',
-                              'src/core/lib/security/credentials/composite/composite_credentials.h',
-                              'src/core/lib/security/credentials/credentials.h',
-                              'src/core/lib/security/credentials/fake/fake_credentials.h',
-                              'src/core/lib/security/credentials/google_default/google_default_credentials.h',
-                              'src/core/lib/security/credentials/iam/iam_credentials.h',
-                              'src/core/lib/security/credentials/jwt/json_token.h',
-                              'src/core/lib/security/credentials/jwt/jwt_credentials.h',
-                              'src/core/lib/security/credentials/jwt/jwt_verifier.h',
-                              'src/core/lib/security/credentials/oauth2/oauth2_credentials.h',
-                              'src/core/lib/security/credentials/plugin/plugin_credentials.h',
-                              'src/core/lib/security/credentials/ssl/ssl_credentials.h',
-                              'src/core/lib/security/transport/auth_filters.h',
-                              'src/core/lib/security/transport/handshake.h',
-                              'src/core/lib/security/transport/secure_endpoint.h',
-                              'src/core/lib/security/transport/security_connector.h',
-                              'src/core/lib/security/transport/tsi_error.h',
-                              'src/core/lib/security/util/b64.h',
-                              'src/core/lib/security/util/json_util.h',
-                              'src/core/lib/tsi/fake_transport_security.h',
-                              'src/core/lib/tsi/ssl_transport_security.h',
-                              'src/core/lib/tsi/ssl_types.h',
-                              'src/core/lib/tsi/transport_security.h',
-                              'src/core/lib/tsi/transport_security_interface.h',
-                              'src/core/ext/client_config/client_channel.h',
-                              'src/core/ext/client_config/client_channel_factory.h',
-                              'src/core/ext/client_config/client_config.h',
-                              'src/core/ext/client_config/connector.h',
-                              'src/core/ext/client_config/initial_connect_string.h',
-                              'src/core/ext/client_config/lb_policy.h',
-                              'src/core/ext/client_config/lb_policy_factory.h',
-                              'src/core/ext/client_config/lb_policy_registry.h',
-                              'src/core/ext/client_config/parse_address.h',
-                              'src/core/ext/client_config/resolver.h',
-                              'src/core/ext/client_config/resolver_factory.h',
-                              'src/core/ext/client_config/resolver_registry.h',
-                              'src/core/ext/client_config/subchannel.h',
-                              'src/core/ext/client_config/subchannel_call_holder.h',
-                              'src/core/ext/client_config/subchannel_index.h',
-                              'src/core/ext/client_config/uri_parser.h',
-                              'src/core/ext/lb_policy/grpclb/grpclb.h',
-                              'src/core/ext/lb_policy/grpclb/load_balancer_api.h',
-                              'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
-                              'third_party/nanopb/pb.h',
-                              'third_party/nanopb/pb_common.h',
-                              'third_party/nanopb/pb_decode.h',
-                              'third_party/nanopb/pb_encode.h',
-                              'src/core/ext/load_reporting/load_reporting.h',
-                              'src/core/ext/load_reporting/load_reporting_filter.h',
-                              'src/core/ext/census/aggregation.h',
-                              'src/core/ext/census/census_interface.h',
-                              'src/core/ext/census/census_rpc_stats.h',
-                              'src/core/ext/census/gen/census.pb.h',
-                              'src/core/ext/census/grpc_filter.h',
-                              'src/core/ext/census/mlog.h',
-                              'src/core/ext/census/rpc_metric_id.h'
-
-    ss.header_mappings_dir = '.'
-    # This isn't officially supported in Cocoapods. We've asked for an alternative:
-    # https://github.com/CocoaPods/CocoaPods/issues/4386
-    ss.xcconfig = {
-      'USE_HEADERMAP' => 'NO',
-      'ALWAYS_SEARCH_USER_PATHS' => 'NO',
-      'USER_HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers/Private/gRPC"',
-      'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers/Private/gRPC/include"'
-    }
-
-    ss.requires_arc = false
-    ss.libraries = 'z'
-    ss.dependency 'BoringSSL', '~> 3.0'
-
-    # ss.compiler_flags = '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w'
-  end
-
-  # Objective-C wrapper around the core gRPC library.
-  s.subspec 'GRPCClient' do |ss|
-    src_dir = "#{objc_dir}/GRPCClient"
-    ss.source_files = "#{src_dir}/*.{h,m}", "#{src_dir}/**/*.{h,m}"
-    ss.private_header_files = "#{src_dir}/private/*.h"
-    ss.header_mappings_dir = "#{objc_dir}"
 
-    ss.dependency 'gRPC/C-Core'
-    ss.dependency 'gRPC/RxLibrary'
+  name = 'GRPCClient'
+  s.module_name = name
+  s.header_dir = name
 
-    # Certificates, to be able to establish TLS connections:
-    ss.resource_bundles = { 'gRPCCertificates' => ['etc/roots.pem'] }
-  end
+  src_dir = 'src/objective-c/GRPCClient'
+  s.source_files = "#{src_dir}/*.{h,m}", "#{src_dir}/**/*.{h,m}"
+  s.private_header_files = "#{src_dir}/private/*.h"
+  s.header_mappings_dir = "#{src_dir}"
 
-  # RPC library for ProtocolBuffers, based on gRPC
-  s.subspec 'ProtoRPC' do |ss|
-    src_dir = "#{objc_dir}/ProtoRPC"
-    ss.source_files = "#{src_dir}/*.{h,m}"
-    ss.header_mappings_dir = "#{objc_dir}"
+  s.dependency 'gRPC-Core', version
+  s.dependency 'gRPC-RxLibrary', version
 
-    ss.dependency 'gRPC/GRPCClient'
-    ss.dependency 'gRPC/RxLibrary'
-    ss.dependency 'Protobuf', '~> 3.0.0-alpha-4'
-  end
+  # Certificates, to be able to establish TLS connections:
+  s.resource_bundles = { 'gRPCCertificates' => ['etc/roots.pem'] }
 end
diff --git a/grpc.def b/grpc.def
index 95030adc3906bbbb331c585e4494906526b1a1b9..0849f84e0bc5c5489bfbe0f3c024485e8cf0fb9c 100644
--- a/grpc.def
+++ b/grpc.def
@@ -90,6 +90,7 @@ EXPORTS
     grpc_call_error_to_string
     grpc_insecure_channel_create_from_fd
     grpc_server_add_insecure_channel_from_fd
+    grpc_use_signal
     grpc_auth_property_iterator_next
     grpc_auth_context_property_iterator
     grpc_auth_context_peer_identity
diff --git a/grpc.gemspec b/grpc.gemspec
index 7e379c027ce1b1695fc2b22c714108f8d382d562..95232ad522b4a7e09b5e9266102e700420f0499c 100755
--- a/grpc.gemspec
+++ b/grpc.gemspec
@@ -189,6 +189,7 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/iomgr/endpoint.h )
   s.files += %w( src/core/lib/iomgr/endpoint_pair.h )
   s.files += %w( src/core/lib/iomgr/error.h )
+  s.files += %w( src/core/lib/iomgr/ev_epoll_linux.h )
   s.files += %w( src/core/lib/iomgr/ev_poll_and_epoll_posix.h )
   s.files += %w( src/core/lib/iomgr/ev_poll_posix.h )
   s.files += %w( src/core/lib/iomgr/ev_posix.h )
@@ -199,6 +200,7 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/iomgr/iomgr_internal.h )
   s.files += %w( src/core/lib/iomgr/iomgr_posix.h )
   s.files += %w( src/core/lib/iomgr/load_file.h )
+  s.files += %w( src/core/lib/iomgr/network_status_tracker.h )
   s.files += %w( src/core/lib/iomgr/polling_entity.h )
   s.files += %w( src/core/lib/iomgr/pollset.h )
   s.files += %w( src/core/lib/iomgr/pollset_set.h )
@@ -240,7 +242,6 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/surface/init.h )
   s.files += %w( src/core/lib/surface/lame_client.h )
   s.files += %w( src/core/lib/surface/server.h )
-  s.files += %w( src/core/lib/surface/surface_trace.h )
   s.files += %w( src/core/lib/transport/byte_stream.h )
   s.files += %w( src/core/lib/transport/connectivity_state.h )
   s.files += %w( src/core/lib/transport/metadata.h )
@@ -345,6 +346,7 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/iomgr/endpoint_pair_posix.c )
   s.files += %w( src/core/lib/iomgr/endpoint_pair_windows.c )
   s.files += %w( src/core/lib/iomgr/error.c )
+  s.files += %w( src/core/lib/iomgr/ev_epoll_linux.c )
   s.files += %w( src/core/lib/iomgr/ev_poll_and_epoll_posix.c )
   s.files += %w( src/core/lib/iomgr/ev_poll_posix.c )
   s.files += %w( src/core/lib/iomgr/ev_posix.c )
@@ -355,6 +357,7 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/iomgr/iomgr_posix.c )
   s.files += %w( src/core/lib/iomgr/iomgr_windows.c )
   s.files += %w( src/core/lib/iomgr/load_file.c )
+  s.files += %w( src/core/lib/iomgr/network_status_tracker.c )
   s.files += %w( src/core/lib/iomgr/polling_entity.c )
   s.files += %w( src/core/lib/iomgr/pollset_set_windows.c )
   s.files += %w( src/core/lib/iomgr/pollset_windows.c )
diff --git a/include/grpc++/impl/codegen/async_stream.h b/include/grpc++/impl/codegen/async_stream.h
index 0606441549eb046169a08abab01a8629321fd006..e96d224ddbeb10fc66c7f869ed506c1115254286 100644
--- a/include/grpc++/impl/codegen/async_stream.h
+++ b/include/grpc++/impl/codegen/async_stream.h
@@ -52,11 +52,14 @@ class ClientAsyncStreamingInterface {
 
   /// Request notification of the reading of the initial metadata. Completion
   /// will be notified by \a tag on the associated completion queue.
+  /// This call is optional, but if it is used, it cannot be used concurrently
+  /// with or after the \a Read method.
   ///
   /// \param[in] tag Tag identifying this request.
   virtual void ReadInitialMetadata(void* tag) = 0;
 
-  /// Request notification completion.
+  /// Indicate that the stream is to be finished and request notification.
+  /// Should not be used concurrently with other operations.
   ///
   /// \param[out] status To be updated with the operation status.
   /// \param[in] tag Tag identifying this request.
@@ -71,6 +74,11 @@ class AsyncReaderInterface {
 
   /// Read a message of type \a R into \a msg. Completion will be notified by \a
   /// tag on the associated completion queue.
+  /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
+  /// should not be called concurrently with other streaming APIs
+  /// on the same stream. It is not meaningful to call it concurrently
+  /// with another \a Read on the same stream since reads on the same stream
+  /// are delivered in order.
   ///
   /// \param[out] msg Where to eventually store the read message.
   /// \param[in] tag The tag identifying the operation.
@@ -88,6 +96,7 @@ class AsyncWriterInterface {
   /// Only one write may be outstanding at any given time. This means that
   /// after calling Write, one must wait to receive \a tag from the completion
   /// queue BEFORE calling Write again.
+  /// This is thread-safe with respect to \a Read.
   ///
   /// \param[in] msg The message to be written.
   /// \param[in] tag The tag identifying the operation.
@@ -158,6 +167,7 @@ class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
                                    public AsyncWriterInterface<W> {
  public:
   /// Signal the client is done with the writes.
+  /// Thread-safe with respect to \a Read.
   ///
   /// \param[in] tag The tag identifying the operation.
   virtual void WritesDone(void* tag) = 0;
@@ -229,6 +239,7 @@ class ClientAsyncReaderWriterInterface : public ClientAsyncStreamingInterface,
                                          public AsyncReaderInterface<R> {
  public:
   /// Signal the client is done with the writes.
+  /// Thread-safe with respect to \a Read.
   ///
   /// \param[in] tag The tag identifying the operation.
   virtual void WritesDone(void* tag) = 0;
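
The comments added to async_stream.h spell out the concurrency contract for async streams: Read may run alongside Write/WritesDone, but only one Write (and one Read) may be outstanding at a time, and Finish must not overlap other operations. The sketch below is illustrative only; `Echo::Stub`, its `AsyncChat` method and the `Msg` type are hypothetical generated names, while the completion-queue calls are the real gRPC C++ API. It shows one way to honor the one-outstanding-write rule by issuing the next Write only after the previous write's tag has come back from the queue.

```cpp
#include <memory>
#include <vector>
#include <grpc++/grpc++.h>
#include <grpc++/support/async_stream.h>

// Sketch only: Echo::Stub, AsyncChat and Msg are hypothetical generated types.
// Reads and error handling are elided to keep the write sequencing visible.
void ChatOnce(Echo::Stub* stub, const std::vector<Msg>& outgoing) {
  grpc::ClientContext ctx;
  grpc::CompletionQueue cq;
  grpc::Status status;

  // Tags are opaque pointers; small integers are enough for this sketch.
  void* const kStartTag = reinterpret_cast<void*>(1);
  void* const kWriteTag = reinterpret_cast<void*>(2);
  void* const kDoneTag = reinterpret_cast<void*>(3);
  void* const kFinishTag = reinterpret_cast<void*>(4);

  std::unique_ptr<grpc::ClientAsyncReaderWriter<Msg, Msg>> stream(
      stub->AsyncChat(&ctx, &cq, kStartTag));

  size_t next = 0;
  void* tag;
  bool ok;
  while (cq.Next(&tag, &ok) && ok) {
    if (tag == kStartTag || tag == kWriteTag) {
      // The previous operation completed, so a new Write may be issued now.
      if (next < outgoing.size()) {
        stream->Write(outgoing[next++], kWriteTag);
      } else {
        stream->WritesDone(kDoneTag);
      }
    } else if (tag == kDoneTag) {
      stream->Finish(&status, kFinishTag);
    } else if (tag == kFinishTag) {
      break;  // status now holds the call's final status.
    }
  }
}
```
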
diff --git a/include/grpc++/impl/codegen/client_context.h b/include/grpc++/impl/codegen/client_context.h
index e23fd4eda3486d7198e8a4ad3a0d06abe67a28d1..a132c9a57aa7fb77c69783bf5492d68409b7b35e 100644
--- a/include/grpc++/impl/codegen/client_context.h
+++ b/include/grpc++/impl/codegen/client_context.h
@@ -193,7 +193,7 @@ class ClientContext {
   ///
   /// \return A multimap of initial metadata key-value pairs from the server.
   const std::multimap<grpc::string_ref, grpc::string_ref>&
-  GetServerInitialMetadata() {
+  GetServerInitialMetadata() const {
     GPR_CODEGEN_ASSERT(initial_metadata_received_);
     return recv_initial_metadata_;
   }
@@ -205,7 +205,7 @@ class ClientContext {
   ///
   /// \return A multimap of metadata trailing key-value pairs from the server.
   const std::multimap<grpc::string_ref, grpc::string_ref>&
-  GetServerTrailingMetadata() {
+  GetServerTrailingMetadata() const {
     // TODO(yangg) check finished
     return trailing_metadata_;
   }
@@ -230,13 +230,13 @@ class ClientContext {
 
 #ifndef GRPC_CXX0X_NO_CHRONO
   /// Return the deadline for the client call.
-  std::chrono::system_clock::time_point deadline() {
+  std::chrono::system_clock::time_point deadline() const {
     return Timespec2Timepoint(deadline_);
   }
 #endif  // !GRPC_CXX0X_NO_CHRONO
 
   /// Return a \a gpr_timespec representation of the client call's deadline.
-  gpr_timespec raw_deadline() { return deadline_; }
+  gpr_timespec raw_deadline() const { return deadline_; }
 
   /// Set the per call authority header (see
   /// https://tools.ietf.org/html/rfc7540#section-8.1.2.3).
@@ -337,7 +337,7 @@ class ClientContext {
                                   const InputMessage& request,
                                   OutputMessage* result);
 
-  grpc_call* call() { return call_; }
+  grpc_call* call() const { return call_; }
   void set_call(grpc_call* call, const std::shared_ptr<Channel>& channel);
 
   uint32_t initial_metadata_flags() const {
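
Making these accessors const lets read-only helpers take a `const ClientContext&`. A minimal sketch, with a helper name of our own choosing:

```cpp
#include <chrono>
#include <iostream>
#include <grpc++/client_context.h>

// Illustrative helper: prints how much time is left before the call's
// deadline. It compiles against a const reference only because deadline()
// is now a const member function.
void LogRemainingTime(const grpc::ClientContext& ctx) {
  auto remaining = ctx.deadline() - std::chrono::system_clock::now();
  std::cout << "time to deadline: "
            << std::chrono::duration_cast<std::chrono::milliseconds>(remaining)
                   .count()
            << " ms" << std::endl;
}
```
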
diff --git a/include/grpc++/impl/codegen/completion_queue.h b/include/grpc++/impl/codegen/completion_queue.h
index 1b84b4470500728abfeaffbc443259ac83e42799..03009e0561d859bc65f387aceea1884caf645eb3 100644
--- a/include/grpc++/impl/codegen/completion_queue.h
+++ b/include/grpc++/impl/codegen/completion_queue.h
@@ -31,8 +31,19 @@
  *
  */
 
-/// A completion queue implements a concurrent producer-consumer queue, with two
-/// main methods, \a Next and \a AsyncNext.
+/// A completion queue implements a concurrent producer-consumer queue, with
+/// two main API-exposed methods: \a Next and \a AsyncNext. These
+/// methods are the essential component of the gRPC C++ asynchronous API.
+/// There is also a \a Shutdown method to indicate that a given completion queue
+/// will no longer have regular events. This must be called before the
+/// completion queue is destroyed.
+/// All completion queue APIs are thread-safe and may be used concurrently with
+/// any other completion queue API invocation; it is acceptable to have
+/// multiple threads calling \a Next or \a AsyncNext on the same or different
+/// completion queues, or to call these methods concurrently with a \a Shutdown
+/// elsewhere.
+/// \remark{All other API calls on a completion queue should be completed
+/// before the completion queue's destructor is called.}
 #ifndef GRPCXX_IMPL_CODEGEN_COMPLETION_QUEUE_H
 #define GRPCXX_IMPL_CODEGEN_COMPLETION_QUEUE_H
 
diff --git a/include/grpc++/impl/codegen/core_codegen.h b/include/grpc++/impl/codegen/core_codegen.h
index b0c4c57e66a2f8db507eef43d0cfe1a0e4783206..9699abfb438725fb6fbe3f1454820c59c36a1711 100644
--- a/include/grpc++/impl/codegen/core_codegen.h
+++ b/include/grpc++/impl/codegen/core_codegen.h
@@ -54,8 +54,8 @@ class CoreCodegen : public CoreCodegenInterface {
 
   void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) GRPC_OVERRIDE;
 
-  void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
-                                    grpc_byte_buffer* buffer) GRPC_OVERRIDE;
+  int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
+                                   grpc_byte_buffer* buffer) GRPC_OVERRIDE;
   void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader* reader)
       GRPC_OVERRIDE;
   int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
diff --git a/include/grpc++/impl/codegen/core_codegen_interface.h b/include/grpc++/impl/codegen/core_codegen_interface.h
index 64d882ed5db10b68a371f21201b054e933819824..f9a8f9b980b9bb5d06dff63d70118361b95555e9 100644
--- a/include/grpc++/impl/codegen/core_codegen_interface.h
+++ b/include/grpc++/impl/codegen/core_codegen_interface.h
@@ -65,8 +65,9 @@ class CoreCodegenInterface {
 
   virtual void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) = 0;
 
-  virtual void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
-                                            grpc_byte_buffer* buffer) = 0;
+  virtual int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
+                                           grpc_byte_buffer* buffer)
+      GRPC_MUST_USE_RESULT = 0;
   virtual void grpc_byte_buffer_reader_destroy(
       grpc_byte_buffer_reader* reader) = 0;
   virtual int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
diff --git a/include/grpc++/impl/codegen/impl/status_code_enum.h b/include/grpc++/impl/codegen/impl/status_code_enum.h
deleted file mode 100644
index f8caec0c119371ea9f569094331dced2772693e2..0000000000000000000000000000000000000000
--- a/include/grpc++/impl/codegen/impl/status_code_enum.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- *
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPCXX_IMPL_CODEGEN_IMPL_STATUS_CODE_ENUM_H
-#define GRPCXX_IMPL_CODEGEN_IMPL_STATUS_CODE_ENUM_H
-
-namespace grpc {
-
-enum StatusCode {
-  /// Not an error; returned on success.
-  OK = 0,
-
-  /// The operation was cancelled (typically by the caller).
-  CANCELLED = 1,
-
-  /// Unknown error. An example of where this error may be returned is if a
-  /// Status value received from another address space belongs to an error-space
-  /// that is not known in this address space. Also errors raised by APIs that
-  /// do not return enough error information may be converted to this error.
-  UNKNOWN = 2,
-
-  /// Client specified an invalid argument. Note that this differs from
-  /// FAILED_PRECONDITION. INVALID_ARGUMENT indicates arguments that are
-  /// problematic regardless of the state of the system (e.g., a malformed file
-  /// name).
-  INVALID_ARGUMENT = 3,
-
-  /// Deadline expired before operation could complete. For operations that
-  /// change the state of the system, this error may be returned even if the
-  /// operation has completed successfully. For example, a successful response
-  /// from a server could have been delayed long enough for the deadline to
-  /// expire.
-  DEADLINE_EXCEEDED = 4,
-
-  /// Some requested entity (e.g., file or directory) was not found.
-  NOT_FOUND = 5,
-
-  /// Some entity that we attempted to create (e.g., file or directory) already
-  /// exists.
-  ALREADY_EXISTS = 6,
-
-  /// The caller does not have permission to execute the specified operation.
-  /// PERMISSION_DENIED must not be used for rejections caused by exhausting
-  /// some resource (use RESOURCE_EXHAUSTED instead for those errors).
-  /// PERMISSION_DENIED must not be used if the caller can not be identified
-  /// (use UNAUTHENTICATED instead for those errors).
-  PERMISSION_DENIED = 7,
-
-  /// The request does not have valid authentication credentials for the
-  /// operation.
-  UNAUTHENTICATED = 16,
-
-  /// Some resource has been exhausted, perhaps a per-user quota, or perhaps the
-  /// entire file system is out of space.
-  RESOURCE_EXHAUSTED = 8,
-
-  /// Operation was rejected because the system is not in a state required for
-  /// the operation's execution. For example, directory to be deleted may be
-  /// non-empty, an rmdir operation is applied to a non-directory, etc.
-  ///
-  /// A litmus test that may help a service implementor in deciding
-  /// between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
-  ///  (a) Use UNAVAILABLE if the client can retry just the failing call.
-  ///  (b) Use ABORTED if the client should retry at a higher-level
-  ///      (e.g., restarting a read-modify-write sequence).
-  ///  (c) Use FAILED_PRECONDITION if the client should not retry until
-  ///      the system state has been explicitly fixed. E.g., if an "rmdir"
-  ///      fails because the directory is non-empty, FAILED_PRECONDITION
-  ///      should be returned since the client should not retry unless
-  ///      they have first fixed up the directory by deleting files from it.
-  ///  (d) Use FAILED_PRECONDITION if the client performs conditional
-  ///      REST Get/Update/Delete on a resource and the resource on the
-  ///      server does not match the condition. E.g., conflicting
-  ///      read-modify-write on the same resource.
-  FAILED_PRECONDITION = 9,
-
-  /// The operation was aborted, typically due to a concurrency issue like
-  /// sequencer check failures, transaction aborts, etc.
-  ///
-  /// See litmus test above for deciding between FAILED_PRECONDITION, ABORTED,
-  /// and UNAVAILABLE.
-  ABORTED = 10,
-
-  /// Operation was attempted past the valid range. E.g., seeking or reading
-  /// past end of file.
-  ///
-  /// Unlike INVALID_ARGUMENT, this error indicates a problem that may be fixed
-  /// if the system state changes. For example, a 32-bit file system will
-  /// generate INVALID_ARGUMENT if asked to read at an offset that is not in the
-  /// range [0,2^32-1], but it will generate OUT_OF_RANGE if asked to read from
-  /// an offset past the current file size.
-  ///
-  /// There is a fair bit of overlap between FAILED_PRECONDITION and
-  /// OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific error)
-  /// when it applies so that callers who are iterating through a space can
-  /// easily look for an OUT_OF_RANGE error to detect when they are done.
-  OUT_OF_RANGE = 11,
-
-  /// Operation is not implemented or not supported/enabled in this service.
-  UNIMPLEMENTED = 12,
-
-  /// Internal errors. Means some invariants expected by underlying System has
-  /// been broken. If you see one of these errors, Something is very broken.
-  INTERNAL = 13,
-
-  /// The service is currently unavailable. This is a most likely a transient
-  /// condition and may be corrected by retrying with a backoff.
-  ///
-  /// See litmus test above for deciding between FAILED_PRECONDITION, ABORTED,
-  /// and UNAVAILABLE.
-  UNAVAILABLE = 14,
-
-  /// Unrecoverable data loss or corruption.
-  DATA_LOSS = 15,
-
-  /// Force users to include a default branch:
-  DO_NOT_USE = -1
-};
-
-}  // namespace grpc
-
-#endif  // GRPCXX_IMPL_CODEGEN_IMPL_STATUS_CODE_ENUM_H
diff --git a/include/grpc++/impl/codegen/proto_utils.h b/include/grpc++/impl/codegen/proto_utils.h
index 3bad468a74e94d9219fd44d20d1dee31ef0d9d6b..d4599c5ffff5842b578c57e02b6c45347aba7b60 100644
--- a/include/grpc++/impl/codegen/proto_utils.h
+++ b/include/grpc++/impl/codegen/proto_utils.h
@@ -111,14 +111,21 @@ class GrpcBufferReader GRPC_FINAL
     : public ::grpc::protobuf::io::ZeroCopyInputStream {
  public:
   explicit GrpcBufferReader(grpc_byte_buffer* buffer)
-      : byte_count_(0), backup_count_(0) {
-    g_core_codegen_interface->grpc_byte_buffer_reader_init(&reader_, buffer);
+      : byte_count_(0), backup_count_(0), status_() {
+    if (!g_core_codegen_interface->grpc_byte_buffer_reader_init(&reader_,
+                                                                buffer)) {
+      status_ = Status(StatusCode::INTERNAL,
+                       "Couldn't initialize byte buffer reader");
+    }
   }
   ~GrpcBufferReader() GRPC_OVERRIDE {
     g_core_codegen_interface->grpc_byte_buffer_reader_destroy(&reader_);
   }
 
   bool Next(const void** data, int* size) GRPC_OVERRIDE {
+    if (!status_.ok()) {
+      return false;
+    }
     if (backup_count_ > 0) {
       *data = GPR_SLICE_START_PTR(slice_) + GPR_SLICE_LENGTH(slice_) -
               backup_count_;
@@ -139,6 +146,8 @@ class GrpcBufferReader GRPC_FINAL
     return true;
   }
 
+  Status status() const { return status_; }
+
   void BackUp(int count) GRPC_OVERRIDE { backup_count_ = count; }
 
   bool Skip(int count) GRPC_OVERRIDE {
@@ -165,6 +174,7 @@ class GrpcBufferReader GRPC_FINAL
   int64_t backup_count_;
   grpc_byte_buffer_reader reader_;
   gpr_slice slice_;
+  Status status_;
 };
 }  // namespace internal
 
@@ -202,6 +212,9 @@ class SerializationTraits<T, typename std::enable_if<std::is_base_of<
     Status result = g_core_codegen_interface->ok();
     {
       internal::GrpcBufferReader reader(buffer);
+      if (!reader.status().ok()) {
+        return reader.status();
+      }
       ::grpc::protobuf::io::CodedInputStream decoder(&reader);
       if (max_message_size > 0) {
         decoder.SetTotalBytesLimit(max_message_size, max_message_size);
diff --git a/include/grpc++/impl/codegen/server_context.h b/include/grpc++/impl/codegen/server_context.h
index a1e1ed176f6e1ae480a40e033c3b67a216ae4cb4..cea13a513f63b09c0df213a18d85fe017eaf4b90 100644
--- a/include/grpc++/impl/codegen/server_context.h
+++ b/include/grpc++/impl/codegen/server_context.h
@@ -94,12 +94,12 @@ class ServerContext {
   ~ServerContext();
 
 #ifndef GRPC_CXX0X_NO_CHRONO
-  std::chrono::system_clock::time_point deadline() {
+  std::chrono::system_clock::time_point deadline() const {
     return Timespec2Timepoint(deadline_);
   }
 #endif  // !GRPC_CXX0X_NO_CHRONO
 
-  gpr_timespec raw_deadline() { return deadline_; }
+  gpr_timespec raw_deadline() const { return deadline_; }
 
   void AddInitialMetadata(const grpc::string& key, const grpc::string& value);
   void AddTrailingMetadata(const grpc::string& key, const grpc::string& value);
@@ -122,7 +122,8 @@ class ServerContext {
   // was called.
   void TryCancel() const;
 
-  const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata() {
+  const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata()
+      const {
     return client_metadata_;
   }
 
diff --git a/include/grpc++/impl/codegen/sync_stream.h b/include/grpc++/impl/codegen/sync_stream.h
index e94ffe584224efdb7abf65102505978478a91f43..cbfa410699596d71937e48f425075b822c04a33f 100644
--- a/include/grpc++/impl/codegen/sync_stream.h
+++ b/include/grpc++/impl/codegen/sync_stream.h
@@ -71,6 +71,9 @@ class ReaderInterface {
   virtual ~ReaderInterface() {}
 
   /// Blocking read a message and parse to \a msg. Returns \a true on success.
+  /// This is thread-safe with respect to \a Write or \a WritesDone methods on
+  /// the same stream. It should not be called concurrently with another \a
+  /// Read on the same stream, as the order of delivery will not be defined.
   ///
   /// \param[out] msg The read message.
   ///
@@ -87,6 +90,7 @@ class WriterInterface {
   virtual ~WriterInterface() {}
 
   /// Blocking write \a msg to the stream with options.
+  /// This is thread-safe with respect to \a Read.
   ///
   /// \param msg The message to be written to the stream.
   /// \param options Options affecting the write operation.
@@ -95,6 +99,7 @@ class WriterInterface {
   virtual bool Write(const W& msg, const WriteOptions& options) = 0;
 
   /// Blocking write \a msg to the stream with default options.
+  /// This is thread-safe with respect to \a Read.
   ///
   /// \param msg The message to be written to the stream.
   ///
@@ -174,7 +179,8 @@ class ClientWriterInterface : public ClientStreamingInterface,
                               public WriterInterface<W> {
  public:
   /// Half close writing from the client.
-  /// Block until writes are completed.
+  /// Block until currently-pending writes are completed.
+  /// Thread-safe with respect to \a Read operations only.
   ///
   /// \return Whether the writes were successful.
   virtual bool WritesDone() = 0;
@@ -257,7 +263,8 @@ class ClientReaderWriterInterface : public ClientStreamingInterface,
   /// the metadata will be available in ClientContext after the first read.
   virtual void WaitForInitialMetadata() = 0;
 
-  /// Block until writes are completed.
+  /// Block until currently-pending writes are completed.
+  /// Thread-safe with respect to \a Read.
   ///
   /// \return Whether the writes were successful.
   virtual bool WritesDone() = 0;
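
The synchronous streams get the same contract: on a `ClientReaderWriter`, one thread may Write and WritesDone while another thread Reads, but concurrent Reads (or concurrent Writes) are not allowed. A sketch with one writer thread and one reader loop; as before, `Echo::Stub`, `Chat` and `Msg` are hypothetical generated names:

```cpp
#include <thread>
#include <vector>
#include <grpc++/grpc++.h>
#include <grpc++/support/sync_stream.h>

// Sketch only: Echo::Stub, Chat and Msg are hypothetical generated types.
grpc::Status ChatConcurrently(Echo::Stub* stub, const std::vector<Msg>& out) {
  grpc::ClientContext ctx;
  auto stream = stub->Chat(&ctx);  // unique_ptr<ClientReaderWriter<Msg, Msg>>

  // Writer thread: Write and WritesDone are thread-safe with respect to Read.
  std::thread writer([&] {
    for (const Msg& m : out) {
      if (!stream->Write(m)) break;
    }
    stream->WritesDone();
  });

  // Reader stays on this thread; only one Read may be in flight at a time.
  Msg incoming;
  while (stream->Read(&incoming)) {
    // ... process incoming ...
  }

  writer.join();
  return stream->Finish();
}
```
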
diff --git a/include/grpc++/support/byte_buffer.h b/include/grpc++/support/byte_buffer.h
index f6eb09638f5b15137ba8cbbfee9451adeb880371..20bd4071091276ffc238c013a75a87e3354570c2 100644
--- a/include/grpc++/support/byte_buffer.h
+++ b/include/grpc++/support/byte_buffer.h
@@ -64,7 +64,7 @@ class ByteBuffer GRPC_FINAL {
   ByteBuffer& operator=(const ByteBuffer&);
 
   /// Dump (read) the buffer contents into \a slices.
-  void Dump(std::vector<Slice>* slices) const;
+  Status Dump(std::vector<Slice>* slices) const;
 
   /// Remove all data.
   void Clear();
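
Since Dump can now report failure, callers should check the returned Status before touching the slices. A small sketch with a helper of our own naming:

```cpp
#include <vector>
#include <grpc++/support/byte_buffer.h>
#include <grpc++/support/slice.h>

// Returns the total payload size of `buffer`, or 0 if it cannot be read.
size_t PayloadSize(const grpc::ByteBuffer& buffer) {
  std::vector<grpc::Slice> slices;
  grpc::Status s = buffer.Dump(&slices);  // Dump() now returns a Status.
  if (!s.ok()) {
    return 0;
  }
  size_t total = 0;
  for (const grpc::Slice& slice : slices) {
    total += slice.size();
  }
  return total;
}
```
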
diff --git a/include/grpc++/support/slice.h b/include/grpc++/support/slice.h
index cec9062d4fd6e36690b641420a7ae4767238deb6..5874b4f5ae32d02bdcc65d2efd2bcd19fcb1a8ed 100644
--- a/include/grpc++/support/slice.h
+++ b/include/grpc++/support/slice.h
@@ -77,6 +77,9 @@ class Slice GRPC_FINAL {
   /// Raw pointer to the end (one byte \em past the last element) of the slice.
   const uint8_t* end() const { return GPR_SLICE_END_PTR(slice_); }
 
+  /// Raw C slice. Caller needs to call gpr_slice_unref when done.
+  gpr_slice c_slice() const { return gpr_slice_ref(slice_); }
+
  private:
   friend class ByteBuffer;
 
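
As the new comment notes, c_slice() hands back an owned reference to the underlying gpr_slice, so the caller must balance it with gpr_slice_unref. A short sketch:

```cpp
#include <string>
#include <grpc++/support/slice.h>
#include <grpc/support/slice.h>

// Copies the bytes of a C++ Slice into a std::string via the raw C slice.
std::string SliceToString(const grpc::Slice& s) {
  gpr_slice raw = s.c_slice();  // takes a reference; released below
  std::string out(reinterpret_cast<const char*>(GPR_SLICE_START_PTR(raw)),
                  GPR_SLICE_LENGTH(raw));
  gpr_slice_unref(raw);  // balance the reference taken by c_slice()
  return out;
}
```
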
diff --git a/include/grpc/grpc_posix.h b/include/grpc/grpc_posix.h
index 9742b83374a453d08fe8099273a6fef09cea03b1..5e89ae3b1ee0c38be581db336543eb53b83fc41d 100644
--- a/include/grpc/grpc_posix.h
+++ b/include/grpc/grpc_posix.h
@@ -63,6 +63,14 @@ GRPCAPI void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
                                                       grpc_completion_queue *cq,
                                                       int fd);
 
+/** The GRPC Core POSIX library may internally use signals to optimize some
+   work. The library uses the (SIGRTMIN + 2) signal by default. Use this API
+   to instruct the library to use a different signal, i.e. 'signum', instead.
+   Note:
+   - To prevent the GRPC library from using any signals, pass a 'signum' of -1
+   - This API is optional, but if called, it MUST be called before grpc_init() */
+GRPCAPI void grpc_use_signal(int signum);
+
 #ifdef __cplusplus
 }
 #endif
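
Per the new comment, grpc_use_signal is optional but must run before grpc_init, and a 'signum' of -1 turns signal usage off entirely. A minimal sketch of the intended call order:

```cpp
#include <grpc/grpc.h>
#include <grpc/grpc_posix.h>

int main() {
  // Must be called before grpc_init(); -1 tells the core library not to use
  // any signal for its internal optimizations.
  grpc_use_signal(-1);
  grpc_init();

  // ... create channels/servers and run the application ...

  grpc_shutdown();
  return 0;
}
```
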
diff --git a/include/grpc/impl/codegen/byte_buffer.h b/include/grpc/impl/codegen/byte_buffer.h
index 3ae8ac50ba1ebb49e4217d552ff1d0b6005ffcd1..fe1e2159793bce21ec1186389fec965b0cc0bed4 100644
--- a/include/grpc/impl/codegen/byte_buffer.h
+++ b/include/grpc/impl/codegen/byte_buffer.h
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -93,9 +93,10 @@ GRPCAPI void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
 struct grpc_byte_buffer_reader;
 typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader;
 
-/** Initialize \a reader to read over \a buffer */
-GRPCAPI void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
-                                          grpc_byte_buffer *buffer);
+/** Initialize \a reader to read over \a buffer.
+ * Returns 1 upon success, 0 otherwise. */
+GRPCAPI int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
+                                         grpc_byte_buffer *buffer);
 
 /** Cleanup and destroy \a reader */
 GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader);
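
Because initialization can now fail, callers should check the return value (the C++ GrpcBufferReader change further down does exactly that). A sketch of the C-level pattern, using only existing reader APIs:

```cpp
#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/support/slice.h>

// Counts the readable bytes in `bb`, or returns -1 if the reader cannot be
// initialized (for example, if the buffer is corrupt).
long CountBytes(grpc_byte_buffer* bb) {
  grpc_byte_buffer_reader reader;
  if (!grpc_byte_buffer_reader_init(&reader, bb)) {
    return -1;  // initialization is now fallible and must be checked
  }
  long total = 0;
  gpr_slice slice;
  while (grpc_byte_buffer_reader_next(&reader, &slice)) {
    total += (long)GPR_SLICE_LENGTH(slice);
    gpr_slice_unref(slice);  // _next hands out a referenced slice
  }
  grpc_byte_buffer_reader_destroy(&reader);
  return total;
}
```
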
diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h
index c51ffa449300150f0090dce985ba1621f902bff6..c0ed13950662a94a5e1bad067ad86728a6682249 100644
--- a/include/grpc/impl/codegen/grpc_types.h
+++ b/include/grpc/impl/codegen/grpc_types.h
@@ -156,6 +156,8 @@ typedef struct {
 #define GRPC_SSL_TARGET_NAME_OVERRIDE_ARG "grpc.ssl_target_name_override"
 /* Maximum metadata size */
 #define GRPC_ARG_MAX_METADATA_SIZE "grpc.max_metadata_size"
+/** If non-zero, allow the use of SO_REUSEPORT if it's available (default 1) */
+#define GRPC_ARG_ALLOW_REUSEPORT "grpc.so_reuseport"
 
 /** Result of a grpc call. If the caller satisfies the prerequisites of a
     particular operation, the grpc_call_error returned will be GRPC_CALL_OK.
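
The new channel argument is an integer flag, defaulting to 1 (allow SO_REUSEPORT where available). A hedged sketch of disabling it through the core C API when creating a server:

```cpp
#include <grpc/grpc.h>

// Creates a core server with SO_REUSEPORT explicitly disabled by setting
// the grpc.so_reuseport channel argument to 0.
grpc_server* CreateServerWithoutReuseport() {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = const_cast<char*>(GRPC_ARG_ALLOW_REUSEPORT);
  arg.value.integer = 0;

  grpc_channel_args args;
  args.num_args = 1;
  args.args = &arg;

  return grpc_server_create(&args, nullptr);
}
```
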
diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h
index b8aa199aa84f5a85269fe38c294d9d1bb8af9f02..3ad665a7a297d08c4e49055b5445bb9c494837fa 100644
--- a/include/grpc/impl/codegen/port_platform.h
+++ b/include/grpc/impl/codegen/port_platform.h
@@ -207,6 +207,7 @@
 #ifdef __GLIBC_PREREQ
 #if __GLIBC_PREREQ(2, 9)
 #define GPR_LINUX_EVENTFD 1
+#define GPR_LINUX_EPOLL 1
 #endif
 #if __GLIBC_PREREQ(2, 10)
 #define GPR_LINUX_SOCKETUTILS 1
diff --git a/include/grpc/module.modulemap b/include/grpc/module.modulemap
new file mode 100644
index 0000000000000000000000000000000000000000..ae11a78b74a2ff60d6e39972ba177a55b69787a7
--- /dev/null
+++ b/include/grpc/module.modulemap
@@ -0,0 +1,5 @@
+framework module grpc {
+  umbrella header "grpc.h"
+  export *
+  module * { export * }
+}
diff --git a/package.json b/package.json
index 5bdaa761e28adac5ca5f0d45b414faf2ef29ee61..0e229c9842b38fed24d06d62c54b0f08f7b0de55 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "grpc",
-  "version": "0.15.0-dev",
+  "version": "1.1.0-dev",
   "author": "Google Inc.",
   "description": "gRPC Library for Node",
   "homepage": "http://www.grpc.io/",
@@ -59,7 +59,6 @@
   "files": [
     "LICENSE",
     "src/node/README.md",
-    "src/node/health_check",
     "src/proto",
     "etc",
     "src/node/index.js",
diff --git a/package.xml b/package.xml
index a8feffc2873b9842ddcdc222b100f5ed08518726..f9a45fb791d13d6c61980077f08b280476edaceb 100644
--- a/package.xml
+++ b/package.xml
@@ -10,19 +10,20 @@
   <email>grpc-packages@google.com</email>
   <active>yes</active>
  </lead>
- <date>2016-05-19</date>
+ <date>2016-07-13</date>
  <time>16:06:07</time>
  <version>
-  <release>0.15.0</release>
-  <api>0.15.0</api>
+  <release>1.1.0</release>
+  <api>1.1.0</api>
  </version>
  <stability>
-  <release>beta</release>
-  <api>beta</api>
+  <release>stable</release>
+  <api>stable</api>
  </stability>
  <license>BSD</license>
  <notes>
-- TBD
+- GA release
+- Fix shutdown hang problem #4017
  </notes>
  <contents>
   <dir baseinstalldir="/" name="/">
@@ -196,6 +197,7 @@
     <file baseinstalldir="/" name="src/core/lib/iomgr/endpoint.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/error.h" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll_linux.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_and_epoll_posix.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_posix.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/ev_posix.h" role="src" />
@@ -206,6 +208,7 @@
     <file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_internal.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_posix.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/load_file.h" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/iomgr/network_status_tracker.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/polling_entity.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/pollset.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/pollset_set.h" role="src" />
@@ -247,7 +250,6 @@
     <file baseinstalldir="/" name="src/core/lib/surface/init.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/surface/lame_client.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/surface/server.h" role="src" />
-    <file baseinstalldir="/" name="src/core/lib/surface/surface_trace.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/transport/byte_stream.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/transport/metadata.h" role="src" />
@@ -352,6 +354,7 @@
     <file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair_posix.c" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair_windows.c" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/error.c" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll_linux.c" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_and_epoll_posix.c" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_posix.c" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/ev_posix.c" role="src" />
@@ -362,6 +365,7 @@
     <file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_posix.c" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_windows.c" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/load_file.c" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/iomgr/network_status_tracker.c" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/polling_entity.c" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/pollset_set_windows.c" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/pollset_windows.c" role="src" />
@@ -1033,6 +1037,7 @@ Update to wrap gRPC C Core version 0.10.0
    <license>BSD</license>
    <notes>
 - Simplify gRPC PHP installation #4517
+- Wrap gRPC core library version 0.13
    </notes>
   </release>
   <release>
@@ -1062,13 +1067,14 @@ Update to wrap gRPC C Core version 0.10.0
    <date>2016-04-19</date>
    <license>BSD</license>
    <notes>
+- wrap grpc C core version 0.14.0
 - destroy grpc_byte_buffer after startBatch #6096
    </notes>
   </release>
   <release>
    <version>
-    <release>0.14.2</release>
-    <api>0.14.2</api>
+    <release>0.15.0</release>
+    <api>0.15.0</api>
    </version>
    <stability>
     <release>beta</release>
@@ -1078,6 +1084,23 @@ Update to wrap gRPC C Core version 0.10.0
    <license>BSD</license>
    <notes>
 - Updated functions with TSRM macros for ZTS support #6607
+- Load default roots.pem via grpc_set_ssl_roots_override_callback #6848
+   </notes>
+  </release>
+  <release>
+   <version>
+    <release>1.0.0</release>
+    <api>1.0.0</api>
+   </version>
+   <stability>
+    <release>stable</release>
+    <api>stable</api>
+   </stability>
+   <date>2016-07-13</date>
+   <license>BSD</license>
+   <notes>
+- GA release
+- Fix shutdown hang problem #4017
    </notes>
   </release>
  </changelog>
diff --git a/setup.py b/setup.py
index f73501b8b351175bef988cf52f3559b7afecb9d3..de346a215014559c7d0dffbcd5fb29e1fd7fd006 100644
--- a/setup.py
+++ b/setup.py
@@ -31,6 +31,8 @@
 
 import os
 import os.path
+import platform
+import shlex
 import shutil
 import sys
 import sysconfig
@@ -55,15 +57,22 @@ os.chdir(os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(0, os.path.abspath(PYTHON_STEM))
 
 # Break import-style to ensure we can actually find our in-repo dependencies.
+import _unixccompiler_patch
 import commands
 import grpc_core_dependencies
 import grpc_version
 
+if 'win32' in sys.platform:
+  _unixccompiler_patch.monkeypatch_unix_compiler()
+
+
 LICENSE = '3-clause BSD'
 
 # Environment variable to determine whether or not the Cython extension should
 # *use* Cython or use the generated C files. Note that this requires the C files
-# to have been generated by building first *with* Cython support.
+# to have been generated by building first *with* Cython support. Even if this
+# is set to false, if the script detects that the generated `.c` file isn't
+# present, then it will still attempt to use Cython.
 BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
 
 # Environment variable to determine whether or not to enable coverage analysis
@@ -71,9 +80,13 @@ BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
 ENABLE_CYTHON_TRACING = os.environ.get(
     'GRPC_PYTHON_ENABLE_CYTHON_TRACING', False)
 
-# Environment variable to determine whether or not to include the test files in
-# the installation.
-INSTALL_TESTS = os.environ.get('GRPC_PYTHON_INSTALL_TESTS', False)
+# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
+# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
+# We use these environment variables to thus get around that without locking
+# ourselves in w.r.t. the multitude of operating systems this ought to build on.
+# By default we assume a GCC-like compiler.
+EXTRA_COMPILE_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_CFLAGS', ''))
+EXTRA_LINK_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_LDFLAGS', ''))
 
 CYTHON_EXTENSION_PACKAGE_NAMES = ()
 
@@ -84,9 +97,7 @@ CYTHON_HELPER_C_FILES = (
     os.path.join(PYTHON_STEM, 'grpc/_cython/imports.generated.c'),
 )
 
-CORE_C_FILES = ()
-if not "win32" in sys.platform:
-  CORE_C_FILES += tuple(grpc_core_dependencies.CORE_SOURCE_FILES)
+CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES)
 
 EXTENSION_INCLUDE_DIRECTORIES = (
     (PYTHON_STEM,) + CORE_INCLUDE + BORINGSSL_INCLUDE + ZLIB_INCLUDE)
@@ -96,11 +107,17 @@ if "linux" in sys.platform:
   EXTENSION_LIBRARIES += ('rt',)
 if not "win32" in sys.platform:
   EXTENSION_LIBRARIES += ('m',)
+if "win32" in sys.platform:
+  EXTENSION_LIBRARIES += ('ws2_32',)
 
 DEFINE_MACROS = (('OPENSSL_NO_ASM', 1), ('_WIN32_WINNT', 0x600), ('GPR_BACKWARDS_COMPATIBILITY_MODE', 1),)
+if "win32" in sys.platform:
+  DEFINE_MACROS += (('OPENSSL_WINDOWS', 1), ('WIN32_LEAN_AND_MEAN', 1),)
+  if '64bit' in platform.architecture()[0]:
+    DEFINE_MACROS += (('MS_WIN64', 1),)
 
-LDFLAGS = ()
-CFLAGS = ()
+LDFLAGS = tuple(EXTRA_LINK_ARGS)
+CFLAGS = tuple(EXTRA_COMPILE_ARGS)
 if "linux" in sys.platform:
   LDFLAGS += ('-Wl,-wrap,memcpy',)
 if "linux" in sys.platform or "darwin" in sys.platform:
@@ -131,10 +148,20 @@ def cython_extensions(module_names, extra_sources, include_dirs,
   if ENABLE_CYTHON_TRACING:
     define_macros = define_macros + [('CYTHON_TRACE_NOGIL', 1)]
     cython_compiler_directives['linetrace'] = True
-  file_extension = 'pyx' if build_with_cython else 'c'
-  module_files = [os.path.join(PYTHON_STEM,
-                               name.replace('.', '/') + '.' + file_extension)
-                  for name in module_names]
+  pyx_module_files = [os.path.join(PYTHON_STEM,
+                                   name.replace('.', '/') + '.pyx')
+                      for name in module_names]
+  c_module_files = [os.path.join(PYTHON_STEM,
+                                 name.replace('.', '/') + '.c')
+                    for name in module_names]
+  if not build_with_cython:
+    for module_file in c_module_files:
+      if not os.path.isfile(module_file):
+        sys.stderr.write('Cython-generated files are missing; '
+                         'forcing Cython build...\n')
+        build_with_cython = True
+        break
+  module_files = pyx_module_files if build_with_cython else c_module_files
   extensions = [
       _extension.Extension(
           name=module_name,
@@ -165,7 +192,7 @@ PACKAGE_DIRECTORIES = {
 }
 
 INSTALL_REQUIRES = (
-    'six>=1.10',
+    'six>=1.5.2',
     'enum34>=1.0.4',
     'futures>=2.2.0',
     # TODO(atash): eventually split the grpcio package into a metapackage
@@ -173,20 +200,18 @@ INSTALL_REQUIRES = (
     'protobuf>=3.0.0a3',
 )
 
-SETUP_REQUIRES = (
+SETUP_REQUIRES = INSTALL_REQUIRES + (
     'sphinx>=1.3',
-    'sphinx_rtd_theme>=0.1.8'
-) + INSTALL_REQUIRES
+    'sphinx_rtd_theme>=0.1.8',
+    'six>=1.10',
+)
 
 COMMAND_CLASS = {
     'doc': commands.SphinxDocumentation,
-    'build_proto_modules': commands.BuildProtoModules,
     'build_project_metadata': commands.BuildProjectMetadata,
     'build_py': commands.BuildPy,
     'build_ext': commands.BuildExt,
     'gather': commands.Gather,
-    'run_interop': commands.RunInterop,
-    'test_lite': commands.TestLite
 }
 
 # Ensure that package data is copied over before any commands have been run:
@@ -197,32 +222,6 @@ except OSError:
   pass
 shutil.copyfile('etc/roots.pem', os.path.join(credentials_dir, 'roots.pem'))
 
-TEST_PACKAGE_DATA = {
-    'tests.interop': [
-        'credentials/ca.pem',
-        'credentials/server1.key',
-        'credentials/server1.pem',
-    ],
-    'tests.protoc_plugin': [
-        'protoc_plugin_test.proto',
-    ],
-    'tests.unit': [
-        'credentials/ca.pem',
-        'credentials/server1.key',
-        'credentials/server1.pem',
-    ],
-}
-
-TESTS_REQUIRE = (
-    'oauth2client>=2.1.0',
-    'protobuf>=3.0.0a3',
-    'coverage>=4.0',
-) + INSTALL_REQUIRES
-
-TEST_SUITE = 'tests'
-TEST_LOADER = 'tests:Loader'
-TEST_RUNNER = 'tests:Runner'
-
 PACKAGE_DATA = {
     # Binaries that may or may not be present in the final installation, but are
     # mentioned here for completeness.
@@ -232,12 +231,7 @@ PACKAGE_DATA = {
         '_windows/grpc_c.64.python',
     ],
 }
-if INSTALL_TESTS:
-  PACKAGE_DATA = dict(PACKAGE_DATA, **TEST_PACKAGE_DATA)
-  PACKAGES = setuptools.find_packages(PYTHON_STEM)
-else:
-  PACKAGES = setuptools.find_packages(
-      PYTHON_STEM, exclude=['tests', 'tests.*'])
+PACKAGES = setuptools.find_packages(PYTHON_STEM)
 
 setuptools.setup(
   name='grpcio',
@@ -250,8 +244,4 @@ setuptools.setup(
   install_requires=INSTALL_REQUIRES,
   setup_requires=SETUP_REQUIRES,
   cmdclass=COMMAND_CLASS,
-  tests_require=TESTS_REQUIRE,
-  test_suite=TEST_SUITE,
-  test_loader=TEST_LOADER,
-  test_runner=TEST_RUNNER,
 )
diff --git a/src/compiler/config.h b/src/compiler/config.h
index 1cbd842f0af4ec251ae02d17a3b12ba16f5c02ef..ba44cd8a31c4e86c73211fb2d98ba4b1e8b0d46d 100644
--- a/src/compiler/config.h
+++ b/src/compiler/config.h
@@ -60,7 +60,8 @@
 
 #ifndef GRPC_CUSTOM_PARSEGENERATORPARAMETER
 #include <google/protobuf/compiler/code_generator.h>
-#define GRPC_CUSTOM_PARSEGENERATORPARAMETER ::google::protobuf::compiler::ParseGeneratorParameter
+#define GRPC_CUSTOM_PARSEGENERATORPARAMETER \
+  ::google::protobuf::compiler::ParseGeneratorParameter
 #endif
 
 #ifndef GRPC_CUSTOM_STRING
@@ -81,8 +82,8 @@ static inline int PluginMain(int argc, char* argv[],
                              const CodeGenerator* generator) {
   return GRPC_CUSTOM_PLUGINMAIN(argc, argv, generator);
 }
-static inline void ParseGeneratorParameter(const string& parameter,
-    std::vector<std::pair<string, string> >* options) {
+static inline void ParseGeneratorParameter(
+    const string& parameter, std::vector<std::pair<string, string> >* options) {
   GRPC_CUSTOM_PARSEGENERATORPARAMETER(parameter, options);
 }
 
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index 2288ba4163685ce50f425cd2e1e7ca26a4f65045..c386115ec200f05d0447100995b25a4a56715cc8 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -64,19 +64,22 @@ grpc::string FilenameIdentifier(const grpc::string &filename) {
 }
 }  // namespace
 
-template<class T, size_t N>
-T *array_end(T (&array)[N]) { return array + N; }
+template <class T, size_t N>
+T *array_end(T (&array)[N]) {
+  return array + N;
+}
 
-void PrintIncludes(Printer *printer, const std::vector<grpc::string>& headers, const Parameters &params) {
+void PrintIncludes(Printer *printer, const std::vector<grpc::string> &headers,
+                   const Parameters &params) {
   std::map<grpc::string, grpc::string> vars;
 
   vars["l"] = params.use_system_headers ? '<' : '"';
   vars["r"] = params.use_system_headers ? '>' : '"';
 
-  auto& s = params.grpc_search_path;
+  auto &s = params.grpc_search_path;
   if (!s.empty()) {
     vars["l"] += s;
-    if (s[s.size()-1] != '/') {
+    if (s[s.size() - 1] != '/') {
       vars["l"] += '/';
     }
   }
@@ -101,7 +104,7 @@ grpc::string GetHeaderPrologue(File *file, const Parameters & /*params*/) {
 
     printer->Print(vars, "// Generated by the gRPC protobuf plugin.\n");
     printer->Print(vars,
-                  "// If you make any local change, they will be lost.\n");
+                   "// If you make any local change, they will be lost.\n");
     printer->Print(vars, "// source: $filename$\n");
     grpc::string leading_comments = file->GetLeadingComments();
     if (!leading_comments.empty()) {
@@ -117,8 +120,7 @@ grpc::string GetHeaderPrologue(File *file, const Parameters & /*params*/) {
   return output;
 }
 
-grpc::string GetHeaderIncludes(File *file,
-                               const Parameters &params) {
+grpc::string GetHeaderIncludes(File *file, const Parameters &params) {
   grpc::string output;
   {
     // Scope the output stream so it closes and finalizes output to the string.
@@ -126,15 +128,14 @@ grpc::string GetHeaderIncludes(File *file,
     std::map<grpc::string, grpc::string> vars;
 
     static const char *headers_strs[] = {
-      "grpc++/impl/codegen/async_stream.h",
-      "grpc++/impl/codegen/async_unary_call.h",
-      "grpc++/impl/codegen/proto_utils.h",
-      "grpc++/impl/codegen/rpc_method.h",
-      "grpc++/impl/codegen/service_type.h",
-      "grpc++/impl/codegen/status.h",
-      "grpc++/impl/codegen/stub_options.h",
-      "grpc++/impl/codegen/sync_stream.h"
-    };
+        "grpc++/impl/codegen/async_stream.h",
+        "grpc++/impl/codegen/async_unary_call.h",
+        "grpc++/impl/codegen/proto_utils.h",
+        "grpc++/impl/codegen/rpc_method.h",
+        "grpc++/impl/codegen/service_type.h",
+        "grpc++/impl/codegen/status.h",
+        "grpc++/impl/codegen/stub_options.h",
+        "grpc++/impl/codegen/sync_stream.h"};
     std::vector<grpc::string> headers(headers_strs, array_end(headers_strs));
     PrintIncludes(printer.get(), headers, params);
     printer->Print(vars, "\n");
@@ -309,8 +310,7 @@ void PrintHeaderClientMethodInterfaces(
   }
 }
 
-void PrintHeaderClientMethod(Printer *printer,
-                             const Method *method,
+void PrintHeaderClientMethod(Printer *printer, const Method *method,
                              std::map<grpc::string, grpc::string> *vars,
                              bool is_public) {
   (*vars)["Method"] = method->name();
@@ -490,10 +490,8 @@ void PrintHeaderServerMethodSync(Printer *printer, const Method *method,
   printer->Print(method->GetTrailingComments().c_str());
 }
 
-void PrintHeaderServerMethodAsync(
-    Printer *printer,
-    const Method *method,
-    std::map<grpc::string, grpc::string> *vars) {
+void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
+                                  std::map<grpc::string, grpc::string> *vars) {
   (*vars)["Method"] = method->name();
   (*vars)["Request"] = method->input_type_name();
   (*vars)["Response"] = method->output_type_name();
@@ -607,8 +605,7 @@ void PrintHeaderServerMethodAsync(
 }
 
 void PrintHeaderServerMethodGeneric(
-    Printer *printer,
-    const Method *method,
+    Printer *printer, const Method *method,
     std::map<grpc::string, grpc::string> *vars) {
   (*vars)["Method"] = method->name();
   (*vars)["Request"] = method->input_type_name();
@@ -677,8 +674,7 @@ void PrintHeaderServerMethodGeneric(
   printer->Print(*vars, "};\n");
 }
 
-void PrintHeaderService(Printer *printer,
-                        const Service *service,
+void PrintHeaderService(Printer *printer, const Service *service,
                         std::map<grpc::string, grpc::string> *vars) {
   (*vars)["Service"] = service->name();
 
@@ -696,14 +692,16 @@ void PrintHeaderService(Printer *printer,
   printer->Print("virtual ~StubInterface() {}\n");
   for (int i = 0; i < service->method_count(); ++i) {
     printer->Print(service->method(i)->GetLeadingComments().c_str());
-    PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars, true);
+    PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars,
+                                      true);
     printer->Print(service->method(i)->GetTrailingComments().c_str());
   }
   printer->Outdent();
   printer->Print("private:\n");
   printer->Indent();
   for (int i = 0; i < service->method_count(); ++i) {
-    PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars, false);
+    PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars,
+                                      false);
   }
   printer->Outdent();
   printer->Print("};\n");
@@ -711,7 +709,8 @@ void PrintHeaderService(Printer *printer,
       "class Stub GRPC_FINAL : public StubInterface"
       " {\n public:\n");
   printer->Indent();
-  printer->Print("Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n");
+  printer->Print(
+      "Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n");
   for (int i = 0; i < service->method_count(); ++i) {
     PrintHeaderClientMethod(printer, service->method(i).get(), vars, true);
   }
@@ -776,8 +775,7 @@ void PrintHeaderService(Printer *printer,
   printer->Print(service->GetTrailingComments().c_str());
 }
 
-grpc::string GetHeaderServices(File *file,
-                               const Parameters &params) {
+grpc::string GetHeaderServices(File *file, const Parameters &params) {
   grpc::string output;
   {
     // Scope the output stream so it closes and finalizes output to the string.
@@ -849,7 +847,7 @@ grpc::string GetSourcePrologue(File *file, const Parameters & /*params*/) {
 
     printer->Print(vars, "// Generated by the gRPC protobuf plugin.\n");
     printer->Print(vars,
-                  "// If you make any local change, they will be lost.\n");
+                   "// If you make any local change, they will be lost.\n");
     printer->Print(vars, "// source: $filename$\n\n");
 
     printer->Print(vars, "#include \"$filename_base$$message_header_ext$\"\n");
@@ -860,8 +858,7 @@ grpc::string GetSourcePrologue(File *file, const Parameters & /*params*/) {
   return output;
 }
 
-grpc::string GetSourceIncludes(File *file,
-                               const Parameters &params) {
+grpc::string GetSourceIncludes(File *file, const Parameters &params) {
   grpc::string output;
   {
     // Scope the output stream so it closes and finalizes output to the string.
@@ -869,15 +866,14 @@ grpc::string GetSourceIncludes(File *file,
     std::map<grpc::string, grpc::string> vars;
 
     static const char *headers_strs[] = {
-      "grpc++/impl/codegen/async_stream.h",
-      "grpc++/impl/codegen/async_unary_call.h",
-      "grpc++/impl/codegen/channel_interface.h",
-      "grpc++/impl/codegen/client_unary_call.h",
-      "grpc++/impl/codegen/method_handler_impl.h",
-      "grpc++/impl/codegen/rpc_service_method.h",
-      "grpc++/impl/codegen/service_type.h",
-      "grpc++/impl/codegen/sync_stream.h"
-    };
+        "grpc++/impl/codegen/async_stream.h",
+        "grpc++/impl/codegen/async_unary_call.h",
+        "grpc++/impl/codegen/channel_interface.h",
+        "grpc++/impl/codegen/client_unary_call.h",
+        "grpc++/impl/codegen/method_handler_impl.h",
+        "grpc++/impl/codegen/rpc_service_method.h",
+        "grpc++/impl/codegen/service_type.h",
+        "grpc++/impl/codegen/sync_stream.h"};
     std::vector<grpc::string> headers(headers_strs, array_end(headers_strs));
     PrintIncludes(printer.get(), headers, params);
 
@@ -895,8 +891,7 @@ grpc::string GetSourceIncludes(File *file,
   return output;
 }
 
-void PrintSourceClientMethod(Printer *printer,
-                             const Method *method,
+void PrintSourceClientMethod(Printer *printer, const Method *method,
                              std::map<grpc::string, grpc::string> *vars) {
   (*vars)["Method"] = method->name();
   (*vars)["Request"] = method->input_type_name();
@@ -996,8 +991,7 @@ void PrintSourceClientMethod(Printer *printer,
   }
 }
 
-void PrintSourceServerMethod(Printer *printer,
-                             const Method *method,
+void PrintSourceServerMethod(Printer *printer, const Method *method,
                              std::map<grpc::string, grpc::string> *vars) {
   (*vars)["Method"] = method->name();
   (*vars)["Request"] = method->input_type_name();
@@ -1055,8 +1049,7 @@ void PrintSourceServerMethod(Printer *printer,
   }
 }
 
-void PrintSourceService(Printer *printer,
-                        const Service *service,
+void PrintSourceService(Printer *printer, const Service *service,
                         std::map<grpc::string, grpc::string> *vars) {
   (*vars)["Service"] = service->name();
 
@@ -1168,8 +1161,7 @@ void PrintSourceService(Printer *printer,
   }
 }
 
-grpc::string GetSourceServices(File *file,
-                               const Parameters &params) {
+grpc::string GetSourceServices(File *file, const Parameters &params) {
   grpc::string output;
   {
     // Scope the output stream so it closes and finalizes output to the string.
diff --git a/src/compiler/cpp_plugin.cc b/src/compiler/cpp_plugin.cc
index fc0296cd282edb3ea8e5c81cbc191a282ef89560..38f8f738ed93dceb1f47749b6b7bc6e20f028566 100644
--- a/src/compiler/cpp_plugin.cc
+++ b/src/compiler/cpp_plugin.cc
@@ -48,7 +48,7 @@ using grpc_cpp_generator::GetCppComments;
 class ProtoBufMethod : public grpc_cpp_generator::Method {
  public:
   ProtoBufMethod(const grpc::protobuf::MethodDescriptor *method)
-    : method_(method) {}
+      : method_(method) {}
 
   grpc::string name() const { return method_->name(); }
 
@@ -90,14 +90,14 @@ class ProtoBufMethod : public grpc_cpp_generator::Method {
 class ProtoBufService : public grpc_cpp_generator::Service {
  public:
   ProtoBufService(const grpc::protobuf::ServiceDescriptor *service)
-    : service_(service) {}
+      : service_(service) {}
 
   grpc::string name() const { return service_->name(); }
 
   int method_count() const { return service_->method_count(); };
   std::unique_ptr<const grpc_cpp_generator::Method> method(int i) const {
     return std::unique_ptr<const grpc_cpp_generator::Method>(
-          new ProtoBufMethod(service_->method(i)));
+        new ProtoBufMethod(service_->method(i)));
   };
 
   grpc::string GetLeadingComments() const {
@@ -115,7 +115,7 @@ class ProtoBufService : public grpc_cpp_generator::Service {
 class ProtoBufPrinter : public grpc_cpp_generator::Printer {
  public:
   ProtoBufPrinter(grpc::string *str)
-    : output_stream_(str), printer_(&output_stream_, '$') {}
+      : output_stream_(str), printer_(&output_stream_, '$') {}
 
   void Print(const std::map<grpc::string, grpc::string> &vars,
              const char *string_template) {
@@ -152,13 +152,14 @@ class ProtoBufFile : public grpc_cpp_generator::File {
 
   int service_count() const { return file_->service_count(); };
   std::unique_ptr<const grpc_cpp_generator::Service> service(int i) const {
-    return std::unique_ptr<const grpc_cpp_generator::Service> (
-          new ProtoBufService(file_->service(i)));
+    return std::unique_ptr<const grpc_cpp_generator::Service>(
+        new ProtoBufService(file_->service(i)));
   }
 
-  std::unique_ptr<grpc_cpp_generator::Printer> CreatePrinter(grpc::string *str) const {
+  std::unique_ptr<grpc_cpp_generator::Printer> CreatePrinter(
+      grpc::string *str) const {
     return std::unique_ptr<grpc_cpp_generator::Printer>(
-          new ProtoBufPrinter(str));
+        new ProtoBufPrinter(str));
   }
 
   grpc::string GetLeadingComments() const {
@@ -197,12 +198,11 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
 
     if (!parameter.empty()) {
       std::vector<grpc::string> parameters_list =
-        grpc_generator::tokenize(parameter, ",");
+          grpc_generator::tokenize(parameter, ",");
       for (auto parameter_string = parameters_list.begin();
-           parameter_string != parameters_list.end();
-           parameter_string++) {
+           parameter_string != parameters_list.end(); parameter_string++) {
         std::vector<grpc::string> param =
-          grpc_generator::tokenize(*parameter_string, "=");
+            grpc_generator::tokenize(*parameter_string, "=");
         if (param[0] == "services_namespace") {
           generator_parameters.services_namespace = param[1];
         } else if (param[0] == "use_system_headers") {
@@ -232,8 +232,7 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
         grpc_cpp_generator::GetHeaderEpilogue(&pbfile, generator_parameters);
     std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> header_output(
         context->Open(file_name + ".grpc.pb.h"));
-    grpc::protobuf::io::CodedOutputStream header_coded_out(
-        header_output.get());
+    grpc::protobuf::io::CodedOutputStream header_coded_out(header_output.get());
     header_coded_out.WriteRaw(header_code.data(), header_code.size());
 
     grpc::string source_code =
@@ -243,8 +242,7 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
         grpc_cpp_generator::GetSourceEpilogue(&pbfile, generator_parameters);
     std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> source_output(
         context->Open(file_name + ".grpc.pb.cc"));
-    grpc::protobuf::io::CodedOutputStream source_coded_out(
-        source_output.get());
+    grpc::protobuf::io::CodedOutputStream source_coded_out(source_output.get());
     source_coded_out.WriteRaw(source_code.data(), source_code.size());
 
     return true;
diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc
index fc8feaf0fcc9bd36a1e6b22fb881a86360e9cb59..591e5ae3d42d81a5a7da8d18027256bb7130d20c 100644
--- a/src/compiler/csharp_generator.cc
+++ b/src/compiler/csharp_generator.cc
@@ -36,11 +36,10 @@
 #include <sstream>
 #include <vector>
 
-#include "src/compiler/csharp_generator.h"
 #include "src/compiler/config.h"
-#include "src/compiler/csharp_generator_helpers.h"
 #include "src/compiler/csharp_generator.h"
-
+#include "src/compiler/csharp_generator.h"
+#include "src/compiler/csharp_generator_helpers.h"
 
 using google::protobuf::compiler::csharp::GetFileNamespace;
 using google::protobuf::compiler::csharp::GetClassName;
@@ -61,7 +60,6 @@ using grpc_generator::StringReplace;
 using std::map;
 using std::vector;
 
-
 namespace grpc_csharp_generator {
 namespace {
 
@@ -70,34 +68,43 @@ namespace {
 // Currently, we cannot easily reuse the functionality as
 // google/protobuf/compiler/csharp/csharp_doc_comment.h is not a public header.
 // TODO(jtattermusch): reuse the functionality from google/protobuf.
-void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer* printer, grpc::protobuf::SourceLocation location) {
-    grpc::string comments = location.leading_comments.empty() ?
-        location.trailing_comments : location.leading_comments;
+void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer,
+                                grpc::protobuf::SourceLocation location) {
+  grpc::string comments = location.leading_comments.empty()
+                              ? location.trailing_comments
+                              : location.leading_comments;
   if (comments.empty()) {
     return;
   }
-  // XML escaping... no need for apostrophes etc as the whole text is going to be a child
+  // XML escaping... no need for apostrophes etc as the whole text is going to
+  // be a child
   // node of a summary element, not part of an attribute.
   comments = grpc_generator::StringReplace(comments, "&", "&amp;", true);
   comments = grpc_generator::StringReplace(comments, "<", "&lt;", true);
 
   std::vector<grpc::string> lines;
   grpc_generator::Split(comments, '\n', &lines);
-  // TODO: We really should work out which part to put in the summary and which to put in the remarks...
-  // but that needs to be part of a bigger effort to understand the markdown better anyway.
+  // TODO: We really should work out which part to put in the summary and which
+  // to put in the remarks...
+  // but that needs to be part of a bigger effort to understand the markdown
+  // better anyway.
   printer->Print("/// <summary>\n");
   bool last_was_empty = false;
-  // We squash multiple blank lines down to one, and remove any trailing blank lines. We need
-  // to preserve the blank lines themselves, as this is relevant in the markdown.
-  // Note that we can't remove leading or trailing whitespace as *that's* relevant in markdown too.
+  // We squash multiple blank lines down to one, and remove any trailing blank
+  // lines. We need
+  // to preserve the blank lines themselves, as this is relevant in the
+  // markdown.
+  // Note that we can't remove leading or trailing whitespace as *that's*
+  // relevant in markdown too.
   // (We don't skip "just whitespace" lines, either.)
-  for (std::vector<grpc::string>::iterator it = lines.begin(); it != lines.end(); ++it) {
+  for (std::vector<grpc::string>::iterator it = lines.begin();
+       it != lines.end(); ++it) {
     grpc::string line = *it;
     if (line.empty()) {
       last_was_empty = true;
     } else {
       if (last_was_empty) {
-          printer->Print("///\n");
+        printer->Print("///\n");
       }
       last_was_empty = false;
       printer->Print("/// $line$\n", "line", *it);
@@ -107,23 +114,23 @@ void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer* printer, grpc::prot
 }
 
 template <typename DescriptorType>
-void GenerateDocCommentBody(
-  grpc::protobuf::io::Printer* printer, const DescriptorType* descriptor) {
+void GenerateDocCommentBody(grpc::protobuf::io::Printer *printer,
+                            const DescriptorType *descriptor) {
   grpc::protobuf::SourceLocation location;
   if (descriptor->GetSourceLocation(&location)) {
     GenerateDocCommentBodyImpl(printer, location);
   }
 }
 
-std::string GetServiceClassName(const ServiceDescriptor* service) {
+std::string GetServiceClassName(const ServiceDescriptor *service) {
   return service->name();
 }
 
-std::string GetClientClassName(const ServiceDescriptor* service) {
+std::string GetClientClassName(const ServiceDescriptor *service) {
   return service->name() + "Client";
 }
 
-std::string GetServerClassName(const ServiceDescriptor* service) {
+std::string GetServerClassName(const ServiceDescriptor *service) {
   return service->name() + "Base";
 }
 
@@ -138,13 +145,11 @@ std::string GetCSharpMethodType(MethodType method_type) {
     case METHODTYPE_BIDI_STREAMING:
       return "MethodType.DuplexStreaming";
   }
-  GOOGLE_LOG(FATAL)<< "Can't get here.";
+  GOOGLE_LOG(FATAL) << "Can't get here.";
   return "";
 }
 
-std::string GetServiceNameFieldName() {
-  return "__ServiceName";
-}
+std::string GetServiceNameFieldName() { return "__ServiceName"; }
 
 std::string GetMarshallerFieldName(const Descriptor *message) {
   return "__Marshaller_" + message->name();
@@ -155,7 +160,7 @@ std::string GetMethodFieldName(const MethodDescriptor *method) {
 }
 
 std::string GetMethodRequestParamMaybe(const MethodDescriptor *method,
-                                       bool invocation_param=false) {
+                                       bool invocation_param = false) {
   if (method->client_streaming()) {
     return "";
   }
@@ -174,16 +179,16 @@ std::string GetMethodReturnTypeClient(const MethodDescriptor *method) {
     case METHODTYPE_NO_STREAMING:
       return "AsyncUnaryCall<" + GetClassName(method->output_type()) + ">";
     case METHODTYPE_CLIENT_STREAMING:
-      return "AsyncClientStreamingCall<" + GetClassName(method->input_type())
-          + ", " + GetClassName(method->output_type()) + ">";
+      return "AsyncClientStreamingCall<" + GetClassName(method->input_type()) +
+             ", " + GetClassName(method->output_type()) + ">";
     case METHODTYPE_SERVER_STREAMING:
-      return "AsyncServerStreamingCall<" + GetClassName(method->output_type())
-          + ">";
+      return "AsyncServerStreamingCall<" + GetClassName(method->output_type()) +
+             ">";
     case METHODTYPE_BIDI_STREAMING:
-      return "AsyncDuplexStreamingCall<" + GetClassName(method->input_type())
-          + ", " + GetClassName(method->output_type()) + ">";
+      return "AsyncDuplexStreamingCall<" + GetClassName(method->input_type()) +
+             ", " + GetClassName(method->output_type()) + ">";
   }
-  GOOGLE_LOG(FATAL)<< "Can't get here.";
+  GOOGLE_LOG(FATAL) << "Can't get here.";
   return "";
 }
 
@@ -194,10 +199,10 @@ std::string GetMethodRequestParamServer(const MethodDescriptor *method) {
       return GetClassName(method->input_type()) + " request";
     case METHODTYPE_CLIENT_STREAMING:
     case METHODTYPE_BIDI_STREAMING:
-      return "IAsyncStreamReader<" + GetClassName(method->input_type())
-          + "> requestStream";
+      return "IAsyncStreamReader<" + GetClassName(method->input_type()) +
+             "> requestStream";
   }
-  GOOGLE_LOG(FATAL)<< "Can't get here.";
+  GOOGLE_LOG(FATAL) << "Can't get here.";
   return "";
 }
 
@@ -205,12 +210,13 @@ std::string GetMethodReturnTypeServer(const MethodDescriptor *method) {
   switch (GetMethodType(method)) {
     case METHODTYPE_NO_STREAMING:
     case METHODTYPE_CLIENT_STREAMING:
-      return "global::System.Threading.Tasks.Task<" + GetClassName(method->output_type()) + ">";
+      return "global::System.Threading.Tasks.Task<" +
+             GetClassName(method->output_type()) + ">";
     case METHODTYPE_SERVER_STREAMING:
     case METHODTYPE_BIDI_STREAMING:
       return "global::System.Threading.Tasks.Task";
   }
-  GOOGLE_LOG(FATAL)<< "Can't get here.";
+  GOOGLE_LOG(FATAL) << "Can't get here.";
   return "";
 }
 
@@ -221,18 +227,19 @@ std::string GetMethodResponseStreamMaybe(const MethodDescriptor *method) {
       return "";
     case METHODTYPE_SERVER_STREAMING:
     case METHODTYPE_BIDI_STREAMING:
-      return ", IServerStreamWriter<" + GetClassName(method->output_type())
-          + "> responseStream";
+      return ", IServerStreamWriter<" + GetClassName(method->output_type()) +
+             "> responseStream";
   }
-  GOOGLE_LOG(FATAL)<< "Can't get here.";
+  GOOGLE_LOG(FATAL) << "Can't get here.";
   return "";
 }
 
 // Gets vector of all messages used as input or output types.
-std::vector<const Descriptor*> GetUsedMessages(
+std::vector<const Descriptor *> GetUsedMessages(
     const ServiceDescriptor *service) {
-  std::set<const Descriptor*> descriptor_set;
-  std::vector<const Descriptor*> result;  // vector is to maintain stable ordering
+  std::set<const Descriptor *> descriptor_set;
+  std::vector<const Descriptor *>
+      result;  // vector is to maintain stable ordering
   for (int i = 0; i < service->method_count(); i++) {
     const MethodDescriptor *method = service->method(i);
     if (descriptor_set.find(method->input_type()) == descriptor_set.end()) {
@@ -247,21 +254,25 @@ std::vector<const Descriptor*> GetUsedMessages(
   return result;
 }
 
-void GenerateMarshallerFields(Printer* out, const ServiceDescriptor *service) {
-  std::vector<const Descriptor*> used_messages = GetUsedMessages(service);
+void GenerateMarshallerFields(Printer *out, const ServiceDescriptor *service) {
+  std::vector<const Descriptor *> used_messages = GetUsedMessages(service);
   for (size_t i = 0; i < used_messages.size(); i++) {
     const Descriptor *message = used_messages[i];
     out->Print(
-        "static readonly Marshaller<$type$> $fieldname$ = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), $type$.Parser.ParseFrom);\n",
+        "static readonly Marshaller<$type$> $fieldname$ = "
+        "Marshallers.Create((arg) => "
+        "global::Google.Protobuf.MessageExtensions.ToByteArray(arg), "
+        "$type$.Parser.ParseFrom);\n",
         "fieldname", GetMarshallerFieldName(message), "type",
         GetClassName(message));
   }
   out->Print("\n");
 }
 
-void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) {
+void GenerateStaticMethodField(Printer *out, const MethodDescriptor *method) {
   out->Print(
-      "static readonly Method<$request$, $response$> $fieldname$ = new Method<$request$, $response$>(\n",
+      "static readonly Method<$request$, $response$> $fieldname$ = new "
+      "Method<$request$, $response$>(\n",
       "fieldname", GetMethodFieldName(method), "request",
       GetClassName(method->input_type()), "response",
       GetClassName(method->output_type()));
@@ -270,7 +281,7 @@ void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) {
   out->Print("$methodtype$,\n", "methodtype",
              GetCSharpMethodType(GetMethodType(method)));
   out->Print("$servicenamefield$,\n", "servicenamefield",
-               GetServiceNameFieldName());
+             GetServiceNameFieldName());
   out->Print("\"$methodname$\",\n", "methodname", method->name());
   out->Print("$requestmarshaller$,\n", "requestmarshaller",
              GetMarshallerFieldName(method->input_type()));
@@ -281,11 +292,14 @@ void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) {
   out->Outdent();
 }
 
-void GenerateServiceDescriptorProperty(Printer* out, const ServiceDescriptor *service) {
+void GenerateServiceDescriptorProperty(Printer *out,
+                                       const ServiceDescriptor *service) {
   std::ostringstream index;
   index << service->index();
   out->Print("/// <summary>Service descriptor</summary>\n");
-  out->Print("public static global::Google.Protobuf.Reflection.ServiceDescriptor Descriptor\n");
+  out->Print(
+      "public static global::Google.Protobuf.Reflection.ServiceDescriptor "
+      "Descriptor\n");
   out->Print("{\n");
   out->Print("  get { return $umbrella$.Descriptor.Services[$index$]; }\n",
              "umbrella", GetReflectionClassName(service->file()), "index",
@@ -294,9 +308,11 @@ void GenerateServiceDescriptorProperty(Printer* out, const ServiceDescriptor *se
   out->Print("\n");
 }
 
-void GenerateServerClass(Printer* out, const ServiceDescriptor *service) {
-  out->Print("/// <summary>Base class for server-side implementations of $servicename$</summary>\n",
-             "servicename", GetServiceClassName(service));
+void GenerateServerClass(Printer *out, const ServiceDescriptor *service) {
+  out->Print(
+      "/// <summary>Base class for server-side implementations of "
+      "$servicename$</summary>\n",
+      "servicename", GetServiceClassName(service));
   out->Print("public abstract class $name$\n", "name",
              GetServerClassName(service));
   out->Print("{\n");
@@ -305,7 +321,8 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor *service) {
     const MethodDescriptor *method = service->method(i);
     GenerateDocCommentBody(out, method);
     out->Print(
-        "public virtual $returntype$ $methodname$($request$$response_stream_maybe$, "
+        "public virtual $returntype$ "
+        "$methodname$($request$$response_stream_maybe$, "
         "ServerCallContext context)\n",
         "methodname", method->name(), "returntype",
         GetMethodReturnTypeServer(method), "request",
@@ -313,8 +330,9 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor *service) {
         GetMethodResponseStreamMaybe(method));
     out->Print("{\n");
     out->Indent();
-    out->Print("throw new RpcException("
-               "new Status(StatusCode.Unimplemented, \"\"));\n");
+    out->Print(
+        "throw new RpcException("
+        "new Status(StatusCode.Unimplemented, \"\"));\n");
     out->Outdent();
     out->Print("}\n\n");
   }
@@ -323,35 +341,49 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor *service) {
   out->Print("\n");
 }
 
-void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
-  out->Print("/// <summary>Client for $servicename$</summary>\n",
-             "servicename", GetServiceClassName(service));
-  out->Print(
-      "public class $name$ : ClientBase<$name$>\n",
-      "name", GetClientClassName(service));
+void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
+  out->Print("/// <summary>Client for $servicename$</summary>\n", "servicename",
+             GetServiceClassName(service));
+  out->Print("public class $name$ : ClientBase<$name$>\n", "name",
+             GetClientClassName(service));
   out->Print("{\n");
   out->Indent();
 
   // constructors
-  out->Print("public $name$(Channel channel) : base(channel)\n",
-             "name", GetClientClassName(service));
+  out->Print(
+      "/// <summary>Creates a new client for $servicename$</summary>\n"
+      "/// <param name=\"channel\">The channel to use to make remote "
+      "calls.</param>\n",
+      "servicename", GetServiceClassName(service));
+  out->Print("public $name$(Channel channel) : base(channel)\n", "name",
+             GetClientClassName(service));
   out->Print("{\n");
   out->Print("}\n");
+  out->Print(
+      "/// <summary>Creates a new client for $servicename$ that uses a custom "
+      "<c>CallInvoker</c>.</summary>\n"
+      "/// <param name=\"callInvoker\">The callInvoker to use to make remote "
+      "calls.</param>\n",
+      "servicename", GetServiceClassName(service));
   out->Print("public $name$(CallInvoker callInvoker) : base(callInvoker)\n",
              "name", GetClientClassName(service));
   out->Print("{\n");
   out->Print("}\n");
-  out->Print("///<summary>Protected parameterless constructor to allow creation"
-             " of test doubles.</summary>\n");
-  out->Print("protected $name$() : base()\n",
-             "name", GetClientClassName(service));
+  out->Print(
+      "/// <summary>Protected parameterless constructor to allow creation"
+      " of test doubles.</summary>\n");
+  out->Print("protected $name$() : base()\n", "name",
+             GetClientClassName(service));
   out->Print("{\n");
   out->Print("}\n");
-  out->Print("///<summary>Protected constructor to allow creation of configured"
-             " clients.</summary>\n");
-  out->Print("protected $name$(ClientBaseConfiguration configuration)"
-             " : base(configuration)\n",
-             "name", GetClientClassName(service));
+  out->Print(
+      "/// <summary>Protected constructor to allow creation of configured "
+      "clients.</summary>\n"
+      "/// <param name=\"configuration\">The client configuration.</param>\n");
+  out->Print(
+      "protected $name$(ClientBaseConfiguration configuration)"
+      " : base(configuration)\n",
+      "name", GetClientClassName(service));
   out->Print("{\n");
   out->Print("}\n\n");
 
@@ -362,27 +394,36 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
     if (method_type == METHODTYPE_NO_STREAMING) {
       // unary calls have an extra synchronous stub method
       GenerateDocCommentBody(out, method);
-      out->Print("public virtual $response$ $methodname$($request$ request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))\n",
+      out->Print(
+          "public virtual $response$ $methodname$($request$ request, Metadata "
+          "headers = null, DateTime? deadline = null, CancellationToken "
+          "cancellationToken = default(CancellationToken))\n",
           "methodname", method->name(), "request",
           GetClassName(method->input_type()), "response",
           GetClassName(method->output_type()));
       out->Print("{\n");
       out->Indent();
-      out->Print("return $methodname$(request, new CallOptions(headers, deadline, cancellationToken));\n",
-                 "methodname", method->name());
+      out->Print(
+          "return $methodname$(request, new CallOptions(headers, deadline, "
+          "cancellationToken));\n",
+          "methodname", method->name());
       out->Outdent();
       out->Print("}\n");
 
       // overload taking CallOptions as a param
       GenerateDocCommentBody(out, method);
-      out->Print("public virtual $response$ $methodname$($request$ request, CallOptions options)\n",
+      out->Print(
+          "public virtual $response$ $methodname$($request$ request, "
+          "CallOptions options)\n",
           "methodname", method->name(), "request",
           GetClassName(method->input_type()), "response",
           GetClassName(method->output_type()));
       out->Print("{\n");
       out->Indent();
-      out->Print("return CallInvoker.BlockingUnaryCall($methodfield$, null, options, request);\n",
-                 "methodfield", GetMethodFieldName(method));
+      out->Print(
+          "return CallInvoker.BlockingUnaryCall($methodfield$, null, options, "
+          "request);\n",
+          "methodfield", GetMethodFieldName(method));
       out->Outdent();
       out->Print("}\n");
     }
@@ -393,23 +434,28 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
     }
     GenerateDocCommentBody(out, method);
     out->Print(
-            "public virtual $returntype$ $methodname$($request_maybe$Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))\n",
-            "methodname", method_name, "request_maybe",
-            GetMethodRequestParamMaybe(method), "returntype",
-            GetMethodReturnTypeClient(method));
+        "public virtual $returntype$ $methodname$($request_maybe$Metadata "
+        "headers = null, DateTime? deadline = null, CancellationToken "
+        "cancellationToken = default(CancellationToken))\n",
+        "methodname", method_name, "request_maybe",
+        GetMethodRequestParamMaybe(method), "returntype",
+        GetMethodReturnTypeClient(method));
     out->Print("{\n");
     out->Indent();
 
-    out->Print("return $methodname$($request_maybe$new CallOptions(headers, deadline, cancellationToken));\n",
-               "methodname", method_name,
-               "request_maybe", GetMethodRequestParamMaybe(method, true));
+    out->Print(
+        "return $methodname$($request_maybe$new CallOptions(headers, deadline, "
+        "cancellationToken));\n",
+        "methodname", method_name, "request_maybe",
+        GetMethodRequestParamMaybe(method, true));
     out->Outdent();
     out->Print("}\n");
 
     // overload taking CallOptions as a param
     GenerateDocCommentBody(out, method);
     out->Print(
-        "public virtual $returntype$ $methodname$($request_maybe$CallOptions options)\n",
+        "public virtual $returntype$ $methodname$($request_maybe$CallOptions "
+        "options)\n",
         "methodname", method_name, "request_maybe",
         GetMethodRequestParamMaybe(method), "returntype",
         GetMethodReturnTypeClient(method));
@@ -417,36 +463,45 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
     out->Indent();
     switch (GetMethodType(method)) {
       case METHODTYPE_NO_STREAMING:
-        out->Print("return CallInvoker.AsyncUnaryCall($methodfield$, null, options, request);\n",
-                   "methodfield", GetMethodFieldName(method));
+        out->Print(
+            "return CallInvoker.AsyncUnaryCall($methodfield$, null, options, "
+            "request);\n",
+            "methodfield", GetMethodFieldName(method));
         break;
       case METHODTYPE_CLIENT_STREAMING:
-        out->Print("return CallInvoker.AsyncClientStreamingCall($methodfield$, null, options);\n",
-                   "methodfield", GetMethodFieldName(method));
+        out->Print(
+            "return CallInvoker.AsyncClientStreamingCall($methodfield$, null, "
+            "options);\n",
+            "methodfield", GetMethodFieldName(method));
         break;
       case METHODTYPE_SERVER_STREAMING:
         out->Print(
-            "return CallInvoker.AsyncServerStreamingCall($methodfield$, null, options, request);\n",
+            "return CallInvoker.AsyncServerStreamingCall($methodfield$, null, "
+            "options, request);\n",
             "methodfield", GetMethodFieldName(method));
         break;
       case METHODTYPE_BIDI_STREAMING:
-        out->Print("return CallInvoker.AsyncDuplexStreamingCall($methodfield$, null, options);\n",
-                   "methodfield", GetMethodFieldName(method));
+        out->Print(
+            "return CallInvoker.AsyncDuplexStreamingCall($methodfield$, null, "
+            "options);\n",
+            "methodfield", GetMethodFieldName(method));
         break;
       default:
-        GOOGLE_LOG(FATAL)<< "Can't get here.";
+        GOOGLE_LOG(FATAL) << "Can't get here.";
     }
     out->Outdent();
     out->Print("}\n");
   }
 
   // override NewInstance method
-  out->Print("protected override $name$ NewInstance(ClientBaseConfiguration configuration)\n",
-             "name", GetClientClassName(service));
+  out->Print(
+      "protected override $name$ NewInstance(ClientBaseConfiguration "
+      "configuration)\n",
+      "name", GetClientClassName(service));
   out->Print("{\n");
   out->Indent();
-  out->Print("return new $name$(configuration);\n",
-             "name", GetClientClassName(service));
+  out->Print("return new $name$(configuration);\n", "name",
+             GetClientClassName(service));
   out->Outdent();
   out->Print("}\n");
 
@@ -455,11 +510,13 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
   out->Print("\n");
 }
 
-void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor *service) {
+void GenerateBindServiceMethod(Printer *out, const ServiceDescriptor *service) {
   out->Print(
-      "/// <summary>Creates service definition that can be registered with a server</summary>\n");
+      "/// <summary>Creates service definition that can be registered with a "
+      "server</summary>\n");
   out->Print(
-      "public static ServerServiceDefinition BindService($implclass$ serviceImpl)\n",
+      "public static ServerServiceDefinition BindService($implclass$ "
+      "serviceImpl)\n",
       "implclass", GetServerClassName(service));
   out->Print("{\n");
   out->Indent();
@@ -485,21 +542,7 @@ void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor *service) {
   out->Print("\n");
 }
 
-void GenerateNewStubMethods(Printer* out, const ServiceDescriptor *service) {
-  out->Print("/// <summary>Creates a new client for $servicename$</summary>\n",
-             "servicename", GetServiceClassName(service));
-  out->Print("public static $classname$ NewClient(Channel channel)\n",
-             "classname", GetClientClassName(service));
-  out->Print("{\n");
-  out->Indent();
-  out->Print("return new $classname$(channel);\n", "classname",
-             GetClientClassName(service));
-  out->Outdent();
-  out->Print("}\n");
-  out->Print("\n");
-}
-
-void GenerateService(Printer* out, const ServiceDescriptor *service,
+void GenerateService(Printer *out, const ServiceDescriptor *service,
                      bool generate_client, bool generate_server,
                      bool internal_access) {
   GenerateDocCommentBody(out, service);
@@ -524,7 +567,6 @@ void GenerateService(Printer* out, const ServiceDescriptor *service,
   }
   if (generate_client) {
     GenerateClientStub(out, service);
-    GenerateNewStubMethods(out, service);
   }
   if (generate_server) {
     GenerateBindServiceMethod(out, service);
diff --git a/src/compiler/csharp_generator_helpers.h b/src/compiler/csharp_generator_helpers.h
index 9bdf6fb53562064bf8c99be9cde4bc6060c0fe6d..f5d36f257a2e065cff47b51b3464ae6acb7ad3b4 100644
--- a/src/compiler/csharp_generator_helpers.h
+++ b/src/compiler/csharp_generator_helpers.h
@@ -41,14 +41,16 @@ namespace grpc_csharp_generator {
 
 inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file,
                              grpc::string *file_name_or_error) {
-  *file_name_or_error = grpc_generator::FileNameInUpperCamel(file, false) + "Grpc.cs";
+  *file_name_or_error =
+      grpc_generator::FileNameInUpperCamel(file, false) + "Grpc.cs";
   return true;
 }
 
 // Get leading or trailing comments in a string. Comment lines start with "// ".
 // Leading detached comments are put in in front of leading comments.
 template <typename DescriptorType>
-inline grpc::string GetCsharpComments(const DescriptorType *desc, bool leading) {
+inline grpc::string GetCsharpComments(const DescriptorType *desc,
+                                      bool leading) {
   return grpc_generator::GetPrefixedComments(desc, leading, "//");
 }
 
diff --git a/src/compiler/csharp_plugin.cc b/src/compiler/csharp_plugin.cc
index 5350e73f10893c64a227681d999ef1d3ce641d21..7def72c54c75a67fafab657d7938cd0c4f3d7ade 100644
--- a/src/compiler/csharp_plugin.cc
+++ b/src/compiler/csharp_plugin.cc
@@ -67,10 +67,8 @@ class CSharpGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
       }
     }
 
-    grpc::string code = grpc_csharp_generator::GetServices(file,
-                                                           generate_client,
-                                                           generate_server,
-                                                           internal_access);
+    grpc::string code = grpc_csharp_generator::GetServices(
+        file, generate_client, generate_server, internal_access);
     if (code.size() == 0) {
       return true;  // don't generate a file if there are no services
     }
diff --git a/src/compiler/generator_helpers.h b/src/compiler/generator_helpers.h
index 9a88c2bfccd3c3687d5068cad9531fcdd8ed4b02..88d96c008096a68aa722e394c4350c1bf491672c 100644
--- a/src/compiler/generator_helpers.h
+++ b/src/compiler/generator_helpers.h
@@ -84,7 +84,7 @@ inline grpc::string StringReplace(grpc::string str, const grpc::string &from,
     }
     str.replace(pos, from.length(), to);
     pos += to.length();
-  } while(replace_all);
+  } while (replace_all);
 
   return str;
 }
@@ -139,8 +139,8 @@ inline grpc::string LowerUnderscoreToUpperCamel(grpc::string str) {
   return result;
 }
 
-inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *file,
-                                         bool include_package_path) {
+inline grpc::string FileNameInUpperCamel(
+    const grpc::protobuf::FileDescriptor *file, bool include_package_path) {
   std::vector<grpc::string> tokens = tokenize(StripProto(file->name()), "/");
   grpc::string result = "";
   if (include_package_path) {
@@ -152,7 +152,8 @@ inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *f
   return result;
 }
 
-inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *file) {
+inline grpc::string FileNameInUpperCamel(
+    const grpc::protobuf::FileDescriptor *file) {
   return FileNameInUpperCamel(file, true);
 }
 
@@ -163,7 +164,8 @@ enum MethodType {
   METHODTYPE_BIDI_STREAMING
 };
 
-inline MethodType GetMethodType(const grpc::protobuf::MethodDescriptor *method) {
+inline MethodType GetMethodType(
+    const grpc::protobuf::MethodDescriptor *method) {
   if (method->client_streaming()) {
     if (method->server_streaming()) {
       return METHODTYPE_BIDI_STREAMING;
@@ -254,7 +256,7 @@ inline grpc::string GenerateCommentsWithPrefix(
     const std::vector<grpc::string> &in, const grpc::string &prefix) {
   std::ostringstream oss;
   for (auto it = in.begin(); it != in.end(); it++) {
-    const grpc::string& elem = *it;
+    const grpc::string &elem = *it;
     if (elem.empty()) {
       oss << prefix << "\n";
     } else if (elem[0] == ' ') {
diff --git a/src/compiler/node_generator.cc b/src/compiler/node_generator.cc
index 1fe090d17affe499f3e9e1a730a0e7610602a615..c3852020a3a61c51288858821fcd6bccbc97df9e 100644
--- a/src/compiler/node_generator.cc
+++ b/src/compiler/node_generator.cc
@@ -67,15 +67,15 @@ grpc::string ModuleAlias(const grpc::string filename) {
 
 // Given a filename like foo/bar/baz.proto, returns the corresponding JavaScript
 // message file foo/bar/baz.js
-grpc::string GetJSMessageFilename(const grpc::string& filename) {
+grpc::string GetJSMessageFilename(const grpc::string &filename) {
   grpc::string name = filename;
   return grpc_generator::StripProto(name) + "_pb.js";
 }
 
 // Given a filename like foo/bar/baz.proto, returns the root directory
 // path ../../
-grpc::string GetRootPath(const grpc::string& from_filename,
-                         const grpc::string& to_filename) {
+grpc::string GetRootPath(const grpc::string &from_filename,
+                         const grpc::string &to_filename) {
   if (to_filename.find("google/protobuf") == 0) {
     // Well-known types (.proto files in the google/protobuf directory) are
     // assumed to come from the 'google-protobuf' npm package.  We may want to
@@ -96,21 +96,24 @@ grpc::string GetRootPath(const grpc::string& from_filename,
 
 // Return the relative path to load to_file from the directory containing
 // from_file, assuming that both paths are relative to the same directory
-grpc::string GetRelativePath(const grpc::string& from_file,
-                             const grpc::string& to_file) {
+grpc::string GetRelativePath(const grpc::string &from_file,
+                             const grpc::string &to_file) {
   return GetRootPath(from_file, to_file) + to_file;
 }
 
 /* Finds all message types used in all services in the file, and returns them
  * as a map of fully qualified message type name to message descriptor */
-map<grpc::string, const Descriptor*> GetAllMessages(const FileDescriptor *file) {
-  map<grpc::string, const Descriptor*> message_types;
-  for (int service_num = 0; service_num < file->service_count(); service_num++) {
-    const ServiceDescriptor* service = file->service(service_num);
-    for (int method_num = 0; method_num < service->method_count(); method_num++) {
-      const MethodDescriptor* method = service->method(method_num);
-      const Descriptor* input_type = method->input_type();
-      const Descriptor* output_type = method->output_type();
+map<grpc::string, const Descriptor *> GetAllMessages(
+    const FileDescriptor *file) {
+  map<grpc::string, const Descriptor *> message_types;
+  for (int service_num = 0; service_num < file->service_count();
+       service_num++) {
+    const ServiceDescriptor *service = file->service(service_num);
+    for (int method_num = 0; method_num < service->method_count();
+         method_num++) {
+      const MethodDescriptor *method = service->method(method_num);
+      const Descriptor *input_type = method->input_type();
+      const Descriptor *output_type = method->output_type();
       message_types[input_type->name()] = input_type;
       message_types[output_type->name()] = output_type;
     }
@@ -118,7 +121,7 @@ map<grpc::string, const Descriptor*> GetAllMessages(const FileDescriptor *file)
   return message_types;
 }
 
-grpc::string MessageIdentifierName(const grpc::string& name) {
+grpc::string MessageIdentifierName(const grpc::string &name) {
   return grpc_generator::StringReplace(name, ".", "_");
 }
 
@@ -194,18 +197,18 @@ void PrintService(const ServiceDescriptor *service, Printer *out) {
   out->Print(template_vars, "var $name$Service = exports.$name$Service = {\n");
   out->Indent();
   for (int i = 0; i < service->method_count(); i++) {
-    grpc::string method_name = grpc_generator::LowercaseFirstLetter(
-        service->method(i)->name());
+    grpc::string method_name =
+        grpc_generator::LowercaseFirstLetter(service->method(i)->name());
     out->Print(GetNodeComments(service->method(i), true).c_str());
-    out->Print("$method_name$: ",
-               "method_name", method_name);
+    out->Print("$method_name$: ", "method_name", method_name);
     PrintMethod(service->method(i), out);
     out->Print(",\n");
     out->Print(GetNodeComments(service->method(i), false).c_str());
   }
   out->Outdent();
   out->Print("};\n\n");
-  out->Print(template_vars, "exports.$name$Client = "
+  out->Print(template_vars,
+             "exports.$name$Client = "
              "grpc.makeGenericClientConstructor($name$Service);\n");
   out->Print(GetNodeComments(service, false).c_str());
 }
@@ -213,27 +216,25 @@ void PrintService(const ServiceDescriptor *service, Printer *out) {
 void PrintImports(const FileDescriptor *file, Printer *out) {
   out->Print("var grpc = require('grpc');\n");
   if (file->message_type_count() > 0) {
-    grpc::string file_path = GetRelativePath(file->name(),
-                                             GetJSMessageFilename(
-                                                 file->name()));
-    out->Print("var $module_alias$ = require('$file_path$');\n",
-               "module_alias", ModuleAlias(file->name()),
-               "file_path", file_path);
+    grpc::string file_path =
+        GetRelativePath(file->name(), GetJSMessageFilename(file->name()));
+    out->Print("var $module_alias$ = require('$file_path$');\n", "module_alias",
+               ModuleAlias(file->name()), "file_path", file_path);
   }
 
   for (int i = 0; i < file->dependency_count(); i++) {
     grpc::string file_path = GetRelativePath(
         file->name(), GetJSMessageFilename(file->dependency(i)->name()));
-    out->Print("var $module_alias$ = require('$file_path$');\n",
-               "module_alias", ModuleAlias(file->dependency(i)->name()),
-               "file_path", file_path);
+    out->Print("var $module_alias$ = require('$file_path$');\n", "module_alias",
+               ModuleAlias(file->dependency(i)->name()), "file_path",
+               file_path);
   }
   out->Print("\n");
 }
 
 void PrintTransformers(const FileDescriptor *file, Printer *out) {
-  map<grpc::string, const Descriptor*> messages = GetAllMessages(file);
-  for (std::map<grpc::string, const Descriptor*>::iterator it =
+  map<grpc::string, const Descriptor *> messages = GetAllMessages(file);
+  for (std::map<grpc::string, const Descriptor *>::iterator it =
            messages.begin();
        it != messages.end(); it++) {
     PrintMessageTransformer(it->second, out);
@@ -246,7 +247,6 @@ void PrintServices(const FileDescriptor *file, Printer *out) {
     PrintService(file->service(i), out);
   }
 }
-
 }
 
 grpc::string GenerateFile(const FileDescriptor *file) {
diff --git a/src/compiler/node_generator_helpers.h b/src/compiler/node_generator_helpers.h
index 5862772841c5e832a7a32ed7124e1101d13a3323..efe94ab00d66ab5a08dc869c7ebac16a0bad591c 100644
--- a/src/compiler/node_generator_helpers.h
+++ b/src/compiler/node_generator_helpers.h
@@ -48,7 +48,7 @@ inline grpc::string GetJSServiceFilename(const grpc::string& filename) {
 // Get leading or trailing comments in a string. Comment lines start with "// ".
 // Leading detached comments are put in front of leading comments.
 template <typename DescriptorType>
-inline grpc::string GetNodeComments(const DescriptorType *desc, bool leading) {
+inline grpc::string GetNodeComments(const DescriptorType* desc, bool leading) {
   return grpc_generator::GetPrefixedComments(desc, leading, "//");
 }
 
diff --git a/src/compiler/objective_c_generator.cc b/src/compiler/objective_c_generator.cc
index 4be8cb41879cbddd3a7a5f2f70008d8ccc100b1f..1d7faf120dc4e5c475204d06d6392bd6ce62deb2 100644
--- a/src/compiler/objective_c_generator.cc
+++ b/src/compiler/objective_c_generator.cc
@@ -49,9 +49,9 @@ using ::std::map;
 namespace grpc_objective_c_generator {
 namespace {
 
-void PrintProtoRpcDeclarationAsPragma(Printer *printer,
-                                      const MethodDescriptor *method,
-                                      map< ::grpc::string, ::grpc::string> vars) {
+void PrintProtoRpcDeclarationAsPragma(
+    Printer *printer, const MethodDescriptor *method,
+    map< ::grpc::string, ::grpc::string> vars) {
   vars["client_stream"] = method->client_streaming() ? "stream " : "";
   vars["server_stream"] = method->server_streaming() ? "stream " : "";
 
@@ -61,7 +61,7 @@ void PrintProtoRpcDeclarationAsPragma(Printer *printer,
 }
 
 template <typename DescriptorType>
-static void PrintAllComments(const DescriptorType* desc, Printer* printer) {
+static void PrintAllComments(const DescriptorType *desc, Printer *printer) {
   std::vector<grpc::string> comments;
   grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_LEADING_DETACHED,
                              &comments);
@@ -100,7 +100,8 @@ void PrintMethodSignature(Printer *printer, const MethodDescriptor *method,
   if (method->server_streaming()) {
     printer->Print(vars,
                    " eventHandler:(void(^)(BOOL done, "
-                   "$response_class$ *_Nullable response, NSError *_Nullable error))eventHandler");
+                   "$response_class$ *_Nullable response, NSError *_Nullable "
+                   "error))eventHandler");
   } else {
     printer->Print(vars,
                    " handler:(void(^)($response_class$ *_Nullable response, "
@@ -123,7 +124,8 @@ void PrintAdvancedSignature(Printer *printer, const MethodDescriptor *method,
   PrintMethodSignature(printer, method, vars);
 }
 
-inline map< ::grpc::string, ::grpc::string> GetMethodVars(const MethodDescriptor *method) {
+inline map< ::grpc::string, ::grpc::string> GetMethodVars(
+    const MethodDescriptor *method) {
   map< ::grpc::string, ::grpc::string> res;
   res["method_name"] = method->name();
   res["request_type"] = method->input_type()->name();
@@ -210,7 +212,8 @@ void PrintMethodImplementations(Printer *printer,
     grpc::protobuf::io::StringOutputStream output_stream(&output);
     Printer printer(&output_stream, '$');
 
-    map< ::grpc::string, ::grpc::string> vars = {{"service_class", ServiceClassName(service)}};
+    map< ::grpc::string, ::grpc::string> vars = {
+        {"service_class", ServiceClassName(service)}};
 
     printer.Print(vars, "@protocol $service_class$ <NSObject>\n\n");
 
@@ -237,21 +240,23 @@ void PrintMethodImplementations(Printer *printer,
 }
 
 ::grpc::string GetSource(const ServiceDescriptor *service) {
- ::grpc::string output;
+  ::grpc::string output;
   {
     // Scope the output stream so it closes and finalizes output to the string.
     grpc::protobuf::io::StringOutputStream output_stream(&output);
     Printer printer(&output_stream, '$');
 
-    map< ::grpc::string,::grpc::string> vars = {{"service_name", service->name()},
-                                {"service_class", ServiceClassName(service)},
-                                {"package", service->file()->package()}};
+    map< ::grpc::string, ::grpc::string> vars = {
+        {"service_name", service->name()},
+        {"service_class", ServiceClassName(service)},
+        {"package", service->file()->package()}};
 
     printer.Print(vars, "@implementation $service_class$\n\n");
 
     printer.Print("// Designated initializer\n");
     printer.Print("- (instancetype)initWithHost:(NSString *)host {\n");
-    printer.Print(vars,
+    printer.Print(
+        vars,
         "  return (self = [super initWithHost:host"
         " packageName:@\"$package$\" serviceName:@\"$service_name$\"]);\n");
     printer.Print("}\n\n");
diff --git a/src/compiler/objective_c_generator_helpers.h b/src/compiler/objective_c_generator_helpers.h
index 1f8c80014f0fac4e4f7866782a0462766793c080..b482f028a10ffeee10ad1aa773dce4ba96a3df9c 100644
--- a/src/compiler/objective_c_generator_helpers.h
+++ b/src/compiler/objective_c_generator_helpers.h
@@ -53,6 +53,5 @@ inline string ServiceClassName(const ServiceDescriptor *service) {
   string prefix = file->options().objc_class_prefix();
   return prefix + service->name();
 }
-
 }
 #endif  // GRPC_INTERNAL_COMPILER_OBJECTIVE_C_GENERATOR_HELPERS_H
diff --git a/src/compiler/objective_c_plugin.cc b/src/compiler/objective_c_plugin.cc
index 3ccfd5b037c54d9e5cb78090465b749254cfb51a..8de0997ebeae33f503c725219c1690570bab4c3c 100644
--- a/src/compiler/objective_c_plugin.cc
+++ b/src/compiler/objective_c_plugin.cc
@@ -39,6 +39,12 @@
 #include "src/compiler/objective_c_generator.h"
 #include "src/compiler/objective_c_generator_helpers.h"
 
+#include <google/protobuf/compiler/objectivec/objectivec_helpers.h>
+
+using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName;
+using ::google::protobuf::compiler::objectivec::
+    IsProtobufLibraryBundledProtoFile;
+
 class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
  public:
   ObjectiveCGrpcGenerator() {}
@@ -48,7 +54,6 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
                         const ::grpc::string &parameter,
                         grpc::protobuf::compiler::GeneratorContext *context,
                         ::grpc::string *error) const {
-
     if (file->service_count() == 0) {
       // No services.  Do nothing.
       return true;
@@ -61,18 +66,35 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
       // Generate .pbrpc.h
 
       ::grpc::string imports = ::grpc::string("#import \"") + file_name +
-        ".pbobjc.h\"\n\n"
-        "#import <ProtoRPC/ProtoService.h>\n"
-        "#import <RxLibrary/GRXWriteable.h>\n"
-        "#import <RxLibrary/GRXWriter.h>\n";
+                               ".pbobjc.h\"\n\n"
+                               "#import <ProtoRPC/ProtoService.h>\n"
+                               "#import <RxLibrary/GRXWriteable.h>\n"
+                               "#import <RxLibrary/GRXWriter.h>\n";
 
       // TODO(jcanizales): Instead forward-declare the input and output types
       // and import the files in the .pbrpc.m
       ::grpc::string proto_imports;
       for (int i = 0; i < file->dependency_count(); i++) {
-        ::grpc::string header = grpc_objective_c_generator::MessageHeaderName(
-            file->dependency(i));
-        proto_imports += ::grpc::string("#import \"") + header + "\"\n";
+        ::grpc::string header =
+            grpc_objective_c_generator::MessageHeaderName(file->dependency(i));
+        const grpc::protobuf::FileDescriptor *dependency = file->dependency(i);
+        if (IsProtobufLibraryBundledProtoFile(dependency)) {
+          ::grpc::string base_name = header;
+          grpc_generator::StripPrefix(&base_name, "google/protobuf/");
+          // create the import code snippet
+          proto_imports +=
+              "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n"
+              "  #import <" +
+              ::grpc::string(ProtobufLibraryFrameworkName) + "/" + base_name +
+              ">\n"
+              "#else\n"
+              "  #import \"" +
+              header +
+              "\"\n"
+              "#endif\n";
+        } else {
+          proto_imports += ::grpc::string("#import \"") + header + "\"\n";
+        }
       }
 
       ::grpc::string declarations;
@@ -81,21 +103,22 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
         declarations += grpc_objective_c_generator::GetHeader(service);
       }
 
-      static const ::grpc::string kNonNullBegin = "\nNS_ASSUME_NONNULL_BEGIN\n\n";
+      static const ::grpc::string kNonNullBegin =
+          "\nNS_ASSUME_NONNULL_BEGIN\n\n";
       static const ::grpc::string kNonNullEnd = "\nNS_ASSUME_NONNULL_END\n";
 
-      Write(context, file_name + ".pbrpc.h",
-          imports + '\n' + proto_imports + '\n' + kNonNullBegin + 
-          declarations + kNonNullEnd);
+      Write(context, file_name + ".pbrpc.h", imports + '\n' + proto_imports +
+                                                 '\n' + kNonNullBegin +
+                                                 declarations + kNonNullEnd);
     }
 
     {
       // Generate .pbrpc.m
 
       ::grpc::string imports = ::grpc::string("#import \"") + file_name +
-        ".pbrpc.h\"\n\n"
-        "#import <ProtoRPC/ProtoRPC.h>\n"
-        "#import <RxLibrary/GRXWriter+Immediate.h>\n";
+                               ".pbrpc.h\"\n\n"
+                               "#import <ProtoRPC/ProtoRPC.h>\n"
+                               "#import <RxLibrary/GRXWriter+Immediate.h>\n";
 
       ::grpc::string definitions;
       for (int i = 0; i < file->service_count(); i++) {
@@ -112,7 +135,7 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
  private:
   // Write the given code into the given file.
   void Write(grpc::protobuf::compiler::GeneratorContext *context,
-              const ::grpc::string &filename, const ::grpc::string &code) const {
+             const ::grpc::string &filename, const ::grpc::string &code) const {
     std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> output(
         context->Open(filename));
     grpc::protobuf::io::CodedOutputStream coded_out(output.get());
diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc
index 15cda474ccd37a423268dc3e381d99b74548e851..0f61b1fb6c03c9d5eb651d9a5178d12d32fc5411 100644
--- a/src/compiler/python_generator.cc
+++ b/src/compiler/python_generator.cc
@@ -66,21 +66,26 @@ using std::vector;
 
 namespace grpc_python_generator {
 
+GeneratorConfiguration::GeneratorConfiguration()
+    : grpc_package_root("grpc"), beta_package_root("grpc.beta") {}
+
 PythonGrpcGenerator::PythonGrpcGenerator(const GeneratorConfiguration& config)
     : config_(config) {}
 
 PythonGrpcGenerator::~PythonGrpcGenerator() {}
 
-bool PythonGrpcGenerator::Generate(
-    const FileDescriptor* file, const grpc::string& parameter,
-    GeneratorContext* context, grpc::string* error) const {
+bool PythonGrpcGenerator::Generate(const FileDescriptor* file,
+                                   const grpc::string& parameter,
+                                   GeneratorContext* context,
+                                   grpc::string* error) const {
   // Get output file name.
   grpc::string file_name;
   static const int proto_suffix_length = strlen(".proto");
   if (file->name().size() > static_cast<size_t>(proto_suffix_length) &&
       file->name().find_last_of(".proto") == file->name().size() - 1) {
-    file_name = file->name().substr(
-        0, file->name().size() - proto_suffix_length) + "_pb2.py";
+    file_name =
+        file->name().substr(0, file->name().size() - proto_suffix_length) +
+        "_pb2.py";
   } else {
     *error = "Invalid proto file name. Proto file must end with .proto";
     return false;
@@ -112,7 +117,7 @@ map<grpc::string, grpc::string> ListToDict(
   assert(values.size() % 2 == 0);
   map<grpc::string, grpc::string> value_map;
   auto value_iter = values.begin();
-  for (unsigned i = 0; i < values.size()/2; ++i) {
+  for (unsigned i = 0; i < values.size() / 2; ++i) {
     grpc::string key = *value_iter;
     ++value_iter;
     grpc::string value = *value_iter;
@@ -135,9 +140,7 @@ class IndentScope {
     printer_->Indent();
   }
 
-  ~IndentScope() {
-    printer_->Outdent();
-  }
+  ~IndentScope() { printer_->Outdent(); }
 
  private:
   Printer* printer_;
@@ -170,7 +173,6 @@ grpc::string ModuleAlias(const grpc::string& filename) {
   return module_name;
 }
 
-
 bool GetModuleAndMessagePath(const Descriptor* type,
                              const ServiceDescriptor* service,
                              grpc::string* out) {
@@ -179,7 +181,7 @@ bool GetModuleAndMessagePath(const Descriptor* type,
   do {
     message_path.push_back(path_elem_type);
     path_elem_type = path_elem_type->containing_type();
-  } while (path_elem_type); // implicit nullptr comparison; don't be explicit
+  } while (path_elem_type);  // implicit nullptr comparison; don't be explicit
   grpc::string file_name = type->file()->name();
   static const int proto_suffix_length = strlen(".proto");
   if (!(file_name.size() > static_cast<size_t>(proto_suffix_length) &&
@@ -187,11 +189,11 @@ bool GetModuleAndMessagePath(const Descriptor* type,
     return false;
   }
   grpc::string service_file_name = service->file()->name();
-  grpc::string module = service_file_name == file_name ?
-          "" : ModuleAlias(file_name) + ".";
+  grpc::string module =
+      service_file_name == file_name ? "" : ModuleAlias(file_name) + ".";
   grpc::string message_type;
-  for (auto path_iter = message_path.rbegin();
-       path_iter != message_path.rend(); ++path_iter) {
+  for (auto path_iter = message_path.rbegin(); path_iter != message_path.rend();
+       ++path_iter) {
     message_type += (*path_iter)->name() + ".";
   }
   // no pop_back prior to C++11
@@ -226,8 +228,7 @@ static void PrintAllComments(const DescriptorType* desc, Printer* printer) {
   printer->Print("\"\"\"\n");
 }
 
-bool PrintBetaServicer(const ServiceDescriptor* service,
-                       Printer* out) {
+bool PrintBetaServicer(const ServiceDescriptor* service, Printer* out) {
   out->Print("\n\n");
   out->Print("class Beta$Service$Servicer(object):\n", "Service",
              service->name());
@@ -236,10 +237,10 @@ bool PrintBetaServicer(const ServiceDescriptor* service,
     PrintAllComments(service, out);
     for (int i = 0; i < service->method_count(); ++i) {
       auto meth = service->method(i);
-      grpc::string arg_name = meth->client_streaming() ?
-          "request_iterator" : "request";
-      out->Print("def $Method$(self, $ArgName$, context):\n",
-                 "Method", meth->name(), "ArgName", arg_name);
+      grpc::string arg_name =
+          meth->client_streaming() ? "request_iterator" : "request";
+      out->Print("def $Method$(self, $ArgName$, context):\n", "Method",
+                 meth->name(), "ArgName", arg_name);
       {
         IndentScope raii_method_indent(out);
         PrintAllComments(meth, out);
@@ -250,8 +251,7 @@ bool PrintBetaServicer(const ServiceDescriptor* service,
   return true;
 }
 
-bool PrintBetaStub(const ServiceDescriptor* service,
-                   Printer* out) {
+bool PrintBetaStub(const ServiceDescriptor* service, Printer* out) {
   out->Print("\n\n");
   out->Print("class Beta$Service$Stub(object):\n", "Service", service->name());
   {
@@ -259,10 +259,12 @@ bool PrintBetaStub(const ServiceDescriptor* service,
     PrintAllComments(service, out);
     for (int i = 0; i < service->method_count(); ++i) {
       const MethodDescriptor* meth = service->method(i);
-      grpc::string arg_name = meth->client_streaming() ?
-          "request_iterator" : "request";
+      grpc::string arg_name =
+          meth->client_streaming() ? "request_iterator" : "request";
       auto methdict = ListToDict({"Method", meth->name(), "ArgName", arg_name});
-      out->Print(methdict, "def $Method$(self, $ArgName$, timeout, metadata=None, with_call=False, protocol_options=None):\n");
+      out->Print(methdict,
+                 "def $Method$(self, $ArgName$, timeout, metadata=None, "
+                 "with_call=False, protocol_options=None):\n");
       {
         IndentScope raii_method_indent(out);
         PrintAllComments(meth, out);
@@ -279,9 +281,10 @@ bool PrintBetaStub(const ServiceDescriptor* service,
 bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name,
                             const ServiceDescriptor* service, Printer* out) {
   out->Print("\n\n");
-  out->Print("def beta_create_$Service$_server(servicer, pool=None, "
-             "pool_size=None, default_timeout=None, maximum_timeout=None):\n",
-             "Service", service->name());
+  out->Print(
+      "def beta_create_$Service$_server(servicer, pool=None, "
+      "pool_size=None, default_timeout=None, maximum_timeout=None):\n",
+      "Service", service->name());
   {
     IndentScope raii_create_server_indent(out);
     map<grpc::string, grpc::string> method_implementation_constructors;
@@ -312,58 +315,62 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name,
     }
     out->Print("request_deserializers = {\n");
     for (auto name_and_input_module_class_pair =
-           input_message_modules_and_classes.begin();
+             input_message_modules_and_classes.begin();
          name_and_input_module_class_pair !=
-           input_message_modules_and_classes.end();
+         input_message_modules_and_classes.end();
          name_and_input_module_class_pair++) {
       IndentScope raii_indent(out);
-      out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
-                 "$InputTypeModuleAndClass$.FromString,\n",
-                 "PackageQualifiedServiceName", package_qualified_service_name,
-                 "MethodName", name_and_input_module_class_pair->first,
-                 "InputTypeModuleAndClass",
-                 name_and_input_module_class_pair->second);
+      out->Print(
+          "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
+          "$InputTypeModuleAndClass$.FromString,\n",
+          "PackageQualifiedServiceName", package_qualified_service_name,
+          "MethodName", name_and_input_module_class_pair->first,
+          "InputTypeModuleAndClass", name_and_input_module_class_pair->second);
     }
     out->Print("}\n");
     out->Print("response_serializers = {\n");
     for (auto name_and_output_module_class_pair =
-           output_message_modules_and_classes.begin();
+             output_message_modules_and_classes.begin();
          name_and_output_module_class_pair !=
-           output_message_modules_and_classes.end();
+         output_message_modules_and_classes.end();
          name_and_output_module_class_pair++) {
       IndentScope raii_indent(out);
-      out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
-                 "$OutputTypeModuleAndClass$.SerializeToString,\n",
-                 "PackageQualifiedServiceName", package_qualified_service_name,
-                 "MethodName", name_and_output_module_class_pair->first,
-                 "OutputTypeModuleAndClass",
-                 name_and_output_module_class_pair->second);
+      out->Print(
+          "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
+          "$OutputTypeModuleAndClass$.SerializeToString,\n",
+          "PackageQualifiedServiceName", package_qualified_service_name,
+          "MethodName", name_and_output_module_class_pair->first,
+          "OutputTypeModuleAndClass",
+          name_and_output_module_class_pair->second);
     }
     out->Print("}\n");
     out->Print("method_implementations = {\n");
     for (auto name_and_implementation_constructor =
-           method_implementation_constructors.begin();
-	 name_and_implementation_constructor !=
-	   method_implementation_constructors.end();
-	 name_and_implementation_constructor++) {
+             method_implementation_constructors.begin();
+         name_and_implementation_constructor !=
+         method_implementation_constructors.end();
+         name_and_implementation_constructor++) {
       IndentScope raii_descriptions_indent(out);
       const grpc::string method_name =
           name_and_implementation_constructor->first;
-      out->Print("(\'$PackageQualifiedServiceName$\', \'$Method$\'): "
-                 "face_utilities.$Constructor$(servicer.$Method$),\n",
-                 "PackageQualifiedServiceName", package_qualified_service_name,
-                 "Method", name_and_implementation_constructor->first,
-                 "Constructor", name_and_implementation_constructor->second);
+      out->Print(
+          "(\'$PackageQualifiedServiceName$\', \'$Method$\'): "
+          "face_utilities.$Constructor$(servicer.$Method$),\n",
+          "PackageQualifiedServiceName", package_qualified_service_name,
+          "Method", name_and_implementation_constructor->first, "Constructor",
+          name_and_implementation_constructor->second);
     }
     out->Print("}\n");
-    out->Print("server_options = beta_implementations.server_options("
-               "request_deserializers=request_deserializers, "
-               "response_serializers=response_serializers, "
-               "thread_pool=pool, thread_pool_size=pool_size, "
-               "default_timeout=default_timeout, "
-               "maximum_timeout=maximum_timeout)\n");
-    out->Print("return beta_implementations.server(method_implementations, "
-               "options=server_options)\n");
+    out->Print(
+        "server_options = beta_implementations.server_options("
+        "request_deserializers=request_deserializers, "
+        "response_serializers=response_serializers, "
+        "thread_pool=pool, thread_pool_size=pool_size, "
+        "default_timeout=default_timeout, "
+        "maximum_timeout=maximum_timeout)\n");
+    out->Print(
+        "return beta_implementations.server(method_implementations, "
+        "options=server_options)\n");
   }
   return true;
 }
@@ -371,10 +378,11 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name,
 bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name,
                           const ServiceDescriptor* service, Printer* out) {
   map<grpc::string, grpc::string> dict = ListToDict({
-        "Service", service->name(),
-      });
+      "Service", service->name(),
+  });
   out->Print("\n\n");
-  out->Print(dict, "def beta_create_$Service$_stub(channel, host=None,"
+  out->Print(dict,
+             "def beta_create_$Service$_stub(channel, host=None,"
              " metadata_transformer=None, pool=None, pool_size=None):\n");
   {
     IndentScope raii_create_server_indent(out);
@@ -384,8 +392,7 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name,
     for (int i = 0; i < service->method_count(); ++i) {
       const MethodDescriptor* method = service->method(i);
       const grpc::string method_cardinality =
-          grpc::string(method->client_streaming() ? "STREAM" : "UNARY") +
-          "_" +
+          grpc::string(method->client_streaming() ? "STREAM" : "UNARY") + "_" +
           grpc::string(method->server_streaming() ? "STREAM" : "UNARY");
       grpc::string input_message_module_and_class;
       if (!GetModuleAndMessagePath(method->input_type(), service,
@@ -406,32 +413,33 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name,
     }
     out->Print("request_serializers = {\n");
     for (auto name_and_input_module_class_pair =
-           input_message_modules_and_classes.begin();
+             input_message_modules_and_classes.begin();
          name_and_input_module_class_pair !=
-           input_message_modules_and_classes.end();
+         input_message_modules_and_classes.end();
          name_and_input_module_class_pair++) {
       IndentScope raii_indent(out);
-      out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
-                 "$InputTypeModuleAndClass$.SerializeToString,\n",
-                 "PackageQualifiedServiceName", package_qualified_service_name,
-                 "MethodName", name_and_input_module_class_pair->first,
-                 "InputTypeModuleAndClass",
-                 name_and_input_module_class_pair->second);
+      out->Print(
+          "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
+          "$InputTypeModuleAndClass$.SerializeToString,\n",
+          "PackageQualifiedServiceName", package_qualified_service_name,
+          "MethodName", name_and_input_module_class_pair->first,
+          "InputTypeModuleAndClass", name_and_input_module_class_pair->second);
     }
     out->Print("}\n");
     out->Print("response_deserializers = {\n");
     for (auto name_and_output_module_class_pair =
-           output_message_modules_and_classes.begin();
+             output_message_modules_and_classes.begin();
          name_and_output_module_class_pair !=
-           output_message_modules_and_classes.end();
+         output_message_modules_and_classes.end();
          name_and_output_module_class_pair++) {
       IndentScope raii_indent(out);
-      out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
-                 "$OutputTypeModuleAndClass$.FromString,\n",
-                 "PackageQualifiedServiceName", package_qualified_service_name,
-                 "MethodName", name_and_output_module_class_pair->first,
-                 "OutputTypeModuleAndClass",
-                 name_and_output_module_class_pair->second);
+      out->Print(
+          "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): "
+          "$OutputTypeModuleAndClass$.FromString,\n",
+          "PackageQualifiedServiceName", package_qualified_service_name,
+          "MethodName", name_and_output_module_class_pair->first,
+          "OutputTypeModuleAndClass",
+          name_and_output_module_class_pair->second);
     }
     out->Print("}\n");
     out->Print("cardinalities = {\n");
@@ -440,17 +448,19 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name,
          name_and_cardinality++) {
       IndentScope raii_descriptions_indent(out);
       out->Print("\'$Method$\': cardinality.Cardinality.$Cardinality$,\n",
-                 "Method", name_and_cardinality->first,
-                 "Cardinality", name_and_cardinality->second);
+                 "Method", name_and_cardinality->first, "Cardinality",
+                 name_and_cardinality->second);
     }
     out->Print("}\n");
-    out->Print("stub_options = beta_implementations.stub_options("
-               "host=host, metadata_transformer=metadata_transformer, "
-               "request_serializers=request_serializers, "
-               "response_deserializers=response_deserializers, "
-               "thread_pool=pool, thread_pool_size=pool_size)\n");
     out->Print(
-        "return beta_implementations.dynamic_stub(channel, \'$PackageQualifiedServiceName$\', "
+        "stub_options = beta_implementations.stub_options("
+        "host=host, metadata_transformer=metadata_transformer, "
+        "request_serializers=request_serializers, "
+        "response_deserializers=response_deserializers, "
+        "thread_pool=pool, thread_pool_size=pool_size)\n");
+    out->Print(
+        "return beta_implementations.dynamic_stub(channel, "
+        "\'$PackageQualifiedServiceName$\', "
         "cardinalities, options=stub_options)\n",
         "PackageQualifiedServiceName", package_qualified_service_name);
   }
@@ -473,43 +483,41 @@ bool PrintStub(const grpc::string& package_qualified_service_name,
       out->Print("Args:\n");
       {
         IndentScope raii_args_indent(out);
-	out->Print("channel: A grpc.Channel.\n");
+        out->Print("channel: A grpc.Channel.\n");
       }
       out->Print("\"\"\"\n");
       for (int i = 0; i < service->method_count(); ++i) {
         auto method = service->method(i);
-	auto multi_callable_constructor =
-	    grpc::string(method->client_streaming() ? "stream" : "unary") +
-	    "_" +
-	    grpc::string(method->server_streaming() ? "stream" : "unary");
-	grpc::string request_module_and_class;
-	if (!GetModuleAndMessagePath(method->input_type(), service,
-				     &request_module_and_class)) {
-	  return false;
-	}
-	grpc::string response_module_and_class;
-	if (!GetModuleAndMessagePath(method->output_type(), service,
-				     &response_module_and_class)) {
+        auto multi_callable_constructor =
+            grpc::string(method->client_streaming() ? "stream" : "unary") +
+            "_" + grpc::string(method->server_streaming() ? "stream" : "unary");
+        grpc::string request_module_and_class;
+        if (!GetModuleAndMessagePath(method->input_type(), service,
+                                     &request_module_and_class)) {
+          return false;
+        }
+        grpc::string response_module_and_class;
+        if (!GetModuleAndMessagePath(method->output_type(), service,
+                                     &response_module_and_class)) {
           return false;
-	}
-	out->Print("self.$Method$ = channel.$MultiCallableConstructor$(\n",
-		   "Method", method->name(),
-		   "MultiCallableConstructor", multi_callable_constructor);
-	{
+        }
+        out->Print("self.$Method$ = channel.$MultiCallableConstructor$(\n",
+                   "Method", method->name(), "MultiCallableConstructor",
+                   multi_callable_constructor);
+        {
           IndentScope raii_first_attribute_indent(out);
           IndentScope raii_second_attribute_indent(out);
-	  out->Print(
-	      "'/$PackageQualifiedService$/$Method$',\n",
-	      "PackageQualifiedService", package_qualified_service_name,
-	      "Method", method->name());
-	  out->Print(
-	      "request_serializer=$RequestModuleAndClass$.SerializeToString,\n",
-	      "RequestModuleAndClass", request_module_and_class);
-	  out->Print(
+          out->Print("'/$PackageQualifiedService$/$Method$',\n",
+                     "PackageQualifiedService", package_qualified_service_name,
+                     "Method", method->name());
+          out->Print(
+              "request_serializer=$RequestModuleAndClass$.SerializeToString,\n",
+              "RequestModuleAndClass", request_module_and_class);
+          out->Print(
               "response_deserializer=$ResponseModuleAndClass$.FromString,\n",
-	      "ResponseModuleAndClass", response_module_and_class);
-	  out->Print(")\n");
-	}
+              "ResponseModuleAndClass", response_module_and_class);
+          out->Print(")\n");
+        }
       }
     }
   }
@@ -524,11 +532,11 @@ bool PrintServicer(const ServiceDescriptor* service, Printer* out) {
     PrintAllComments(service, out);
     for (int i = 0; i < service->method_count(); ++i) {
       auto method = service->method(i);
-      grpc::string arg_name = method->client_streaming() ?
-	  "request_iterator" : "request";
+      grpc::string arg_name =
+          method->client_streaming() ? "request_iterator" : "request";
       out->Print("\n");
-      out->Print("def $Method$(self, $ArgName$, context):\n",
-                 "Method", method->name(), "ArgName", arg_name);
+      out->Print("def $Method$(self, $ArgName$, context):\n", "Method",
+                 method->name(), "ArgName", arg_name);
       {
         IndentScope raii_method_indent(out);
         PrintAllComments(method, out);
@@ -541,11 +549,12 @@ bool PrintServicer(const ServiceDescriptor* service, Printer* out) {
   return true;
 }
 
-bool PrintAddServicerToServer(const grpc::string& package_qualified_service_name,
-			      const ServiceDescriptor* service, Printer* out) {
+bool PrintAddServicerToServer(
+    const grpc::string& package_qualified_service_name,
+    const ServiceDescriptor* service, Printer* out) {
   out->Print("\n\n");
   out->Print("def add_$Service$Servicer_to_server(servicer, server):\n",
-	     "Service", service->name());
+             "Service", service->name());
   {
     IndentScope raii_class_indent(out);
     out->Print("rpc_method_handlers = {\n");
@@ -554,34 +563,37 @@ bool PrintAddServicerToServer(const grpc::string& package_qualified_service_name
       IndentScope raii_dict_second_indent(out);
       for (int i = 0; i < service->method_count(); ++i) {
         auto method = service->method(i);
-	auto method_handler_constructor =
+        auto method_handler_constructor =
             grpc::string(method->client_streaming() ? "stream" : "unary") +
-	    "_" +
+            "_" +
             grpc::string(method->server_streaming() ? "stream" : "unary") +
             "_rpc_method_handler";
-	grpc::string request_module_and_class;
-	if (!GetModuleAndMessagePath(method->input_type(), service,
-				     &request_module_and_class)) {
-	  return false;
-	}
-	grpc::string response_module_and_class;
-	if (!GetModuleAndMessagePath(method->output_type(), service,
-				     &response_module_and_class)) {
+        grpc::string request_module_and_class;
+        if (!GetModuleAndMessagePath(method->input_type(), service,
+                                     &request_module_and_class)) {
+          return false;
+        }
+        grpc::string response_module_and_class;
+        if (!GetModuleAndMessagePath(method->output_type(), service,
+                                     &response_module_and_class)) {
           return false;
-	}
-        out->Print("'$Method$': grpc.$MethodHandlerConstructor$(\n",
-		   "Method", method->name(),
-		   "MethodHandlerConstructor", method_handler_constructor);
-	{
+        }
+        out->Print("'$Method$': grpc.$MethodHandlerConstructor$(\n", "Method",
+                   method->name(), "MethodHandlerConstructor",
+                   method_handler_constructor);
+        {
           IndentScope raii_call_first_indent(out);
-	  IndentScope raii_call_second_indent(out);
-	  out->Print("servicer.$Method$,\n", "Method", method->name());
-	  out->Print("request_deserializer=$RequestModuleAndClass$.FromString,\n",
-		     "RequestModuleAndClass", request_module_and_class);
-	  out->Print("response_serializer=$ResponseModuleAndClass$.SerializeToString,\n",
-		     "ResponseModuleAndClass", response_module_and_class);
-	}
-	out->Print("),\n");
+          IndentScope raii_call_second_indent(out);
+          out->Print("servicer.$Method$,\n", "Method", method->name());
+          out->Print(
+              "request_deserializer=$RequestModuleAndClass$.FromString,\n",
+              "RequestModuleAndClass", request_module_and_class);
+          out->Print(
+              "response_serializer=$ResponseModuleAndClass$.SerializeToString,"
+              "\n",
+              "ResponseModuleAndClass", response_module_and_class);
+        }
+        out->Print("),\n");
       }
     }
     out->Print("}\n");
@@ -590,7 +602,7 @@ bool PrintAddServicerToServer(const grpc::string& package_qualified_service_name
       IndentScope raii_call_first_indent(out);
       IndentScope raii_call_second_indent(out);
       out->Print("'$PackageQualifiedServiceName$', rpc_method_handlers)\n",
-		 "PackageQualifiedServiceName", package_qualified_service_name);
+                 "PackageQualifiedServiceName", package_qualified_service_name);
     }
     out->Print("server.add_generic_rpc_handlers((generic_handler,))\n");
   }
@@ -602,10 +614,12 @@ bool PrintPreamble(const FileDescriptor* file,
   out->Print("import $Package$\n", "Package", config.grpc_package_root);
   out->Print("from $Package$ import implementations as beta_implementations\n",
              "Package", config.beta_package_root);
-  out->Print("from $Package$ import interfaces as beta_interfaces\n",
-             "Package", config.beta_package_root);
+  out->Print("from $Package$ import interfaces as beta_interfaces\n", "Package",
+             config.beta_package_root);
   out->Print("from grpc.framework.common import cardinality\n");
-  out->Print("from grpc.framework.interfaces.face import utilities as face_utilities\n");
+  out->Print(
+      "from grpc.framework.interfaces.face import utilities as "
+      "face_utilities\n");
   return true;
 }
 
@@ -629,12 +643,14 @@ pair<bool, grpc::string> GetServices(const FileDescriptor* file,
       auto service = file->service(i);
       auto package_qualified_service_name = package + service->name();
       if (!(PrintStub(package_qualified_service_name, service, &out) &&
-	    PrintServicer(service, &out) &&
-	    PrintAddServicerToServer(package_qualified_service_name, service, &out) &&
-	    PrintBetaServicer(service, &out) &&
-            PrintBetaStub(service, &out) &&
-            PrintBetaServerFactory(package_qualified_service_name, service, &out) &&
-            PrintBetaStubFactory(package_qualified_service_name, service, &out))) {
+            PrintServicer(service, &out) &&
+            PrintAddServicerToServer(package_qualified_service_name, service,
+                                     &out) &&
+            PrintBetaServicer(service, &out) && PrintBetaStub(service, &out) &&
+            PrintBetaServerFactory(package_qualified_service_name, service,
+                                   &out) &&
+            PrintBetaStubFactory(package_qualified_service_name, service,
+                                 &out))) {
         return make_pair(false, "");
       }
     }
diff --git a/src/compiler/python_generator.h b/src/compiler/python_generator.h
index fc51b48daec314ba4d80abcaa6f85b6878f994a9..9bbb83bca6e500cda3741d657c6269e4721cc6a0 100644
--- a/src/compiler/python_generator.h
+++ b/src/compiler/python_generator.h
@@ -43,6 +43,7 @@ namespace grpc_python_generator {
 // Data pertaining to configuration of the generator with respect to anything
 // that may be used internally at Google.
 struct GeneratorConfiguration {
+  GeneratorConfiguration();
   grpc::string grpc_package_root;
   grpc::string beta_package_root;
 };
@@ -56,6 +57,7 @@ class PythonGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
                 const grpc::string& parameter,
                 grpc::protobuf::compiler::GeneratorContext* context,
                 grpc::string* error) const;
+
  private:
   GeneratorConfiguration config_;
 };
diff --git a/src/compiler/python_plugin.cc b/src/compiler/python_plugin.cc
index fc76ee5ec6d458aa93df583ae8d9cd7b813b7de4..3457aa631a36bb11547ece7c50f3524bfc008300 100644
--- a/src/compiler/python_plugin.cc
+++ b/src/compiler/python_plugin.cc
@@ -38,8 +38,6 @@
 
 int main(int argc, char* argv[]) {
   grpc_python_generator::GeneratorConfiguration config;
-  config.grpc_package_root = "grpc";
-  config.beta_package_root = "grpc.beta";
   grpc_python_generator::PythonGrpcGenerator generator(config);
   return grpc::protobuf::compiler::PluginMain(argc, argv, &generator);
 }
diff --git a/src/compiler/ruby_generator.cc b/src/compiler/ruby_generator.cc
index 1501c3f3e01b050ae2b2be9afbb3d64e68e859de..02202568cb4137a5088a12e33846d76d9c9d04cf 100644
--- a/src/compiler/ruby_generator.cc
+++ b/src/compiler/ruby_generator.cc
@@ -55,17 +55,20 @@ namespace {
 // Prints out the method using the ruby gRPC DSL.
 void PrintMethod(const MethodDescriptor *method, const grpc::string &package,
                  Printer *out) {
-  grpc::string input_type = RubyTypeOf(method->input_type()->full_name(), package);
+  grpc::string input_type =
+      RubyTypeOf(method->input_type()->full_name(), package);
   if (method->client_streaming()) {
     input_type = "stream(" + input_type + ")";
   }
-  grpc::string output_type = RubyTypeOf(method->output_type()->full_name(), package);
+  grpc::string output_type =
+      RubyTypeOf(method->output_type()->full_name(), package);
   if (method->server_streaming()) {
     output_type = "stream(" + output_type + ")";
   }
-  std::map<grpc::string, grpc::string> method_vars =
-      ListToDict({"mth.name", method->name(), "input.type", input_type,
-                  "output.type", output_type, });
+  std::map<grpc::string, grpc::string> method_vars = ListToDict({
+      "mth.name", method->name(), "input.type", input_type, "output.type",
+      output_type,
+  });
   out->Print(GetRubyComments(method, true).c_str());
   out->Print(method_vars, "rpc :$mth.name$, $input.type$, $output.type$\n");
   out->Print(GetRubyComments(method, false).c_str());
@@ -79,8 +82,9 @@ void PrintService(const ServiceDescriptor *service, const grpc::string &package,
   }
 
   // Begin the service module
-  std::map<grpc::string, grpc::string> module_vars =
-      ListToDict({"module.name", CapitalizeFirst(service->name()), });
+  std::map<grpc::string, grpc::string> module_vars = ListToDict({
+      "module.name", CapitalizeFirst(service->name()),
+  });
   out->Print(module_vars, "module $module.name$\n");
   out->Indent();
 
@@ -130,8 +134,9 @@ grpc::string GetServices(const FileDescriptor *file) {
     }
 
     // Write out a file header.
-    std::map<grpc::string, grpc::string> header_comment_vars = ListToDict(
-        {"file.name", file->name(), "file.package", file->package(), });
+    std::map<grpc::string, grpc::string> header_comment_vars = ListToDict({
+        "file.name", file->name(), "file.package", file->package(),
+    });
     out.Print("# Generated by the protocol buffer compiler.  DO NOT EDIT!\n");
     out.Print(header_comment_vars,
               "# Source: $file.name$ for package '$file.package$'\n");
@@ -147,16 +152,18 @@ grpc::string GetServices(const FileDescriptor *file) {
     // Write out require statement to import the separately generated file
     // that defines the messages used by the service. This is generated by the
     // main ruby plugin.
-    std::map<grpc::string, grpc::string> dep_vars =
-        ListToDict({"dep.name", MessagesRequireName(file), });
+    std::map<grpc::string, grpc::string> dep_vars = ListToDict({
+        "dep.name", MessagesRequireName(file),
+    });
     out.Print(dep_vars, "require '$dep.name$'\n");
 
     // Write out services within the modules
     out.Print("\n");
     std::vector<grpc::string> modules = Split(file->package(), '.');
     for (size_t i = 0; i < modules.size(); ++i) {
-      std::map<grpc::string, grpc::string> module_vars =
-          ListToDict({"module.name", CapitalizeFirst(modules[i]), });
+      std::map<grpc::string, grpc::string> module_vars = ListToDict({
+          "module.name", CapitalizeFirst(modules[i]),
+      });
       out.Print(module_vars, "module $module.name$\n");
       out.Indent();
     }
diff --git a/src/compiler/ruby_generator_map-inl.h b/src/compiler/ruby_generator_map-inl.h
index 6b87774f2136b576a47fdd822b4d1dc9163359f2..75ba7fc8f19a6c6eb20dd7fc5902cda27bc5e7e0 100644
--- a/src/compiler/ruby_generator_map-inl.h
+++ b/src/compiler/ruby_generator_map-inl.h
@@ -36,8 +36,8 @@
 
 #include "src/compiler/config.h"
 
-#include <iostream>
 #include <initializer_list>
+#include <iostream>
 #include <map>
 #include <ostream>  // NOLINT
 #include <vector>
@@ -53,8 +53,7 @@ namespace grpc_ruby_generator {
 inline std::map<grpc::string, grpc::string> ListToDict(
     const initializer_list<grpc::string> &values) {
   if (values.size() % 2 != 0) {
-    std::cerr << "Not every 'key' has a value in `values`."
-              << std::endl;
+    std::cerr << "Not every 'key' has a value in `values`." << std::endl;
   }
   std::map<grpc::string, grpc::string> value_map;
   auto value_iter = values.begin();
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 72e4e5427ebdc5d524207af35a6b938bac03a269..f51d850e013b88df231525c429a9fdaaf6319948 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -45,6 +45,7 @@
 #include "src/core/ext/census/census_interface.h"
 #include "src/core/ext/census/census_rpc_stats.h"
 #include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/profiling/timers.h"
 #include "src/core/lib/transport/static_metadata.h"
 
 typedef struct call_data {
@@ -92,6 +93,7 @@ static void client_start_transport_op(grpc_exec_ctx *exec_ctx,
 
 static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
                                 grpc_error *error) {
+  GPR_TIMER_BEGIN("census-server:server_on_done_recv", 0);
   grpc_call_element *elem = ptr;
   call_data *calld = elem->call_data;
   channel_data *chand = elem->channel_data;
@@ -99,6 +101,7 @@ static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
     extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand);
   }
   calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, error);
+  GPR_TIMER_END("census-server:server_on_done_recv", 0);
 }
 
 static void server_mutate_op(grpc_call_element *elem,
diff --git a/src/core/ext/client_config/channel_connectivity.c b/src/core/ext/client_config/channel_connectivity.c
index 20c01a9a7cf2538ee8aa52a8d385d1065a445225..c1220e3a8c354e2668a3a9fbd2970c3932a37156 100644
--- a/src/core/ext/client_config/channel_connectivity.c
+++ b/src/core/ext/client_config/channel_connectivity.c
@@ -189,10 +189,11 @@ void grpc_channel_watch_connectivity_state(
   GRPC_API_TRACE(
       "grpc_channel_watch_connectivity_state("
       "channel=%p, last_observed_state=%d, "
-      "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+      "deadline=gpr_timespec { tv_sec: %" PRId64
+      ", tv_nsec: %d, clock_type: %d }, "
       "cq=%p, tag=%p)",
-      7, (channel, (int)last_observed_state, (long long)deadline.tv_sec,
-          (int)deadline.tv_nsec, (int)deadline.clock_type, cq, tag));
+      7, (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
+          (int)deadline.clock_type, cq, tag));
 
   grpc_cq_begin_op(cq, tag);
 
diff --git a/src/core/ext/client_config/client_channel.c b/src/core/ext/client_config/client_channel.c
index 1d5a7d52246c9c41184dd31488fd7d1637b5ea22..a096435c984d93cc2b556122d0007a543c04531b 100644
--- a/src/core/ext/client_config/client_channel.c
+++ b/src/core/ext/client_config/client_channel.c
@@ -367,6 +367,8 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
                               uint32_t initial_metadata_flags,
                               grpc_connected_subchannel **connected_subchannel,
                               grpc_closure *on_ready) {
+  GPR_TIMER_BEGIN("cc_pick_subchannel", 0);
+
   grpc_call_element *elem = elemp;
   channel_data *chand = elem->channel_data;
   call_data *calld = elem->call_data;
@@ -391,6 +393,7 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
       }
     }
     gpr_mu_unlock(&chand->mu_config);
+    GPR_TIMER_END("cc_pick_subchannel", 0);
     return 1;
   }
   if (chand->lb_policy != NULL) {
@@ -402,6 +405,7 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
                             initial_metadata, initial_metadata_flags,
                             connected_subchannel, on_ready);
     GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "cc_pick_subchannel");
+    GPR_TIMER_END("cc_pick_subchannel", 0);
     return r;
   }
   if (chand->resolver != NULL && !chand->started_resolving) {
@@ -426,6 +430,8 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
                         NULL);
   }
   gpr_mu_unlock(&chand->mu_config);
+
+  GPR_TIMER_END("cc_pick_subchannel", 0);
   return 0;
 }
 
diff --git a/src/core/ext/client_config/subchannel.c b/src/core/ext/client_config/subchannel.c
index 468067ea57c0c1c854635cb92378ad0f47531ec1..d089cd4399e8fc933abf63b2d0dd25d5e4b1439c 100644
--- a/src/core/ext/client_config/subchannel.c
+++ b/src/core/ext/client_config/subchannel.c
@@ -690,9 +690,11 @@ char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
 void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
                                      grpc_subchannel_call *call,
                                      grpc_transport_stream_op *op) {
+  GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0);
   grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
   grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
   top_elem->filter->start_transport_stream_op(exec_ctx, top_elem, op);
+  GPR_TIMER_END("grpc_subchannel_call_process_op", 0);
 }
 
 grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
diff --git a/src/core/ext/client_config/subchannel_call_holder.c b/src/core/ext/client_config/subchannel_call_holder.c
index e31800edd98ede2fc495b5d20b57303d74750f99..b96a0ad093707ae45c7a1fc1ee06f3e44a729e82 100644
--- a/src/core/ext/client_config/subchannel_call_holder.c
+++ b/src/core/ext/client_config/subchannel_call_holder.c
@@ -120,16 +120,13 @@ retry:
     return;
   }
   /* if this is a cancellation, then we can raise our cancelled flag */
-  if (op->cancel_with_status != GRPC_STATUS_OK) {
+  if (op->cancel_error != GRPC_ERROR_NONE) {
     if (!gpr_atm_rel_cas(&holder->subchannel_call, 0, 1)) {
       goto retry;
     } else {
       switch (holder->creation_phase) {
         case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
-          fail_locked(exec_ctx, holder,
-                      grpc_error_set_int(GRPC_ERROR_CREATE("Cancelled"),
-                                         GRPC_ERROR_INT_GRPC_STATUS,
-                                         op->cancel_with_status));
+          fail_locked(exec_ctx, holder, GRPC_ERROR_REF(op->cancel_error));
           break;
         case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
           holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg, NULL,
diff --git a/src/core/ext/resolver/zookeeper/README.md b/src/core/ext/resolver/zookeeper/README.md
deleted file mode 100644
index ce6f39683bb56ab255a5e72e9eb87e453f13b57e..0000000000000000000000000000000000000000
--- a/src/core/ext/resolver/zookeeper/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Zookeeper based name resolver: WIP
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
index 9bae3a94f9988028f400cdef0100a03f3cd3fc08..e5c987925c3bd17e7dfc3fe0fa3acd486e2ca5a8 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
@@ -97,7 +97,8 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
     goto error;
   }
 
-  err = grpc_tcp_server_create(NULL, &tcp);
+  err =
+      grpc_tcp_server_create(NULL, grpc_server_get_channel_args(server), &tcp);
   if (err != GRPC_ERROR_NONE) {
     goto error;
   }
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
index ead8a4d56659b652c40225f5dab12de204e1f002..c42810e9130a5586721827d08fab7478788e0643 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
@@ -216,7 +216,8 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
   state = gpr_malloc(sizeof(*state));
   memset(state, 0, sizeof(*state));
   grpc_closure_init(&state->destroy_closure, destroy_done, state);
-  err = grpc_tcp_server_create(&state->destroy_closure, &tcp);
+  err = grpc_tcp_server_create(&state->destroy_closure,
+                               grpc_server_get_channel_args(server), &tcp);
   if (err != GRPC_ERROR_NONE) {
     goto error;
   }
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 9aa39ba26c2dcd5ae0700dbc4c1cec6c6e99de9a..5aae753c07d51278ee125d35cae1180bdd2c37a4 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -106,14 +106,12 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
 static void cancel_from_api(grpc_exec_ctx *exec_ctx,
                             grpc_chttp2_transport_global *transport_global,
                             grpc_chttp2_stream_global *stream_global,
-                            grpc_status_code status,
-                            gpr_slice *optional_message);
+                            grpc_error *error);
 
 static void close_from_api(grpc_exec_ctx *exec_ctx,
                            grpc_chttp2_transport_global *transport_global,
                            grpc_chttp2_stream_global *stream_global,
-                           grpc_status_code status,
-                           gpr_slice *optional_message);
+                           grpc_error *error);
 
 /** Add endpoint from this transport to pollset */
 static void add_to_pollset_locked(grpc_exec_ctx *exec_ctx,
@@ -163,8 +161,6 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
 
   GPR_ASSERT(t->ep == NULL);
 
-  gpr_slice_unref(t->optional_drop_message);
-
   gpr_slice_buffer_destroy(&t->global.qbuf);
 
   gpr_slice_buffer_destroy(&t->writing.outbuf);
@@ -264,8 +260,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
   t->parsing.is_client = is_client;
   t->parsing.deframe_state =
       is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
+  t->parsing.is_first_frame = true;
   t->writing.is_client = is_client;
-  t->optional_drop_message = gpr_empty_slice();
   grpc_connectivity_state_init(
       &t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
       is_client ? "client_transport" : "server_transport");
@@ -517,6 +513,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
       &s->global.received_trailing_metadata);
   grpc_chttp2_data_parser_init(&s->parsing.data_parser);
   gpr_slice_buffer_init(&s->writing.flow_controlled_buffer);
+  s->global.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
 
   REF_TRANSPORT(t, "stream");
 
@@ -593,6 +590,7 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx,
   grpc_chttp2_incoming_metadata_buffer_destroy(
       &s->global.received_trailing_metadata);
   gpr_slice_buffer_destroy(&s->writing.flow_controlled_buffer);
+  GRPC_ERROR_UNREF(s->global.removal_error);
 
   UNREF_TRANSPORT(exec_ctx, t, "stream");
 
@@ -641,10 +639,11 @@ static void finish_global_actions(grpc_exec_ctx *exec_ctx,
   grpc_chttp2_executor_action_header *hdr;
   grpc_chttp2_executor_action_header *next;
 
+  GPR_TIMER_BEGIN("finish_global_actions", 0);
+
   for (;;) {
     if (!t->executor.writing_active && !t->closed &&
-        grpc_chttp2_unlocking_check_writes(exec_ctx, &t->global, &t->writing,
-                                           t->executor.parsing_active)) {
+        grpc_chttp2_unlocking_check_writes(exec_ctx, &t->global, &t->writing)) {
       t->executor.writing_active = 1;
       REF_TRANSPORT(t, "writing");
       prevent_endpoint_shutdown(t);
@@ -659,7 +658,9 @@ static void finish_global_actions(grpc_exec_ctx *exec_ctx,
           NULL;
       gpr_mu_unlock(&t->executor.mu);
       while (hdr != NULL) {
+        GPR_TIMER_BEGIN("chttp2:locked_action", 0);
         hdr->action(exec_ctx, t, hdr->stream, hdr->arg);
+        GPR_TIMER_END("chttp2:locked_action", 0);
         next = hdr->next;
         gpr_free(hdr);
         UNREF_TRANSPORT(exec_ctx, t, "pending_action");
@@ -672,6 +673,8 @@ static void finish_global_actions(grpc_exec_ctx *exec_ctx,
     gpr_mu_unlock(&t->executor.mu);
     break;
   }
+
+  GPR_TIMER_END("finish_global_actions", 0);
 }
 
 void grpc_chttp2_run_with_global_lock(grpc_exec_ctx *exec_ctx,
@@ -681,6 +684,8 @@ void grpc_chttp2_run_with_global_lock(grpc_exec_ctx *exec_ctx,
                                       void *arg, size_t sizeof_arg) {
   grpc_chttp2_executor_action_header *hdr;
 
+  GPR_TIMER_BEGIN("grpc_chttp2_run_with_global_lock", 0);
+
   REF_TRANSPORT(t, "run_global");
   gpr_mu_lock(&t->executor.mu);
 
@@ -689,7 +694,9 @@ void grpc_chttp2_run_with_global_lock(grpc_exec_ctx *exec_ctx,
       t->executor.global_active = 1;
       gpr_mu_unlock(&t->executor.mu);
 
+      GPR_TIMER_BEGIN("chttp2:locked_action", 0);
       action(exec_ctx, t, optional_stream, arg);
+      GPR_TIMER_END("chttp2:locked_action", 0);
 
       finish_global_actions(exec_ctx, t);
     } else {
@@ -726,6 +733,8 @@ void grpc_chttp2_run_with_global_lock(grpc_exec_ctx *exec_ctx,
   }
 
   UNREF_TRANSPORT(exec_ctx, t, "run_global");
+
+  GPR_TIMER_END("grpc_chttp2_run_with_global_lock", 0);
 }
 
 /*******************************************************************************
@@ -876,7 +885,9 @@ static void maybe_start_some_streams(
          grpc_chttp2_list_pop_waiting_for_concurrency(transport_global,
                                                       &stream_global)) {
     cancel_from_api(exec_ctx, transport_global, stream_global,
-                    GRPC_STATUS_UNAVAILABLE, NULL);
+                    grpc_error_set_int(
+                        GRPC_ERROR_CREATE("Stream IDs exhausted"),
+                        GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
   }
 }
 
@@ -958,14 +969,14 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
     on_complete->next_data.scratch |= CLOSURE_BARRIER_STATS_BIT;
   }
 
-  if (op->cancel_with_status != GRPC_STATUS_OK) {
+  if (op->cancel_error != GRPC_ERROR_NONE) {
     cancel_from_api(exec_ctx, transport_global, stream_global,
-                    op->cancel_with_status, op->optional_close_message);
+                    GRPC_ERROR_REF(op->cancel_error));
   }
 
-  if (op->close_with_status != GRPC_STATUS_OK) {
+  if (op->close_error != GRPC_ERROR_NONE) {
     close_from_api(exec_ctx, transport_global, stream_global,
-                   op->close_with_status, op->optional_close_message);
+                   GRPC_ERROR_REF(op->close_error));
   }
 
   if (op->send_initial_metadata != NULL) {
@@ -978,13 +989,22 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
     const size_t metadata_peer_limit =
         transport_global->settings[GRPC_PEER_SETTINGS]
                                   [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
+    if (transport_global->is_client) {
+      stream_global->deadline =
+          gpr_time_min(stream_global->deadline,
+                       stream_global->send_initial_metadata->deadline);
+    }
     if (metadata_size > metadata_peer_limit) {
-      gpr_log(GPR_DEBUG,
-              "to-be-sent initial metadata size exceeds peer limit "
-              "(%" PRIuPTR " vs. %" PRIuPTR ")",
-              metadata_size, metadata_peer_limit);
-      cancel_from_api(exec_ctx, transport_global, stream_global,
-                      GRPC_STATUS_RESOURCE_EXHAUSTED, NULL);
+      cancel_from_api(
+          exec_ctx, transport_global, stream_global,
+          grpc_error_set_int(
+              grpc_error_set_int(
+                  grpc_error_set_int(
+                      GRPC_ERROR_CREATE("to-be-sent initial metadata size "
+                                        "exceeds peer limit"),
+                      GRPC_ERROR_INT_SIZE, (intptr_t)metadata_size),
+                  GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit),
+              GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
     } else {
       if (contains_non_ok_status(transport_global, op->send_initial_metadata)) {
         stream_global->seen_error = true;
@@ -1038,12 +1058,16 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
         transport_global->settings[GRPC_PEER_SETTINGS]
                                   [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
     if (metadata_size > metadata_peer_limit) {
-      gpr_log(GPR_DEBUG,
-              "to-be-sent trailing metadata size exceeds peer limit "
-              "(%" PRIuPTR " vs. %" PRIuPTR ")",
-              metadata_size, metadata_peer_limit);
-      cancel_from_api(exec_ctx, transport_global, stream_global,
-                      GRPC_STATUS_RESOURCE_EXHAUSTED, NULL);
+      cancel_from_api(
+          exec_ctx, transport_global, stream_global,
+          grpc_error_set_int(
+              grpc_error_set_int(
+                  grpc_error_set_int(
+                      GRPC_ERROR_CREATE("to-be-sent trailing metadata size "
+                                        "exceeds peer limit"),
+                      GRPC_ERROR_INT_SIZE, (intptr_t)metadata_size),
+                  GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit),
+              GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
     } else {
       if (contains_non_ok_status(transport_global,
                                  op->send_trailing_metadata)) {
@@ -1093,6 +1117,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
     stream_global->recv_trailing_metadata_finished =
         add_closure_barrier(on_complete);
     stream_global->recv_trailing_metadata = op->recv_trailing_metadata;
+    stream_global->final_metadata_requested = true;
     grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
   }
 
@@ -1234,8 +1259,12 @@ static void check_read_ops(grpc_exec_ctx *exec_ctx,
           incoming_byte_stream_destroy_locked(exec_ctx, NULL, NULL, bs);
         }
         if (stream_global->exceeded_metadata_size) {
-          cancel_from_api(exec_ctx, transport_global, stream_global,
-                          GRPC_STATUS_RESOURCE_EXHAUSTED, NULL);
+          cancel_from_api(
+              exec_ctx, transport_global, stream_global,
+              grpc_error_set_int(
+                  GRPC_ERROR_CREATE(
+                      "received initial metadata size exceeds limit"),
+                  GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
         }
       }
       grpc_chttp2_incoming_metadata_buffer_publish(
@@ -1246,7 +1275,8 @@ static void check_read_ops(grpc_exec_ctx *exec_ctx,
       stream_global->recv_initial_metadata_ready = NULL;
     }
     if (stream_global->recv_message_ready != NULL) {
-      while (stream_global->seen_error &&
+      while (stream_global->final_metadata_requested &&
+             stream_global->seen_error &&
              (bs = grpc_chttp2_incoming_frame_queue_pop(
                   &stream_global->incoming_frames)) != NULL) {
         incoming_byte_stream_destroy_locked(exec_ctx, NULL, NULL, bs);
@@ -1273,8 +1303,12 @@ static void check_read_ops(grpc_exec_ctx *exec_ctx,
           incoming_byte_stream_destroy_locked(exec_ctx, NULL, NULL, bs);
         }
         if (stream_global->exceeded_metadata_size) {
-          cancel_from_api(exec_ctx, transport_global, stream_global,
-                          GRPC_STATUS_RESOURCE_EXHAUSTED, NULL);
+          cancel_from_api(
+              exec_ctx, transport_global, stream_global,
+              grpc_error_set_int(
+                  GRPC_ERROR_CREATE(
+                      "received trailing metadata size exceeds limit"),
+                  GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
         }
       }
       if (stream_global->all_incoming_byte_streams_finished) {
@@ -1314,15 +1348,15 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
   }
   if (s->parsing.data_parser.parsing_frame != NULL) {
     grpc_chttp2_incoming_byte_stream_finished(
-        exec_ctx, s->parsing.data_parser.parsing_frame,
-        GRPC_ERROR_CREATE_REFERENCING("Stream removed", &error, 1), 0);
+        exec_ctx, s->parsing.data_parser.parsing_frame, GRPC_ERROR_REF(error),
+        0);
     s->parsing.data_parser.parsing_frame = NULL;
   }
 
   if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
     close_transport_locked(
-        exec_ctx, t,
-        GRPC_ERROR_CREATE("Last stream closed after sending GOAWAY"));
+        exec_ctx, t, GRPC_ERROR_CREATE_REFERENCING(
+                         "Last stream closed after sending GOAWAY", &error, 1));
   }
   if (grpc_chttp2_list_remove_writable_stream(&t->global, &s->global)) {
     GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &s->global, "chttp2_writing");
@@ -1338,35 +1372,68 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
   GRPC_ERROR_UNREF(error);
 }
 
+static void status_codes_from_error(grpc_error *error, gpr_timespec deadline,
+                                    grpc_chttp2_error_code *http2_error,
+                                    grpc_status_code *grpc_status) {
+  intptr_t ip_http;
+  intptr_t ip_grpc;
+  bool have_http =
+      grpc_error_get_int(error, GRPC_ERROR_INT_HTTP2_ERROR, &ip_http);
+  bool have_grpc =
+      grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &ip_grpc);
+  if (have_http) {
+    *http2_error = (grpc_chttp2_error_code)ip_http;
+  } else if (have_grpc) {
+    *http2_error =
+        grpc_chttp2_grpc_status_to_http2_error((grpc_status_code)ip_grpc);
+  } else {
+    *http2_error = GRPC_CHTTP2_INTERNAL_ERROR;
+  }
+  if (have_grpc) {
+    *grpc_status = (grpc_status_code)ip_grpc;
+  } else if (have_http) {
+    *grpc_status = grpc_chttp2_http2_error_to_grpc_status(
+        (grpc_chttp2_error_code)ip_http, deadline);
+  } else {
+    *grpc_status = GRPC_STATUS_INTERNAL;
+  }
+}
+
 static void cancel_from_api(grpc_exec_ctx *exec_ctx,
                             grpc_chttp2_transport_global *transport_global,
                             grpc_chttp2_stream_global *stream_global,
-                            grpc_status_code status,
-                            gpr_slice *optional_message) {
+                            grpc_error *due_to_error) {
   if (!stream_global->read_closed || !stream_global->write_closed) {
+    grpc_status_code grpc_status;
+    grpc_chttp2_error_code http_error;
+    status_codes_from_error(due_to_error, stream_global->deadline, &http_error,
+                            &grpc_status);
+
     if (stream_global->id != 0) {
       gpr_slice_buffer_add(
           &transport_global->qbuf,
-          grpc_chttp2_rst_stream_create(
-              stream_global->id,
-              (uint32_t)grpc_chttp2_grpc_status_to_http2_error(status),
-              &stream_global->stats.outgoing));
+          grpc_chttp2_rst_stream_create(stream_global->id, (uint32_t)http_error,
+                                        &stream_global->stats.outgoing));
     }
 
-    if (optional_message) {
-      gpr_slice_ref(*optional_message);
+    const char *msg =
+        grpc_error_get_str(due_to_error, GRPC_ERROR_STR_GRPC_MESSAGE);
+    bool free_msg = false;
+    if (msg == NULL) {
+      free_msg = true;
+      msg = grpc_error_string(due_to_error);
     }
-    grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global, status,
-                            optional_message);
+    gpr_slice msg_slice = gpr_slice_from_copied_string(msg);
+    grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
+                            grpc_status, &msg_slice);
+    if (free_msg) grpc_error_free_string(msg);
   }
-  if (status != GRPC_STATUS_OK && !stream_global->seen_error) {
+  if (due_to_error != GRPC_ERROR_NONE && !stream_global->seen_error) {
     stream_global->seen_error = true;
     grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
   }
-  grpc_chttp2_mark_stream_closed(
-      exec_ctx, transport_global, stream_global, 1, 1,
-      grpc_error_set_int(GRPC_ERROR_CREATE("Cancelled"),
-                         GRPC_ERROR_INT_GRPC_STATUS, status));
+  grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
+                                 1, due_to_error);
 }
 
 void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
@@ -1449,6 +1516,7 @@ void grpc_chttp2_mark_stream_closed(
     }
   }
   if (stream_global->read_closed && stream_global->write_closed) {
+    stream_global->removal_error = GRPC_ERROR_REF(error);
     if (stream_global->id != 0 &&
         TRANSPORT_FROM_GLOBAL(transport_global)->executor.parsing_active) {
       grpc_chttp2_list_add_closed_waiting_for_parsing(transport_global,
@@ -1467,15 +1535,18 @@ void grpc_chttp2_mark_stream_closed(
 static void close_from_api(grpc_exec_ctx *exec_ctx,
                            grpc_chttp2_transport_global *transport_global,
                            grpc_chttp2_stream_global *stream_global,
-                           grpc_status_code status,
-                           gpr_slice *optional_message) {
+                           grpc_error *error) {
   gpr_slice hdr;
   gpr_slice status_hdr;
   gpr_slice message_pfx;
   uint8_t *p;
   uint32_t len = 0;
+  grpc_status_code grpc_status;
+  grpc_chttp2_error_code http_error;
+  status_codes_from_error(error, stream_global->deadline, &http_error,
+                          &grpc_status);
 
-  GPR_ASSERT(status >= 0 && (int)status < 100);
+  GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
 
   if (stream_global->id != 0 && !transport_global->is_client) {
     /* Hand roll a header block.
@@ -1485,7 +1556,7 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
        time we got around to sending this, so instead we ignore HPACK
        compression
        and just write the uncompressed bytes onto the wire. */
-    status_hdr = gpr_slice_malloc(15 + (status >= 10));
+    status_hdr = gpr_slice_malloc(15 + (grpc_status >= 10));
     p = GPR_SLICE_START_PTR(status_hdr);
     *p++ = 0x40; /* literal header */
     *p++ = 11;   /* len(grpc-status) */
@@ -1500,19 +1571,23 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
     *p++ = 't';
     *p++ = 'u';
     *p++ = 's';
-    if (status < 10) {
+    if (grpc_status < 10) {
       *p++ = 1;
-      *p++ = (uint8_t)('0' + status);
+      *p++ = (uint8_t)('0' + grpc_status);
     } else {
       *p++ = 2;
-      *p++ = (uint8_t)('0' + (status / 10));
-      *p++ = (uint8_t)('0' + (status % 10));
+      *p++ = (uint8_t)('0' + (grpc_status / 10));
+      *p++ = (uint8_t)('0' + (grpc_status % 10));
     }
     GPR_ASSERT(p == GPR_SLICE_END_PTR(status_hdr));
     len += (uint32_t)GPR_SLICE_LENGTH(status_hdr);
 
-    if (optional_message) {
-      GPR_ASSERT(GPR_SLICE_LENGTH(*optional_message) < 127);
+    const char *optional_message =
+        grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE);
+
+    if (optional_message != NULL) {
+      size_t msg_len = strlen(optional_message);
+      GPR_ASSERT(msg_len < 127);
       message_pfx = gpr_slice_malloc(15);
       p = GPR_SLICE_START_PTR(message_pfx);
       *p++ = 0x40;
@@ -1529,10 +1604,10 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
       *p++ = 'a';
       *p++ = 'g';
       *p++ = 'e';
-      *p++ = (uint8_t)GPR_SLICE_LENGTH(*optional_message);
+      *p++ = (uint8_t)msg_len;
       GPR_ASSERT(p == GPR_SLICE_END_PTR(message_pfx));
       len += (uint32_t)GPR_SLICE_LENGTH(message_pfx);
-      len += (uint32_t)GPR_SLICE_LENGTH(*optional_message);
+      len += (uint32_t)msg_len;
     }
 
     hdr = gpr_slice_malloc(9);
@@ -1553,53 +1628,53 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
     if (optional_message) {
       gpr_slice_buffer_add(&transport_global->qbuf, message_pfx);
       gpr_slice_buffer_add(&transport_global->qbuf,
-                           gpr_slice_ref(*optional_message));
+                           gpr_slice_from_copied_string(optional_message));
     }
-
     gpr_slice_buffer_add(
         &transport_global->qbuf,
         grpc_chttp2_rst_stream_create(stream_global->id, GRPC_CHTTP2_NO_ERROR,
                                       &stream_global->stats.outgoing));
-
-    if (optional_message) {
-      gpr_slice_ref(*optional_message);
-    }
   }
 
-  grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global, status,
-                          optional_message);
-  grpc_error *err = GRPC_ERROR_CREATE("Stream closed");
-  err = grpc_error_set_int(err, GRPC_ERROR_INT_GRPC_STATUS, status);
-  if (optional_message) {
-    char *str =
-        gpr_dump_slice(*optional_message, GPR_DUMP_HEX | GPR_DUMP_ASCII);
-    err = grpc_error_set_str(err, GRPC_ERROR_STR_GRPC_MESSAGE, str);
-    gpr_free(str);
+  const char *msg = grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE);
+  bool free_msg = false;
+  if (msg == NULL) {
+    free_msg = true;
+    msg = grpc_error_string(error);
   }
+  gpr_slice msg_slice = gpr_slice_from_copied_string(msg);
+  grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
+                          grpc_status, &msg_slice);
+  if (free_msg) grpc_error_free_string(msg);
+
   grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
-                                 1, err);
+                                 1, error);
 }
 
+typedef struct {
+  grpc_exec_ctx *exec_ctx;
+  grpc_error *error;
+} cancel_stream_cb_args;
+
 static void cancel_stream_cb(grpc_chttp2_transport_global *transport_global,
                              void *user_data,
                              grpc_chttp2_stream_global *stream_global) {
-  grpc_chttp2_transport *transport = TRANSPORT_FROM_GLOBAL(transport_global);
-  cancel_from_api(user_data, transport_global, stream_global,
-                  GRPC_STATUS_UNAVAILABLE,
-                  GPR_SLICE_IS_EMPTY(transport->optional_drop_message)
-                      ? NULL
-                      : &transport->optional_drop_message);
+  cancel_stream_cb_args *args = user_data;
+  cancel_from_api(args->exec_ctx, transport_global, stream_global,
+                  GRPC_ERROR_REF(args->error));
 }
 
-static void end_all_the_calls(grpc_exec_ctx *exec_ctx,
-                              grpc_chttp2_transport *t) {
-  grpc_chttp2_for_all_streams(&t->global, exec_ctx, cancel_stream_cb);
+static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+                              grpc_error *error) {
+  cancel_stream_cb_args args = {exec_ctx, error};
+  grpc_chttp2_for_all_streams(&t->global, &args, cancel_stream_cb);
+  GRPC_ERROR_UNREF(error);
 }
 
 static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
                             grpc_error *error) {
-  close_transport_locked(exec_ctx, t, error);
-  end_all_the_calls(exec_ctx, t);
+  close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
+  end_all_the_calls(exec_ctx, t, error);
 }
 
 /** update window from a settings change */
@@ -1706,15 +1781,7 @@ static void parsing_action(grpc_exec_ctx *exec_ctx, void *arg,
                                          t->read_buffer.slices[i]);
   };
   if (i != t->read_buffer.count) {
-    gpr_slice_unref(t->optional_drop_message);
     errors[2] = try_http_parsing(exec_ctx, t);
-    if (errors[2] != GRPC_ERROR_NONE) {
-      t->optional_drop_message = gpr_slice_from_copied_string(
-          "Connection dropped: received http1.x response");
-    } else {
-      t->optional_drop_message = gpr_slice_from_copied_string(
-          "Connection dropped: received unparseable response");
-    }
   }
   grpc_error *err =
       errors[0] == GRPC_ERROR_NONE && errors[1] == GRPC_ERROR_NONE &&
@@ -1765,7 +1832,7 @@ static void post_parse_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
     GPR_ASSERT(stream_global->write_closed);
     GPR_ASSERT(stream_global->read_closed);
     remove_stream(exec_ctx, t, stream_global->id,
-                  GRPC_ERROR_CREATE("Stream removed"));
+                  GRPC_ERROR_REF(stream_global->removal_error));
     GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
   }
 
@@ -1782,6 +1849,10 @@ static void post_reading_action_locked(grpc_exec_ctx *exec_ctx,
     error = GRPC_ERROR_CREATE("Transport closed");
   }
   if (error != GRPC_ERROR_NONE) {
+    if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, NULL)) {
+      error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
+                                 GRPC_STATUS_UNAVAILABLE);
+    }
     drop_connection(exec_ctx, t, GRPC_ERROR_REF(error));
     t->endpoint_reading = 0;
     if (!t->executor.writing_active && t->ep) {
@@ -1796,6 +1867,7 @@ static void post_reading_action_locked(grpc_exec_ctx *exec_ctx,
     prevent_endpoint_shutdown(t);
   }
   gpr_slice_buffer_reset_and_unref(&t->read_buffer);
+  GRPC_ERROR_UNREF(error);
 
   if (keep_reading) {
     grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->reading_action);
@@ -1804,8 +1876,6 @@ static void post_reading_action_locked(grpc_exec_ctx *exec_ctx,
   } else {
     UNREF_TRANSPORT(exec_ctx, t, "reading_action");
   }
-
-  GRPC_ERROR_UNREF(error);
 }
 
 /*******************************************************************************
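A pattern that recurs in the chttp2 changes above: cancel_from_api and close_from_api both pull the user-visible message out of the error via GRPC_ERROR_STR_GRPC_MESSAGE and fall back to the error's rendered debug string when none is attached. Factored out as a standalone sketch (the helper name and the exact header paths are assumptions, chosen to mirror the surrounding files):

#include <stdbool.h>
#include <stddef.h>
#include <grpc/support/slice.h>
#include "src/core/lib/iomgr/error.h"

/* Copy the error's grpc message (or its debug rendering) into a fresh slice. */
static gpr_slice error_message_slice(grpc_error *error) {
  const char *msg = grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE);
  bool free_msg = false;
  if (msg == NULL) {
    free_msg = true;
    msg = grpc_error_string(error); /* heap-allocated rendering */
  }
  gpr_slice out = gpr_slice_from_copied_string(msg);
  if (free_msg) grpc_error_free_string(msg);
  return out;
}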
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index a7aefb99158a295b042c709f7309865039957ad6..e3a3c9e4a7ca34aa04794b3f25b648afffe58459 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -102,12 +102,14 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(
   if (p->byte == 4) {
     GPR_ASSERT(is_last);
     stream_parsing->received_close = 1;
-    stream_parsing->forced_close_error = grpc_error_set_int(
-        GRPC_ERROR_CREATE("RST_STREAM"), GRPC_ERROR_INT_HTTP2_ERROR,
-        (intptr_t)((((uint32_t)p->reason_bytes[0]) << 24) |
-                   (((uint32_t)p->reason_bytes[1]) << 16) |
-                   (((uint32_t)p->reason_bytes[2]) << 8) |
-                   (((uint32_t)p->reason_bytes[3]))));
+    if (stream_parsing->forced_close_error == GRPC_ERROR_NONE) {
+      stream_parsing->forced_close_error = grpc_error_set_int(
+          GRPC_ERROR_CREATE("RST_STREAM"), GRPC_ERROR_INT_HTTP2_ERROR,
+          (intptr_t)((((uint32_t)p->reason_bytes[0]) << 24) |
+                     (((uint32_t)p->reason_bytes[1]) << 16) |
+                     (((uint32_t)p->reason_bytes[2]) << 8) |
+                     (((uint32_t)p->reason_bytes[3]))));
+    }
   }
 
   return GRPC_ERROR_NONE;
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 54e72ebd24986a9102ac2405b4ada0cf0fa8460b..8d79e93ceb30f33a24796e35b7a57ac5f99a58b7 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -265,6 +265,7 @@ struct grpc_chttp2_transport_parsing {
   uint8_t incoming_frame_type;
   uint8_t incoming_frame_flags;
   uint8_t header_eof;
+  bool is_first_frame;
   uint32_t expect_continuation_stream_id;
   uint32_t incoming_frame_size;
   uint32_t incoming_stream_id;
@@ -383,9 +384,6 @@ struct grpc_chttp2_transport {
 
   /** Transport op to be applied post-parsing */
   grpc_transport_op *post_parsing_op;
-
-  /** Message explaining the reason of dropping connection */
-  gpr_slice optional_drop_message;
 };
 
 typedef struct {
@@ -438,13 +436,19 @@ typedef struct {
   bool seen_error;
   bool exceeded_metadata_size;
 
+  /** the error that resulted in this stream being removed */
+  grpc_error *removal_error;
+
   bool published_initial_metadata;
   bool published_trailing_metadata;
+  bool final_metadata_requested;
 
   grpc_chttp2_incoming_metadata_buffer received_initial_metadata;
   grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
 
   grpc_chttp2_incoming_frame_queue incoming_frames;
+
+  gpr_timespec deadline;
 } grpc_chttp2_stream_global;
 
 typedef struct {
@@ -524,8 +528,7 @@ struct grpc_chttp2_stream {
     are required, and schedule them if so */
 int grpc_chttp2_unlocking_check_writes(grpc_exec_ctx *exec_ctx,
                                        grpc_chttp2_transport_global *global,
-                                       grpc_chttp2_transport_writing *writing,
-                                       int is_parsing);
+                                       grpc_chttp2_transport_writing *writing);
 void grpc_chttp2_perform_writes(
     grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
     grpc_endpoint *endpoint);
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 785134091f5fc1ddfec4c39d10a3c73dd19dc1af..84eb5752f16a662179128c8ac910d875c91e30cf 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -236,9 +236,10 @@ void grpc_chttp2_publish_reads(
                                            GRPC_ERROR_INT_HTTP2_ERROR, &reason);
       if (has_reason && reason != GRPC_CHTTP2_NO_ERROR) {
         grpc_status_code status_code =
-            has_reason ? grpc_chttp2_http2_error_to_grpc_status(
-                             (grpc_chttp2_error_code)reason)
-                       : GRPC_STATUS_INTERNAL;
+            has_reason
+                ? grpc_chttp2_http2_error_to_grpc_status(
+                      (grpc_chttp2_error_code)reason, stream_global->deadline)
+                : GRPC_STATUS_INTERNAL;
         const char *status_details =
             grpc_error_string(stream_parsing->forced_close_error);
         gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
@@ -468,6 +469,17 @@ grpc_error *grpc_chttp2_perform_read(
 
 static grpc_error *init_frame_parser(
     grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
+  if (transport_parsing->is_first_frame &&
+      transport_parsing->incoming_frame_type != GRPC_CHTTP2_FRAME_SETTINGS) {
+    char *msg;
+    gpr_asprintf(
+        &msg, "Expected SETTINGS frame as the first frame, got frame type %d",
+        transport_parsing->incoming_frame_type);
+    grpc_error *err = GRPC_ERROR_CREATE(msg);
+    gpr_free(msg);
+    return err;
+  }
+  transport_parsing->is_first_frame = false;
   if (transport_parsing->expect_continuation_stream_id != 0) {
     if (transport_parsing->incoming_frame_type !=
         GRPC_CHTTP2_FRAME_CONTINUATION) {
diff --git a/src/core/ext/transport/chttp2/transport/status_conversion.c b/src/core/ext/transport/chttp2/transport/status_conversion.c
index c42fb9b3a1e49e56d077de51cdb9af6565de46ee..5dce2f2d0ccf2c7db6162acb8157ded7489650d0 100644
--- a/src/core/ext/transport/chttp2/transport/status_conversion.c
+++ b/src/core/ext/transport/chttp2/transport/status_conversion.c
@@ -39,6 +39,8 @@ int grpc_chttp2_grpc_status_to_http2_error(grpc_status_code status) {
       return GRPC_CHTTP2_NO_ERROR;
     case GRPC_STATUS_CANCELLED:
       return GRPC_CHTTP2_CANCEL;
+    case GRPC_STATUS_DEADLINE_EXCEEDED:
+      return GRPC_CHTTP2_CANCEL;
     case GRPC_STATUS_RESOURCE_EXHAUSTED:
       return GRPC_CHTTP2_ENHANCE_YOUR_CALM;
     case GRPC_STATUS_PERMISSION_DENIED:
@@ -51,13 +53,17 @@ int grpc_chttp2_grpc_status_to_http2_error(grpc_status_code status) {
 }
 
 grpc_status_code grpc_chttp2_http2_error_to_grpc_status(
-    grpc_chttp2_error_code error) {
+    grpc_chttp2_error_code error, gpr_timespec deadline) {
   switch (error) {
     case GRPC_CHTTP2_NO_ERROR:
       /* should never be received */
       return GRPC_STATUS_INTERNAL;
     case GRPC_CHTTP2_CANCEL:
-      return GRPC_STATUS_CANCELLED;
+      /* http2 cancel translates to STATUS_CANCELLED iff deadline hasn't been
+       * exceeded */
+      return gpr_time_cmp(gpr_now(deadline.clock_type), deadline) >= 0
+                 ? GRPC_STATUS_DEADLINE_EXCEEDED
+                 : GRPC_STATUS_CANCELLED;
     case GRPC_CHTTP2_ENHANCE_YOUR_CALM:
       return GRPC_STATUS_RESOURCE_EXHAUSTED;
     case GRPC_CHTTP2_INADEQUATE_SECURITY:
diff --git a/src/core/ext/transport/chttp2/transport/status_conversion.h b/src/core/ext/transport/chttp2/transport/status_conversion.h
index e7285e6fd51125e805877cf381c232024aaa4929..953bc9f1e1f677ea379b8d93cdb7080c4510f5f2 100644
--- a/src/core/ext/transport/chttp2/transport/status_conversion.h
+++ b/src/core/ext/transport/chttp2/transport/status_conversion.h
@@ -41,7 +41,7 @@
 grpc_chttp2_error_code grpc_chttp2_grpc_status_to_http2_error(
     grpc_status_code status);
 grpc_status_code grpc_chttp2_http2_error_to_grpc_status(
-    grpc_chttp2_error_code error);
+    grpc_chttp2_error_code error, gpr_timespec deadline);
 
 /* Conversion of HTTP status codes (:status) to grpc status codes */
 grpc_status_code grpc_chttp2_http2_status_to_grpc_status(int status);
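The extra gpr_timespec argument makes the HTTP/2-to-gRPC mapping deadline-aware: an incoming CANCEL surfaces as DEADLINE_EXCEEDED once the call's deadline has passed, and as CANCELLED otherwise. A small usage sketch against the header above (the wrapper function is hypothetical):

#include <grpc/support/time.h>
#include "src/core/ext/transport/chttp2/transport/status_conversion.h"

/* Before the deadline this yields GRPC_STATUS_CANCELLED;
   at or after it, GRPC_STATUS_DEADLINE_EXCEEDED. */
static grpc_status_code classify_peer_cancel(gpr_timespec deadline) {
  return grpc_chttp2_http2_error_to_grpc_status(GRPC_CHTTP2_CANCEL, deadline);
}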
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index add7678182745e6a570acbad17385c663d0096d0..b19f5f068df9effc8330a8450609d94a18d64a9e 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -45,7 +45,7 @@ static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
 
 int grpc_chttp2_unlocking_check_writes(
     grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
-    grpc_chttp2_transport_writing *transport_writing, int is_parsing) {
+    grpc_chttp2_transport_writing *transport_writing) {
   grpc_chttp2_stream_global *stream_global;
   grpc_chttp2_stream_writing *stream_writing;
 
@@ -61,7 +61,7 @@ int grpc_chttp2_unlocking_check_writes(
                                 [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
 
   if (transport_global->dirtied_local_settings &&
-      !transport_global->sent_local_settings && !is_parsing) {
+      !transport_global->sent_local_settings) {
     gpr_slice_buffer_add(
         &transport_writing->outbuf,
         grpc_chttp2_settings_create(
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index bbba85d80ba9ecbd3572708e119528ded7413331..87175d7943314c8a8392ab9c0e4a240ad831f802 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -263,6 +263,17 @@ void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
                                    grpc_call_element *cur_elem) {
   grpc_transport_stream_op op;
   memset(&op, 0, sizeof(op));
-  op.cancel_with_status = GRPC_STATUS_CANCELLED;
+  op.cancel_error = GRPC_ERROR_CANCELLED;
+  grpc_call_next_op(exec_ctx, cur_elem, &op);
+}
+
+void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
+                                                grpc_call_element *cur_elem,
+                                                grpc_status_code status,
+                                                gpr_slice *optional_message) {
+  grpc_transport_stream_op op;
+  memset(&op, 0, sizeof(op));
+  grpc_transport_stream_op_add_cancellation_with_message(&op, status,
+                                                         optional_message);
   grpc_call_next_op(exec_ctx, cur_elem, &op);
 }
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 41dd4a0d8a15b43ca176a914138e442c815cd482..d72c015b677d4760e879c5eec0f30ee1b8859104 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -273,6 +273,11 @@ void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
 void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx,
                                    grpc_call_element *cur_elem);
 
+void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx,
+                                                grpc_call_element *cur_elem,
+                                                grpc_status_code status,
+                                                gpr_slice *optional_message);
+
 extern int grpc_trace_channel;
 
 #define GRPC_CALL_LOG_OP(sev, elem, op) \
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index ab6c6c9ef0080842a98a747196058b3bd6648be1..8057e251f0b6adf812e2fd3ef98e8f2697feae4c 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -76,7 +76,13 @@ static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
   if (md == GRPC_MDELEM_STATUS_200) {
     return NULL;
   } else if (md->key == GRPC_MDSTR_STATUS) {
-    grpc_call_element_send_cancel(a->exec_ctx, a->elem);
+    char *message_string;
+    gpr_asprintf(&message_string, "Received http2 header with status: %s",
+                 grpc_mdstr_as_c_string(md->value));
+    gpr_slice message = gpr_slice_from_copied_string(message_string);
+    gpr_free(message_string);
+    grpc_call_element_send_cancel_with_message(a->exec_ctx, a->elem,
+                                               GRPC_STATUS_CANCELLED, &message);
     return NULL;
   } else if (md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
     return NULL;
diff --git a/src/core/lib/debug/trace.c b/src/core/lib/debug/trace.c
index 555f497b7847681f70dd2b29e5179b2433d3c0b0..c56046785b380571d60e2b599318be96673360b6 100644
--- a/src/core/lib/debug/trace.c
+++ b/src/core/lib/debug/trace.c
@@ -88,7 +88,11 @@ static void parse(const char *s) {
   split(s, &strings, &nstrings);
 
   for (i = 0; i < nstrings; i++) {
-    grpc_tracer_set_enabled(strings[i], 1);
+    if (strings[i][0] == '-') {
+      grpc_tracer_set_enabled(strings[i] + 1, 0);
+    } else {
+      grpc_tracer_set_enabled(strings[i], 1);
+    }
   }
 
   for (i = 0; i < nstrings; i++) {
@@ -117,7 +121,7 @@ int grpc_tracer_set_enabled(const char *name, int enabled) {
   tracer *t;
   if (0 == strcmp(name, "all")) {
     for (t = tracers; t; t = t->next) {
-      *t->flag = 1;
+      *t->flag = enabled;
     }
   } else {
     int found = 0;
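Two behavioural fixes land in trace.c: a leading '-' in the comma-separated tracer list now disables that tracer instead of enabling it (e.g. an entry of the form -http), and "all" now honours the enabled flag rather than always switching tracers on. A hedged sketch of exercising this through the setter, assuming grpc_tracer_set_enabled is exposed via <grpc/grpc.h> as elsewhere in the tree:

#include <grpc/grpc.h>

static void configure_tracers(void) {
  grpc_tracer_set_enabled("all", 1);  /* enable every registered tracer */
  grpc_tracer_set_enabled("http", 0); /* then selectively disable one */
  grpc_tracer_set_enabled("all", 0);  /* with this change, actually disables them all */
}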
diff --git a/src/core/lib/http/parser.c.orig b/src/core/lib/http/parser.c.orig
deleted file mode 100644
index 74d90fd8bfb9ab300e1bae2e1ca2081ba8005455..0000000000000000000000000000000000000000
--- a/src/core/lib/http/parser.c.orig
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/lib/http/parser.h"
-
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/useful.h>
-
-int grpc_http1_trace = 0;
-
-static char *buf2str(void *buffer, size_t length) {
-  char *out = gpr_malloc(length + 1);
-  memcpy(out, buffer, length);
-  out[length] = 0;
-  return out;
-}
-
-static grpc_error *handle_response_line(grpc_http_parser *parser) {
-  uint8_t *beg = parser->cur_line;
-  uint8_t *cur = beg;
-  uint8_t *end = beg + parser->cur_line_length;
-
-  if (cur == end || *cur++ != 'H') return GRPC_ERROR_CREATE("Expected 'H'");
-  if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'");
-  if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'");
-  if (cur == end || *cur++ != 'P') return GRPC_ERROR_CREATE("Expected 'P'");
-  if (cur == end || *cur++ != '/') return GRPC_ERROR_CREATE("Expected '/'");
-  if (cur == end || *cur++ != '1') return GRPC_ERROR_CREATE("Expected '1'");
-  if (cur == end || *cur++ != '.') return GRPC_ERROR_CREATE("Expected '.'");
-  if (cur == end || *cur < '0' || *cur++ > '1') {
-    return GRPC_ERROR_CREATE("Expected HTTP/1.0 or HTTP/1.1");
-  }
-  if (cur == end || *cur++ != ' ') return GRPC_ERROR_CREATE("Expected ' '");
-  if (cur == end || *cur < '1' || *cur++ > '9')
-    return GRPC_ERROR_CREATE("Expected status code");
-  if (cur == end || *cur < '0' || *cur++ > '9')
-    return GRPC_ERROR_CREATE("Expected status code");
-  if (cur == end || *cur < '0' || *cur++ > '9')
-    return GRPC_ERROR_CREATE("Expected status code");
-  parser->http.response->status =
-      (cur[-3] - '0') * 100 + (cur[-2] - '0') * 10 + (cur[-1] - '0');
-  if (cur == end || *cur++ != ' ') return GRPC_ERROR_CREATE("Expected ' '");
-
-  /* we don't really care about the status code message */
-
-  return GRPC_ERROR_NONE;
-}
-
-static grpc_error *handle_request_line(grpc_http_parser *parser) {
-  uint8_t *beg = parser->cur_line;
-  uint8_t *cur = beg;
-  uint8_t *end = beg + parser->cur_line_length;
-  uint8_t vers_major = 0;
-  uint8_t vers_minor = 0;
-
-  while (cur != end && *cur++ != ' ')
-    ;
-  if (cur == end) return GRPC_ERROR_CREATE("No method on HTTP request line");
-  parser->http.request->method = buf2str(beg, (size_t)(cur - beg - 1));
-
-  beg = cur;
-  while (cur != end && *cur++ != ' ')
-    ;
-  if (cur == end) return GRPC_ERROR_CREATE("No path on HTTP request line");
-  parser->http.request->path = buf2str(beg, (size_t)(cur - beg - 1));
-
-  if (cur == end || *cur++ != 'H') return GRPC_ERROR_CREATE("Expected 'H'");
-  if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'");
-  if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'");
-  if (cur == end || *cur++ != 'P') return GRPC_ERROR_CREATE("Expected 'P'");
-  if (cur == end || *cur++ != '/') return GRPC_ERROR_CREATE("Expected '/'");
-  vers_major = (uint8_t)(*cur++ - '1' + 1);
-  ++cur;
-  if (cur == end)
-    return GRPC_ERROR_CREATE("End of line in HTTP version string");
-  vers_minor = (uint8_t)(*cur++ - '1' + 1);
-
-  if (vers_major == 1) {
-    if (vers_minor == 0) {
-      parser->http.request->version = GRPC_HTTP_HTTP10;
-    } else if (vers_minor == 1) {
-      parser->http.request->version = GRPC_HTTP_HTTP11;
-    } else {
-      return GRPC_ERROR_CREATE(
-          "Expected one of HTTP/1.0, HTTP/1.1, or HTTP/2.0");
-    }
-  } else if (vers_major == 2) {
-    if (vers_minor == 0) {
-      parser->http.request->version = GRPC_HTTP_HTTP20;
-    } else {
-      return GRPC_ERROR_CREATE(
-          "Expected one of HTTP/1.0, HTTP/1.1, or HTTP/2.0");
-    }
-  } else {
-    return GRPC_ERROR_CREATE("Expected one of HTTP/1.0, HTTP/1.1, or HTTP/2.0");
-  }
-
-  return GRPC_ERROR_NONE;
-}
-
-static grpc_error *handle_first_line(grpc_http_parser *parser) {
-  switch (parser->type) {
-    case GRPC_HTTP_REQUEST:
-      return handle_request_line(parser);
-    case GRPC_HTTP_RESPONSE:
-      return handle_response_line(parser);
-  }
-  GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
-}
-
-static grpc_error *add_header(grpc_http_parser *parser) {
-  uint8_t *beg = parser->cur_line;
-  uint8_t *cur = beg;
-  uint8_t *end = beg + parser->cur_line_length;
-  size_t *hdr_count = NULL;
-  grpc_http_header **hdrs = NULL;
-  grpc_http_header hdr = {NULL, NULL};
-  grpc_error *error = GRPC_ERROR_NONE;
-
-  GPR_ASSERT(cur != end);
-
-  if (*cur == ' ' || *cur == '\t') {
-    error = GRPC_ERROR_CREATE("Continued header lines not supported yet");
-    goto done;
-  }
-
-  while (cur != end && *cur != ':') {
-    cur++;
-  }
-  if (cur == end) {
-<<<<<<< HEAD
-    error = GRPC_ERROR_CREATE("Didn't find ':' in header string");
-    goto done;
-=======
-    if (grpc_http1_trace) {
-      gpr_log(GPR_ERROR, "Didn't find ':' in header string");
-    }
-    goto error;
->>>>>>> a709afe241d8b264a1c326315f757b4a8d330207
-  }
-  GPR_ASSERT(cur >= beg);
-  hdr.key = buf2str(beg, (size_t)(cur - beg));
-  cur++; /* skip : */
-
-  while (cur != end && (*cur == ' ' || *cur == '\t')) {
-    cur++;
-  }
-  GPR_ASSERT((size_t)(end - cur) >= parser->cur_line_end_length);
-  hdr.value = buf2str(cur, (size_t)(end - cur) - parser->cur_line_end_length);
-
-  switch (parser->type) {
-    case GRPC_HTTP_RESPONSE:
-      hdr_count = &parser->http.response->hdr_count;
-      hdrs = &parser->http.response->hdrs;
-      break;
-    case GRPC_HTTP_REQUEST:
-      hdr_count = &parser->http.request->hdr_count;
-      hdrs = &parser->http.request->hdrs;
-      break;
-  }
-
-  if (*hdr_count == parser->hdr_capacity) {
-    parser->hdr_capacity =
-        GPR_MAX(parser->hdr_capacity + 1, parser->hdr_capacity * 3 / 2);
-    *hdrs = gpr_realloc(*hdrs, parser->hdr_capacity * sizeof(**hdrs));
-  }
-  (*hdrs)[(*hdr_count)++] = hdr;
-
-done:
-  if (error != GRPC_ERROR_NONE) {
-    gpr_free(hdr.key);
-    gpr_free(hdr.value);
-  }
-  return error;
-}
-
-static grpc_error *finish_line(grpc_http_parser *parser) {
-  grpc_error *err;
-  switch (parser->state) {
-    case GRPC_HTTP_FIRST_LINE:
-      err = handle_first_line(parser);
-      if (err != GRPC_ERROR_NONE) return err;
-      parser->state = GRPC_HTTP_HEADERS;
-      break;
-    case GRPC_HTTP_HEADERS:
-      if (parser->cur_line_length == parser->cur_line_end_length) {
-        parser->state = GRPC_HTTP_BODY;
-        break;
-      }
-      err = add_header(parser);
-      if (err != GRPC_ERROR_NONE) {
-        return err;
-      }
-      break;
-    case GRPC_HTTP_BODY:
-      GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
-  }
-
-  parser->cur_line_length = 0;
-  return GRPC_ERROR_NONE;
-}
-
-static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
-  size_t *body_length = NULL;
-  char **body = NULL;
-
-  if (parser->type == GRPC_HTTP_RESPONSE) {
-    body_length = &parser->http.response->body_length;
-    body = &parser->http.response->body;
-  } else if (parser->type == GRPC_HTTP_REQUEST) {
-    body_length = &parser->http.request->body_length;
-    body = &parser->http.request->body;
-  } else {
-    GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
-  }
-
-  if (*body_length == parser->body_capacity) {
-    parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
-    *body = gpr_realloc((void *)*body, parser->body_capacity);
-  }
-  (*body)[*body_length] = (char)byte;
-  (*body_length)++;
-
-  return GRPC_ERROR_NONE;
-}
-
-static bool check_line(grpc_http_parser *parser) {
-  if (parser->cur_line_length >= 2 &&
-      parser->cur_line[parser->cur_line_length - 2] == '\r' &&
-      parser->cur_line[parser->cur_line_length - 1] == '\n') {
-    return true;
-  }
-
-  // HTTP request with \n\r line termiantors.
-  else if (parser->cur_line_length >= 2 &&
-           parser->cur_line[parser->cur_line_length - 2] == '\n' &&
-           parser->cur_line[parser->cur_line_length - 1] == '\r') {
-    return true;
-  }
-
-  // HTTP request with only \n line terminators.
-  else if (parser->cur_line_length >= 1 &&
-           parser->cur_line[parser->cur_line_length - 1] == '\n') {
-    parser->cur_line_end_length = 1;
-    return true;
-  }
-
-  return false;
-}
-
-static grpc_error *addbyte(grpc_http_parser *parser, uint8_t byte) {
-  switch (parser->state) {
-    case GRPC_HTTP_FIRST_LINE:
-    case GRPC_HTTP_HEADERS:
-      if (parser->cur_line_length >= GRPC_HTTP_PARSER_MAX_HEADER_LENGTH) {
-        if (grpc_http1_trace)
-          gpr_log(GPR_ERROR, "HTTP client max line length (%d) exceeded",
-                  GRPC_HTTP_PARSER_MAX_HEADER_LENGTH);
-        return 0;
-      }
-      parser->cur_line[parser->cur_line_length] = byte;
-      parser->cur_line_length++;
-      if (check_line(parser)) {
-        return finish_line(parser);
-      } else {
-        return GRPC_ERROR_NONE;
-      }
-      GPR_UNREACHABLE_CODE(return 0);
-    case GRPC_HTTP_BODY:
-      return addbyte_body(parser, byte);
-  }
-  GPR_UNREACHABLE_CODE(return 0);
-}
-
-void grpc_http_parser_init(grpc_http_parser *parser, grpc_http_type type,
-                           void *request_or_response) {
-  memset(parser, 0, sizeof(*parser));
-  parser->state = GRPC_HTTP_FIRST_LINE;
-  parser->type = type;
-  parser->http.request_or_response = request_or_response;
-  parser->cur_line_end_length = 2;
-}
-
-void grpc_http_parser_destroy(grpc_http_parser *parser) {}
-
-void grpc_http_request_destroy(grpc_http_request *request) {
-  size_t i;
-  gpr_free(request->body);
-  for (i = 0; i < request->hdr_count; i++) {
-    gpr_free(request->hdrs[i].key);
-    gpr_free(request->hdrs[i].value);
-  }
-  gpr_free(request->hdrs);
-  gpr_free(request->method);
-  gpr_free(request->path);
-}
-
-void grpc_http_response_destroy(grpc_http_response *response) {
-  size_t i;
-  gpr_free(response->body);
-  for (i = 0; i < response->hdr_count; i++) {
-    gpr_free(response->hdrs[i].key);
-    gpr_free(response->hdrs[i].value);
-  }
-  gpr_free(response->hdrs);
-}
-
-grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, gpr_slice slice) {
-  size_t i;
-
-  for (i = 0; i < GPR_SLICE_LENGTH(slice); i++) {
-    grpc_error *err = addbyte(parser, GPR_SLICE_START_PTR(slice)[i]);
-    if (err != GRPC_ERROR_NONE) return err;
-  }
-
-  return GRPC_ERROR_NONE;
-}
-
-grpc_error *grpc_http_parser_eof(grpc_http_parser *parser) {
-  if (parser->state != GRPC_HTTP_BODY) {
-    return GRPC_ERROR_CREATE("Did not finish headers");
-  }
-  return GRPC_ERROR_NONE;
-}
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 540fb4fa7e40924a063d1e8f3a24f754f84a515b..149c55663c0167321a56f77aa5e581be2a747297 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -37,6 +37,7 @@
 #include <stdbool.h>
 #include <string.h>
 
+#include <grpc/status.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/avl.h>
 #include <grpc/support/log.h>
@@ -47,6 +48,8 @@
 #include <grpc/support/log_windows.h>
 #endif
 
+#include "src/core/lib/profiling/timers.h"
+
 static void destroy_integer(void *key) {}
 
 static void *copy_integer(void *key) { return key; }
@@ -115,6 +118,8 @@ static const char *error_int_name(grpc_error_ints key) {
       return "wsa_error";
     case GRPC_ERROR_INT_HTTP_STATUS:
       return "http_status";
+    case GRPC_ERROR_INT_LIMIT:
+      return "limit";
   }
   GPR_UNREACHABLE_CODE(return "unknown");
 }
@@ -169,8 +174,8 @@ static bool is_special(grpc_error *err) {
 grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
                            const char *func) {
   if (is_special(err)) return err;
-  gpr_log(GPR_DEBUG, "%p: %d -> %d [%s:%d %s]", err, err->refs.count,
-          err->refs.count + 1, file, line, func);
+  gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
+          err->refs.count, err->refs.count + 1, file, line, func);
   gpr_ref(&err->refs);
   return err;
 }
@@ -195,8 +200,8 @@ static void error_destroy(grpc_error *err) {
 void grpc_error_unref(grpc_error *err, const char *file, int line,
                       const char *func) {
   if (is_special(err)) return;
-  gpr_log(GPR_DEBUG, "%p: %d -> %d [%s:%d %s]", err, err->refs.count,
-          err->refs.count - 1, file, line, func);
+  gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
+          err->refs.count, err->refs.count - 1, file, line, func);
   if (gpr_unref(&err->refs)) {
     error_destroy(err);
   }
@@ -213,6 +218,7 @@ void grpc_error_unref(grpc_error *err) {
 grpc_error *grpc_error_create(const char *file, int line, const char *desc,
                               grpc_error **referencing,
                               size_t num_referencing) {
+  GPR_TIMER_BEGIN("grpc_error_create", 0);
   grpc_error *err = gpr_malloc(sizeof(*err));
   if (err == NULL) {  // TODO(ctiller): make gpr_malloc return NULL
     return GRPC_ERROR_OOM;
@@ -238,57 +244,91 @@ grpc_error *grpc_error_create(const char *file, int line, const char *desc,
                            (void *)(uintptr_t)GRPC_ERROR_TIME_CREATED,
                            box_time(gpr_now(GPR_CLOCK_REALTIME)));
   gpr_ref_init(&err->refs, 1);
+  GPR_TIMER_END("grpc_error_create", 0);
   return err;
 }
 
 static grpc_error *copy_error_and_unref(grpc_error *in) {
+  GPR_TIMER_BEGIN("copy_error_and_unref", 0);
+  grpc_error *out;
   if (is_special(in)) {
-    if (in == GRPC_ERROR_NONE) return GRPC_ERROR_CREATE("no error");
-    if (in == GRPC_ERROR_OOM) return GRPC_ERROR_CREATE("oom");
-    if (in == GRPC_ERROR_CANCELLED) return GRPC_ERROR_CREATE("cancelled");
-    return GRPC_ERROR_CREATE("unknown");
-  }
-  grpc_error *out = gpr_malloc(sizeof(*out));
+    if (in == GRPC_ERROR_NONE)
+      out = GRPC_ERROR_CREATE("no error");
+    else if (in == GRPC_ERROR_OOM)
+      out = GRPC_ERROR_CREATE("oom");
+    else if (in == GRPC_ERROR_CANCELLED)
+      out =
+          grpc_error_set_int(GRPC_ERROR_CREATE("cancelled"),
+                             GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_CANCELLED);
+    else
+      out = GRPC_ERROR_CREATE("unknown");
+  } else {
+    out = gpr_malloc(sizeof(*out));
 #ifdef GRPC_ERROR_REFCOUNT_DEBUG
-  gpr_log(GPR_DEBUG, "%p create copying", out);
+    gpr_log(GPR_DEBUG, "%p create copying", out);
 #endif
-  out->ints = gpr_avl_ref(in->ints);
-  out->strs = gpr_avl_ref(in->strs);
-  out->errs = gpr_avl_ref(in->errs);
-  out->times = gpr_avl_ref(in->times);
-  out->next_err = in->next_err;
-  gpr_ref_init(&out->refs, 1);
-  GRPC_ERROR_UNREF(in);
+    out->ints = gpr_avl_ref(in->ints);
+    out->strs = gpr_avl_ref(in->strs);
+    out->errs = gpr_avl_ref(in->errs);
+    out->times = gpr_avl_ref(in->times);
+    out->next_err = in->next_err;
+    gpr_ref_init(&out->refs, 1);
+    GRPC_ERROR_UNREF(in);
+  }
+  GPR_TIMER_END("copy_error_and_unref", 0);
   return out;
 }
 
 grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
                                intptr_t value) {
+  GPR_TIMER_BEGIN("grpc_error_set_int", 0);
   grpc_error *new = copy_error_and_unref(src);
   new->ints = gpr_avl_add(new->ints, (void *)(uintptr_t)which, (void *)value);
+  GPR_TIMER_END("grpc_error_set_int", 0);
   return new;
 }
 
 bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
+  GPR_TIMER_BEGIN("grpc_error_get_int", 0);
   void *pp;
+  if (is_special(err)) {
+    if (err == GRPC_ERROR_CANCELLED && which == GRPC_ERROR_INT_GRPC_STATUS) {
+      *p = GRPC_STATUS_CANCELLED;
+      GPR_TIMER_END("grpc_error_get_int", 0);
+      return true;
+    }
+    GPR_TIMER_END("grpc_error_get_int", 0);
+    return false;
+  }
   if (gpr_avl_maybe_get(err->ints, (void *)(uintptr_t)which, &pp)) {
     if (p != NULL) *p = (intptr_t)pp;
+    GPR_TIMER_END("grpc_error_get_int", 0);
     return true;
   }
+  GPR_TIMER_END("grpc_error_get_int", 0);
   return false;
 }
 
 grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
                                const char *value) {
+  GPR_TIMER_BEGIN("grpc_error_set_str", 0);
   grpc_error *new = copy_error_and_unref(src);
   new->strs =
       gpr_avl_add(new->strs, (void *)(uintptr_t)which, gpr_strdup(value));
+  GPR_TIMER_END("grpc_error_set_str", 0);
   return new;
 }
 
+const char *grpc_error_get_str(grpc_error *err, grpc_error_strs which) {
+  if (is_special(err)) return NULL;
+  return gpr_avl_get(err->strs, (void *)(uintptr_t)which);
+}
+
 grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) {
+  GPR_TIMER_BEGIN("grpc_error_add_child", 0);
   grpc_error *new = copy_error_and_unref(src);
   new->errs = gpr_avl_add(new->errs, (void *)(new->next_err++), child);
+  GPR_TIMER_END("grpc_error_add_child", 0);
   return new;
 }
 
@@ -480,6 +520,7 @@ void grpc_error_free_string(const char *str) {
 }
 
 const char *grpc_error_string(grpc_error *err) {
+  GPR_TIMER_BEGIN("grpc_error_string", 0);
   if (err == GRPC_ERROR_NONE) return no_error_string;
   if (err == GRPC_ERROR_OOM) return oom_error_string;
   if (err == GRPC_ERROR_CANCELLED) return cancelled_error_string;
@@ -496,7 +537,9 @@ const char *grpc_error_string(grpc_error *err) {
 
   qsort(kvs.kvs, kvs.num_kvs, sizeof(kv_pair), cmp_kvs);
 
-  return finish_kvs(&kvs);
+  const char *out = finish_kvs(&kvs);
+  GPR_TIMER_END("grpc_error_string", 0);
+  return out;
 }
 
 grpc_error *grpc_os_error(const char *file, int line, int err,
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index 69cdf3028e4d009ed512c69d583d153ff567fe1c..13f898e31ad73402f0d75363db1a2451617ad383 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -92,6 +92,8 @@ typedef enum {
   GRPC_ERROR_INT_FD,
   /// HTTP status (i.e. 404)
   GRPC_ERROR_INT_HTTP_STATUS,
+  /// context-sensitive limit associated with the error
+  GRPC_ERROR_INT_LIMIT,
 } grpc_error_ints;
 
 typedef enum {
@@ -163,23 +165,25 @@ void grpc_error_unref(grpc_error *err);
 #endif
 
 grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
-                               intptr_t value);
+                               intptr_t value) GRPC_MUST_USE_RESULT;
 bool grpc_error_get_int(grpc_error *error, grpc_error_ints which, intptr_t *p);
 grpc_error *grpc_error_set_time(grpc_error *src, grpc_error_times which,
-                                gpr_timespec value);
+                                gpr_timespec value) GRPC_MUST_USE_RESULT;
 grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
-                               const char *value);
+                               const char *value) GRPC_MUST_USE_RESULT;
+const char *grpc_error_get_str(grpc_error *error, grpc_error_strs which);
 /// Add a child error: an error that is believed to have contributed to this
 /// error occurring. Allows root causing high level errors from lower level
 /// errors that contributed to them.
-grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child);
+grpc_error *grpc_error_add_child(grpc_error *src,
+                                 grpc_error *child) GRPC_MUST_USE_RESULT;
 grpc_error *grpc_os_error(const char *file, int line, int err,
-                          const char *call_name);
+                          const char *call_name) GRPC_MUST_USE_RESULT;
 /// create an error associated with errno!=0 (an 'operating system' error)
 #define GRPC_OS_ERROR(err, call_name) \
   grpc_os_error(__FILE__, __LINE__, err, call_name)
 grpc_error *grpc_wsa_error(const char *file, int line, int err,
-                           const char *call_name);
+                           const char *call_name) GRPC_MUST_USE_RESULT;
 /// windows only: create an error associated with WSAGetLastError()!=0
 #define GRPC_WSA_ERROR(err, call_name) \
   grpc_wsa_error(__FILE__, __LINE__, err, call_name)
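[Editor's note] For reference, a minimal sketch of the call pattern the GRPC_MUST_USE_RESULT annotations above are meant to enforce: every setter returns a new (possibly copied) error, so its result must be kept and the old handle must not be reused. GRPC_ERROR_INT_LIMIT is the key added in this change; GRPC_ERROR_STR_DESCRIPTION is assumed to be one of the existing grpc_error_strs values.

    #include <stdint.h>
    #include "src/core/lib/iomgr/error.h"

    /* Illustrative only: annotate an error, then read a value back. */
    static void annotate_and_inspect(void) {
      grpc_error *err = GRPC_ERROR_CREATE("flow control error");
      /* Keep each setter's return value -- this is exactly what the
         GRPC_MUST_USE_RESULT annotations enforce at compile time. */
      err = grpc_error_set_int(err, GRPC_ERROR_INT_LIMIT, 65535);
      err = grpc_error_set_str(err, GRPC_ERROR_STR_DESCRIPTION, "window exceeded");

      intptr_t limit;
      if (grpc_error_get_int(err, GRPC_ERROR_INT_LIMIT, &limit)) {
        /* limit == 65535 here */
      }
      GRPC_ERROR_UNREF(err);
    }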
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
new file mode 100644
index 0000000000000000000000000000000000000000..cf0fe736a0b492b5c03fbfb81cfacc693a418726
--- /dev/null
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -0,0 +1,1872 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc_posix.h>
+#include <grpc/support/port_platform.h>
+
+/* This polling engine is only relevant on linux kernels supporting epoll() */
+#ifdef GPR_LINUX_EPOLL
+
+#include "src/core/lib/iomgr/ev_epoll_linux.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <poll.h>
+#include <signal.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/tls.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/wakeup_fd_posix.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/block_annotate.h"
+
+/* TODO: sreek - Move this to init.c and initialize this like other tracers. */
+static int grpc_polling_trace = 0; /* Disabled by default */
+#define GRPC_POLLING_TRACE(fmt, ...)       \
+  if (grpc_polling_trace) {                \
+    gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
+  }
+
+static int grpc_wakeup_signal = -1;
+static bool is_grpc_wakeup_signal_initialized = false;
+
+/* Implements the function defined in grpc_posix.h. This function may be
+ * called even before grpc_init() to set a different signal to use. If
+ * signum == -1, the use of signals is disabled */
+void grpc_use_signal(int signum) {
+  grpc_wakeup_signal = signum;
+  is_grpc_wakeup_signal_initialized = true;
+
+  if (grpc_wakeup_signal < 0) {
+    gpr_log(GPR_INFO,
+            "Use of signals is disabled. Epoll engine will not be used");
+  } else {
+    gpr_log(GPR_INFO, "epoll engine will be using signal: %d",
+            grpc_wakeup_signal);
+  }
+}
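[Editor's note] A hedged usage sketch of the function above: grpc_use_signal() is called before grpc_init() so the choice takes effect before any poller starts. grpc_init()/grpc_shutdown() are the public API from <grpc/grpc.h>; SIGRTMIN + 2 is only an arbitrary real-time signal chosen for illustration.

    #include <signal.h>
    #include <grpc/grpc.h>
    #include <grpc/grpc_posix.h>

    int main(void) {
      /* Pick a dedicated wakeup signal before the library starts any pollers.
         Passing -1 instead disables signals, in which case this engine is not
         used. */
      grpc_use_signal(SIGRTMIN + 2);
      grpc_init();
      /* ... create channels/servers as usual ... */
      grpc_shutdown();
      return 0;
    }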
+
+struct polling_island;
+
+/*******************************************************************************
+ * Fd Declarations
+ */
+struct grpc_fd {
+  int fd;
+  /* refst format:
+       bit 0    : 1=Active / 0=Orphaned
+       bits 1-n : refcount
+     Ref/Unref by two to avoid altering the orphaned bit */
+  gpr_atm refst;
+
+  gpr_mu mu;
+
+  /* Indicates that the fd is shutdown and that any pending read/write closures
+     should fail */
+  bool shutdown;
+
+  /* The fd is either closed or we relinquished control of it. In either case,
+     this indicates that the 'fd' on this structure is no longer valid */
+  bool orphaned;
+
+  /* TODO: sreek - Move this to a lockfree implementation */
+  grpc_closure *read_closure;
+  grpc_closure *write_closure;
+
+  /* The polling island to which this fd belongs and the mutex protecting the
+     field */
+  gpr_mu pi_mu;
+  struct polling_island *polling_island;
+
+  struct grpc_fd *freelist_next;
+  grpc_closure *on_done_closure;
+
+  /* The pollset that last noticed that the fd is readable */
+  grpc_pollset *read_notifier_pollset;
+
+  grpc_iomgr_object iomgr_object;
+};
+
+/* Reference counting for fds */
+// #define GRPC_FD_REF_COUNT_DEBUG
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
+static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
+                     int line);
+#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
+#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
+#else
+static void fd_ref(grpc_fd *fd);
+static void fd_unref(grpc_fd *fd);
+#define GRPC_FD_REF(fd, reason) fd_ref(fd)
+#define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
+#endif
+
+static void fd_global_init(void);
+static void fd_global_shutdown(void);
+
+#define CLOSURE_NOT_READY ((grpc_closure *)0)
+#define CLOSURE_READY ((grpc_closure *)1)
+
+/*******************************************************************************
+ * Polling island Declarations
+ */
+
+// #define GRPC_PI_REF_COUNT_DEBUG
+#ifdef GRPC_PI_REF_COUNT_DEBUG
+
+#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
+#define PI_UNREF(p, r) pi_unref_dbg((p), (r), __FILE__, __LINE__)
+
+#else /* defined(GRPC_PI_REF_COUNT_DEBUG) */
+
+#define PI_ADD_REF(p, r) pi_add_ref((p))
+#define PI_UNREF(p, r) pi_unref((p))
+
+#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
+
+typedef struct polling_island {
+  gpr_mu mu;
+  /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
+     the refcount.
+     Once the ref count becomes zero, this structure is destroyed which means
+     we should ensure that there is never a scenario where a PI_ADD_REF() is
+     racing with a PI_UNREF() that just made the ref_count zero. */
+  gpr_refcount ref_count;
+
+  /* Pointer to the polling_island this island merged into.
+   * The merged_to value is set at most once in a polling_island's lifetime (and
+   * only if the island is merged with another island). Because of this, we can
+   * use gpr_atm type here so that we can do atomic access on this and reduce
+   * lock contention on 'mu' mutex.
+   *
+   * Note that if this field is not NULL (i.e not 0), all the remaining fields
+   * (except mu and ref_count) are invalid and must be ignored. */
+  gpr_atm merged_to;
+
+  /* The fd of the underlying epoll set */
+  int epoll_fd;
+
+  /* The file descriptors in the epoll set */
+  size_t fd_cnt;
+  size_t fd_capacity;
+  grpc_fd **fds;
+
+  /* Polling islands that are no longer needed are kept in a freelist so that
+     they can be reused. This field points to the next polling island in the
+     free list */
+  struct polling_island *next_free;
+} polling_island;
+
+/*******************************************************************************
+ * Pollset Declarations
+ */
+struct grpc_pollset_worker {
+  /* Thread id of this worker */
+  pthread_t pt_id;
+
+  /* Used to prevent a worker from getting kicked multiple times */
+  gpr_atm is_kicked;
+  struct grpc_pollset_worker *next;
+  struct grpc_pollset_worker *prev;
+};
+
+struct grpc_pollset {
+  gpr_mu mu;
+  grpc_pollset_worker root_worker;
+  bool kicked_without_pollers;
+
+  bool shutting_down;          /* Is the pollset shutting down? */
+  bool finish_shutdown_called; /* Has finish_shutdown_locked() been called? */
+  grpc_closure *shutdown_done; /* Called after shutdown is complete */
+
+  /* The polling island to which this pollset belongs to */
+  struct polling_island *polling_island;
+};
+
+/*******************************************************************************
+ * Pollset-set Declarations
+ */
+/* TODO: sreek - Change the pollset_set implementation such that a pollset_set
+ * directly points to a polling_island (adding an fd/pollset/pollset_set to the
+ * current pollset_set would then result in polling island merges). This would
+ * remove the need to maintain fd_count here. It would also significantly
+ * simplify the grpc_fd structure since we would no longer need to explicitly
+ * maintain the orphaned state */
+struct grpc_pollset_set {
+  gpr_mu mu;
+
+  size_t pollset_count;
+  size_t pollset_capacity;
+  grpc_pollset **pollsets;
+
+  size_t pollset_set_count;
+  size_t pollset_set_capacity;
+  struct grpc_pollset_set **pollset_sets;
+
+  size_t fd_count;
+  size_t fd_capacity;
+  grpc_fd **fds;
+};
+
+/*******************************************************************************
+ * Common helpers
+ */
+
+static void append_error(grpc_error **composite, grpc_error *error,
+                         const char *desc) {
+  if (error == GRPC_ERROR_NONE) return;
+  if (*composite == GRPC_ERROR_NONE) {
+    *composite = GRPC_ERROR_CREATE(desc);
+  }
+  *composite = grpc_error_add_child(*composite, error);
+}
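[Editor's note] append_error() implements a small composite-error idiom: the first failure creates the composite, and every failure is then attached as a child. A minimal sketch of how the callers below use it; step_one()/step_two() are hypothetical helpers that return GRPC_ERROR_NONE on success.

    /* Hypothetical helpers, shown only to illustrate the idiom. */
    static grpc_error *step_one(void);
    static grpc_error *step_two(void);

    static grpc_error *do_several_steps(void) {
      grpc_error *error = GRPC_ERROR_NONE;
      append_error(&error, step_one(), "do_several_steps");
      append_error(&error, step_two(), "do_several_steps");
      /* GRPC_ERROR_NONE if both steps succeeded; otherwise one composite
         error that carries each failure as a child. */
      return error;
    }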
+
+/*******************************************************************************
+ * Polling island Definitions
+ */
+
+/* The wakeup fd that is used to wake up all threads in a Polling island. This
+   is useful in the polling island merge operation where we need to wake up all
+   the threads currently polling the smaller polling island (so that they can
+   start polling the new/merged polling island)
+
+   NOTE: This fd is initialized to be readable and MUST NOT be consumed i.e the
+   threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
+static grpc_wakeup_fd polling_island_wakeup_fd;
+
+/* Polling island freelist */
+static gpr_mu g_pi_freelist_mu;
+static polling_island *g_pi_freelist = NULL;
+
+static void polling_island_delete(polling_island *pi); /* Forward declaration */
+
+#ifdef GRPC_TSAN
+/* Currently TSAN may incorrectly flag data races between epoll_ctl and
+   epoll_wait for any grpc_fd structs that are added to the epoll set via
+   epoll_ctl and are returned (within a very short window) via epoll_wait().
+
+   To work-around this race, we establish a happens-before relation between
+   the code just-before epoll_ctl() and the code after epoll_wait() by using
+   this atomic */
+gpr_atm g_epoll_sync;
+#endif /* defined(GRPC_TSAN) */
+
+#ifdef GRPC_PI_REF_COUNT_DEBUG
+void pi_add_ref(polling_island *pi);
+void pi_unref(polling_island *pi);
+
+void pi_add_ref_dbg(polling_island *pi, char *reason, char *file, int line) {
+  long old_cnt = gpr_atm_acq_load(&(pi->ref_count.count));
+  pi_add_ref(pi);
+  gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
+          (void *)pi, old_cnt, old_cnt + 1, reason, file, line);
+}
+
+void pi_unref_dbg(polling_island *pi, char *reason, char *file, int line) {
+  long old_cnt = gpr_atm_acq_load(&(pi->ref_count.count));
+  pi_unref(pi);
+  gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
+          (void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
+}
+#endif
+
+void pi_add_ref(polling_island *pi) { gpr_ref(&pi->ref_count); }
+
+void pi_unref(polling_island *pi) {
+  /* If the ref count went to zero, delete the polling island.
+     Note that this deletion need not be done under a lock. Once the ref count
+     goes to zero, we are guaranteed that no one else holds a reference to the
+     polling island (and that there is no racing pi_add_ref() call either).
+
+     Also, if we are deleting the polling island and the merged_to field is
+     non-empty, we should remove a ref to the merged_to polling island
+   */
+  if (gpr_unref(&pi->ref_count)) {
+    polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+    polling_island_delete(pi);
+    if (next != NULL) {
+      PI_UNREF(next, "pi_delete"); /* Recursive call */
+    }
+  }
+}
+
+/* The caller is expected to hold pi->mu lock before calling this function */
+static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
+                                          size_t fd_count, bool add_fd_refs,
+                                          grpc_error **error) {
+  int err;
+  size_t i;
+  struct epoll_event ev;
+  char *err_msg;
+  const char *err_desc = "polling_island_add_fds";
+
+#ifdef GRPC_TSAN
+  /* See the definition of g_epoll_sync for more context */
+  gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0);
+#endif /* defined(GRPC_TSAN) */
+
+  for (i = 0; i < fd_count; i++) {
+    ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
+    ev.data.ptr = fds[i];
+    err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev);
+
+    if (err < 0) {
+      if (errno != EEXIST) {
+        gpr_asprintf(
+            &err_msg,
+            "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)",
+            pi->epoll_fd, fds[i]->fd, errno, strerror(errno));
+        append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
+        gpr_free(err_msg);
+      }
+
+      continue;
+    }
+
+    if (pi->fd_cnt == pi->fd_capacity) {
+      pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
+      pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
+    }
+
+    pi->fds[pi->fd_cnt++] = fds[i];
+    if (add_fd_refs) {
+      GRPC_FD_REF(fds[i], "polling_island");
+    }
+  }
+}
+
+/* The caller is expected to hold pi->mu before calling this */
+static void polling_island_add_wakeup_fd_locked(polling_island *pi,
+                                                grpc_wakeup_fd *wakeup_fd,
+                                                grpc_error **error) {
+  struct epoll_event ev;
+  int err;
+  char *err_msg;
+  const char *err_desc = "polling_island_add_wakeup_fd";
+
+  ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+  ev.data.ptr = wakeup_fd;
+  err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD,
+                  GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev);
+  if (err < 0 && errno != EEXIST) {
+    gpr_asprintf(&err_msg,
+                 "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
+                 "error: %d (%s)",
+                 pi->epoll_fd,
+                 GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno,
+                 strerror(errno));
+    append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
+    gpr_free(err_msg);
+  }
+}
+
+/* The caller is expected to hold pi->mu lock before calling this function */
+static void polling_island_remove_all_fds_locked(polling_island *pi,
+                                                 bool remove_fd_refs,
+                                                 grpc_error **error) {
+  int err;
+  size_t i;
+  char *err_msg;
+  const char *err_desc = "polling_island_remove_fds";
+
+  for (i = 0; i < pi->fd_cnt; i++) {
+    err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, NULL);
+    if (err < 0 && errno != ENOENT) {
+      gpr_asprintf(&err_msg,
+                   "epoll_ctl (epoll_fd: %d) delete fds[%zu]: %d failed with "
+                   "error: %d (%s)",
+                   pi->epoll_fd, i, pi->fds[i]->fd, errno, strerror(errno));
+      append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
+      gpr_free(err_msg);
+    }
+
+    if (remove_fd_refs) {
+      GRPC_FD_UNREF(pi->fds[i], "polling_island");
+    }
+  }
+
+  pi->fd_cnt = 0;
+}
+
+/* The caller is expected to hold pi->mu lock before calling this function */
+static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd,
+                                            bool is_fd_closed,
+                                            grpc_error **error) {
+  int err;
+  size_t i;
+  char *err_msg;
+  const char *err_desc = "polling_island_remove_fd";
+
+  /* If the fd is already closed, then it would have been automatically removed
+     from the epoll set */
+  if (!is_fd_closed) {
+    err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
+    if (err < 0 && errno != ENOENT) {
+      gpr_asprintf(
+          &err_msg,
+          "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
+          pi->epoll_fd, fd->fd, errno, strerror(errno));
+      append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
+      gpr_free(err_msg);
+    }
+  }
+
+  for (i = 0; i < pi->fd_cnt; i++) {
+    if (pi->fds[i] == fd) {
+      pi->fds[i] = pi->fds[--pi->fd_cnt];
+      GRPC_FD_UNREF(fd, "polling_island");
+      break;
+    }
+  }
+}
+
+/* Might return NULL in case of an error */
+static polling_island *polling_island_create(grpc_fd *initial_fd,
+                                             grpc_error **error) {
+  polling_island *pi = NULL;
+  char *err_msg;
+  const char *err_desc = "polling_island_create";
+
+  /* Try to get one from the polling island freelist */
+  gpr_mu_lock(&g_pi_freelist_mu);
+  if (g_pi_freelist != NULL) {
+    pi = g_pi_freelist;
+    g_pi_freelist = g_pi_freelist->next_free;
+    pi->next_free = NULL;
+  }
+  gpr_mu_unlock(&g_pi_freelist_mu);
+
+  /* Create new polling island if we could not get one from the free list */
+  if (pi == NULL) {
+    pi = gpr_malloc(sizeof(*pi));
+    gpr_mu_init(&pi->mu);
+    pi->fd_cnt = 0;
+    pi->fd_capacity = 0;
+    pi->fds = NULL;
+  }
+
+  gpr_ref_init(&pi->ref_count, 0);
+  gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
+
+  pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
+
+  if (pi->epoll_fd < 0) {
+    gpr_asprintf(&err_msg, "epoll_create1 failed with error %d (%s)", errno,
+                 strerror(errno));
+    append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
+    gpr_free(err_msg);
+  } else {
+    polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error);
+    pi->next_free = NULL;
+
+    if (initial_fd != NULL) {
+      /* Lock the polling island here just in case we got this structure from
+         the freelist and the polling island lock was not released yet (by the
+         code that adds the polling island to the freelist) */
+      gpr_mu_lock(&pi->mu);
+      polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
+      gpr_mu_unlock(&pi->mu);
+    }
+  }
+
+  return pi;
+}
+
+static void polling_island_delete(polling_island *pi) {
+  GPR_ASSERT(pi->fd_cnt == 0);
+
+  gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
+
+  close(pi->epoll_fd);
+  pi->epoll_fd = -1;
+
+  gpr_mu_lock(&g_pi_freelist_mu);
+  pi->next_free = g_pi_freelist;
+  g_pi_freelist = pi;
+  gpr_mu_unlock(&g_pi_freelist_mu);
+}
+
+/* Attempts to get the last polling island in the linked list (linked by the
+ * 'merged_to' field). Since this does not lock the polling island, there is no
+ * guarantee that the island returned is the last island */
+static polling_island *polling_island_maybe_get_latest(polling_island *pi) {
+  polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+  while (next != NULL) {
+    pi = next;
+    next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+  }
+
+  return pi;
+}
+
+/* Gets the lock on the *latest* polling island i.e the last polling island in
+   the linked list (linked by the 'merged_to' field). Call gpr_mu_unlock on the
+   returned polling island's mu.
+   Usage: To lock/unlock polling island "pi", do the following:
+      polling_island *pi_latest = polling_island_lock(pi);
+      ...
+      ... critical section ..
+      ...
+      gpr_mu_unlock(&pi_latest->mu); // NOTE: use pi_latest->mu. NOT pi->mu */
+static polling_island *polling_island_lock(polling_island *pi) {
+  polling_island *next = NULL;
+
+  while (true) {
+    next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+    if (next == NULL) {
+      /* Looks like 'pi' is the last node in the linked list but unless we check
+         this by holding the pi->mu lock, we cannot be sure (i.e without the
+         pi->mu lock, we don't prevent island merges).
+         To be absolutely sure, check once more by holding the pi->mu lock */
+      gpr_mu_lock(&pi->mu);
+      next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+      if (next == NULL) {
+        /* pi is in fact the last node and we have the pi->mu lock. We're done */
+        break;
+      }
+
+      /* pi->merged_to is not NULL i.e pi isn't the last node anymore. pi->mu
+       * isn't the lock we are interested in. Continue traversing the list */
+      gpr_mu_unlock(&pi->mu);
+    }
+
+    pi = next;
+  }
+
+  return pi;
+}
+
+/* Gets the lock on the *latest* polling islands in the linked lists pointed by
+   *p and *q (and also updates *p and *q to point to the latest polling islands)
+
+   This function is needed because calling the following block of code to obtain
+   locks on polling islands (*p and *q) is prone to deadlocks.
+     {
+       polling_island_lock(*p, true);
+       polling_island_lock(*q, true);
+     }
+
+   Usage/example:
+     polling_island *p1;
+     polling_island *p2;
+     ..
+     polling_island_lock_pair(&p1, &p2);
+     ..
+     .. Critical section with both p1 and p2 locked
+     ..
+     // Release locks: Always call polling_island_unlock_pair() to release locks
+     polling_island_unlock_pair(p1, p2);
+*/
+static void polling_island_lock_pair(polling_island **p, polling_island **q) {
+  polling_island *pi_1 = *p;
+  polling_island *pi_2 = *q;
+  polling_island *next_1 = NULL;
+  polling_island *next_2 = NULL;
+
+  /* The algorithm is simple:
+      - Go to the last polling islands in the linked lists *pi_1 and *pi_2 (and
+        keep updating pi_1 and pi_2)
+      - Then obtain locks on the islands by following a lock order rule of
+        locking polling_island with lower address first
+           Special case: Before obtaining the locks, check if pi_1 and pi_2 are
+           pointing to the same island. If that is the case, we can just call
+           polling_island_lock()
+      - After obtaining both the locks, double check that the polling islands
+        are still the last polling islands in their respective linked lists
+        (this is because there might have been polling island merges before
+        we got the lock)
+      - If the polling islands are the last islands, we are done. If not,
+        release the locks and continue the process from the first step */
+  while (true) {
+    next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
+    while (next_1 != NULL) {
+      pi_1 = next_1;
+      next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
+    }
+
+    next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
+    while (next_2 != NULL) {
+      pi_2 = next_2;
+      next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
+    }
+
+    if (pi_1 == pi_2) {
+      pi_1 = pi_2 = polling_island_lock(pi_1);
+      break;
+    }
+
+    if (pi_1 < pi_2) {
+      gpr_mu_lock(&pi_1->mu);
+      gpr_mu_lock(&pi_2->mu);
+    } else {
+      gpr_mu_lock(&pi_2->mu);
+      gpr_mu_lock(&pi_1->mu);
+    }
+
+    next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
+    next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
+    if (next_1 == NULL && next_2 == NULL) {
+      break;
+    }
+
+    gpr_mu_unlock(&pi_1->mu);
+    gpr_mu_unlock(&pi_2->mu);
+  }
+
+  *p = pi_1;
+  *q = pi_2;
+}
+
+static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
+  if (p == q) {
+    gpr_mu_unlock(&p->mu);
+  } else {
+    gpr_mu_unlock(&p->mu);
+    gpr_mu_unlock(&q->mu);
+  }
+}
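[Editor's note] polling_island_lock_pair() avoids deadlock by always acquiring the lower-address mutex first (and then re-checking merged_to after locking). The address-ordering rule in isolation, as a self-contained pthreads sketch rather than gRPC code:

    #include <pthread.h>

    /* Two threads that lock the same pair of mutexes in opposite "logical"
       order still acquire them in the same physical (address) order, so they
       cannot deadlock against each other. */
    static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b) {
      if (a == b) {
        pthread_mutex_lock(a);
      } else if (a < b) {
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
      } else {
        pthread_mutex_lock(b);
        pthread_mutex_lock(a);
      }
    }

    static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b) {
      pthread_mutex_unlock(a);
      if (a != b) pthread_mutex_unlock(b);
    }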
+
+static polling_island *polling_island_merge(polling_island *p,
+                                            polling_island *q,
+                                            grpc_error **error) {
+  /* Get locks on both the polling islands */
+  polling_island_lock_pair(&p, &q);
+
+  if (p != q) {
+    /* Make sure that p points to the polling island with fewer fds than q */
+    if (p->fd_cnt > q->fd_cnt) {
+      GPR_SWAP(polling_island *, p, q);
+    }
+
+    /* Merge p with q i.e. move all the fds from p (the one with fewer fds) to q.
+       Note that the refcounts on the fds being moved will not change here
+       (this is why the last param in the following two functions is 'false') */
+    polling_island_add_fds_locked(q, p->fds, p->fd_cnt, false, error);
+    polling_island_remove_all_fds_locked(p, false, error);
+
+    /* Wake up all the pollers (if any) on p so that they pick up this change */
+    polling_island_add_wakeup_fd_locked(p, &polling_island_wakeup_fd, error);
+
+    /* Add the 'merged_to' link from p --> q */
+    gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
+    PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
+  }
+  /* else if p == q, nothing needs to be done */
+
+  polling_island_unlock_pair(p, q);
+
+  /* Return the merged polling island (Note that no merge would have happened
+     if p == q which is ok) */
+  return q;
+}
+
+static grpc_error *polling_island_global_init() {
+  grpc_error *error = GRPC_ERROR_NONE;
+
+  gpr_mu_init(&g_pi_freelist_mu);
+  g_pi_freelist = NULL;
+
+  error = grpc_wakeup_fd_init(&polling_island_wakeup_fd);
+  if (error == GRPC_ERROR_NONE) {
+    error = grpc_wakeup_fd_wakeup(&polling_island_wakeup_fd);
+  }
+
+  return error;
+}
+
+static void polling_island_global_shutdown() {
+  polling_island *next;
+  gpr_mu_lock(&g_pi_freelist_mu);
+  gpr_mu_unlock(&g_pi_freelist_mu);
+  while (g_pi_freelist != NULL) {
+    next = g_pi_freelist->next_free;
+    gpr_mu_destroy(&g_pi_freelist->mu);
+    gpr_free(g_pi_freelist->fds);
+    gpr_free(g_pi_freelist);
+    g_pi_freelist = next;
+  }
+  gpr_mu_destroy(&g_pi_freelist_mu);
+
+  grpc_wakeup_fd_destroy(&polling_island_wakeup_fd);
+}
+
+/*******************************************************************************
+ * Fd Definitions
+ */
+
+/* We need to keep a freelist not because of any concerns of malloc performance
+ * but instead so that implementations with multiple threads in (for example)
+ * epoll_wait deal with the race between pollset removal and incoming poll
+ * notifications.
+ *
+ * The problem is that the poller ultimately holds a reference to this
+ * object, so it is very difficult to know when it is safe to free it, at least
+ * without some expensive synchronization.
+ *
+ * If we keep the object freelisted, in the worst case losing this race just
+ * becomes a spurious read notification on a reused fd.
+ */
+
+/* The alarm system needs to be able to wakeup 'some poller' sometimes
+ * (specifically when a new alarm needs to be triggered earlier than the next
+ * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
+ * case occurs. */
+
+/* TODO: sreek: Right now, this wakes up all pollers. In the future we should
+ * make sure to wake up one polling thread (which can wake up other threads if
+ * needed) */
+grpc_wakeup_fd grpc_global_wakeup_fd;
+
+static grpc_fd *fd_freelist = NULL;
+static gpr_mu fd_freelist_mu;
+
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
+#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
+static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
+                   int line) {
+  gpr_log(GPR_DEBUG, "FD %d %p   ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
+          (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
+          gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+#else
+#define REF_BY(fd, n, reason) ref_by(fd, n)
+#define UNREF_BY(fd, n, reason) unref_by(fd, n)
+static void ref_by(grpc_fd *fd, int n) {
+#endif
+  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
+}
+
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
+                     int line) {
+  gpr_atm old;
+  gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
+          (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
+          gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+#else
+static void unref_by(grpc_fd *fd, int n) {
+  gpr_atm old;
+#endif
+  old = gpr_atm_full_fetch_add(&fd->refst, -n);
+  if (old == n) {
+    /* Add the fd to the freelist */
+    gpr_mu_lock(&fd_freelist_mu);
+    fd->freelist_next = fd_freelist;
+    fd_freelist = fd;
+    grpc_iomgr_unregister_object(&fd->iomgr_object);
+
+    gpr_mu_unlock(&fd_freelist_mu);
+  } else {
+    GPR_ASSERT(old > n);
+  }
+}
+
+/* Increment refcount by two to avoid changing the orphan bit */
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
+                   int line) {
+  ref_by(fd, 2, reason, file, line);
+}
+
+static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
+                     int line) {
+  unref_by(fd, 2, reason, file, line);
+}
+#else
+static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
+static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
+#endif
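[Editor's note] The ref-by-two scheme keeps bit 0 of refst as the active/orphaned flag while the higher bits hold the reference count, so normal ref/unref traffic (always +/-2) can never flip that flag. A plain-C arithmetic sketch of the encoding (the real code does all of this with atomic gpr_atm operations):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      intptr_t refst = 1;   /* fd_create: active bit set                        */
      refst += 2;           /* GRPC_FD_REF by some pollset: bit 0 unchanged     */
      assert(refst & 1);    /* fd_is_orphaned() would return false              */
      refst += 1;           /* fd_orphan: REF_BY(fd, 1) keeps the struct alive  */
      refst -= 2;           /* fd_orphan: UNREF_BY(fd, 2); net -1 clears bit 0  */
      assert(!(refst & 1)); /* fd_is_orphaned() would now return true           */
      refst -= 2;           /* final GRPC_FD_UNREF: count hits 0, fd freelisted */
      assert(refst == 0);
      return 0;
    }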
+
+static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
+
+static void fd_global_shutdown(void) {
+  gpr_mu_lock(&fd_freelist_mu);
+  gpr_mu_unlock(&fd_freelist_mu);
+  while (fd_freelist != NULL) {
+    grpc_fd *fd = fd_freelist;
+    fd_freelist = fd_freelist->freelist_next;
+    gpr_mu_destroy(&fd->mu);
+    gpr_free(fd);
+  }
+  gpr_mu_destroy(&fd_freelist_mu);
+}
+
+static grpc_fd *fd_create(int fd, const char *name) {
+  grpc_fd *new_fd = NULL;
+
+  gpr_mu_lock(&fd_freelist_mu);
+  if (fd_freelist != NULL) {
+    new_fd = fd_freelist;
+    fd_freelist = fd_freelist->freelist_next;
+  }
+  gpr_mu_unlock(&fd_freelist_mu);
+
+  if (new_fd == NULL) {
+    new_fd = gpr_malloc(sizeof(grpc_fd));
+    gpr_mu_init(&new_fd->mu);
+    gpr_mu_init(&new_fd->pi_mu);
+  }
+
+  /* Note: It is not really necessary to take the new_fd->mu lock here. If this
+     is a newly created fd (or an fd we got from the freelist), no one else
+     would be holding a lock to it anyway. */
+  gpr_mu_lock(&new_fd->mu);
+
+  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
+  new_fd->fd = fd;
+  new_fd->shutdown = false;
+  new_fd->orphaned = false;
+  new_fd->read_closure = CLOSURE_NOT_READY;
+  new_fd->write_closure = CLOSURE_NOT_READY;
+  new_fd->polling_island = NULL;
+  new_fd->freelist_next = NULL;
+  new_fd->on_done_closure = NULL;
+  new_fd->read_notifier_pollset = NULL;
+
+  gpr_mu_unlock(&new_fd->mu);
+
+  char *fd_name;
+  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
+  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+  gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
+#endif
+  gpr_free(fd_name);
+  return new_fd;
+}
+
+static bool fd_is_orphaned(grpc_fd *fd) {
+  return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
+}
+
+static int fd_wrapped_fd(grpc_fd *fd) {
+  int ret_fd = -1;
+  gpr_mu_lock(&fd->mu);
+  if (!fd->orphaned) {
+    ret_fd = fd->fd;
+  }
+  gpr_mu_unlock(&fd->mu);
+
+  return ret_fd;
+}
+
+static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+                      grpc_closure *on_done, int *release_fd,
+                      const char *reason) {
+  bool is_fd_closed = false;
+  grpc_error *error = GRPC_ERROR_NONE;
+
+  gpr_mu_lock(&fd->mu);
+  fd->on_done_closure = on_done;
+
+  /* If release_fd is not NULL, we should be relinquishing control of the file
+     descriptor fd->fd (but we still own the grpc_fd structure). */
+  if (release_fd != NULL) {
+    *release_fd = fd->fd;
+  } else {
+    close(fd->fd);
+    is_fd_closed = true;
+  }
+
+  fd->orphaned = true;
+
+  /* Remove the active status but keep it referenced. We want this grpc_fd
+     struct to be alive (and not added to the freelist) until the end of this
+     function */
+  REF_BY(fd, 1, reason);
+
+  /* Remove the fd from the polling island:
+     - Get a lock on the latest polling island (i.e the last island in the
+       linked list pointed by fd->polling_island). This is the island that
+       would actually contain the fd
+     - Remove the fd from the latest polling island
+     - Unlock the latest polling island
+     - Set fd->polling_island to NULL (but remove the ref on the polling island
+       before doing this.) */
+  gpr_mu_lock(&fd->pi_mu);
+  if (fd->polling_island != NULL) {
+    polling_island *pi_latest = polling_island_lock(fd->polling_island);
+    polling_island_remove_fd_locked(pi_latest, fd, is_fd_closed, &error);
+    gpr_mu_unlock(&pi_latest->mu);
+
+    PI_UNREF(fd->polling_island, "fd_orphan");
+    fd->polling_island = NULL;
+  }
+  gpr_mu_unlock(&fd->pi_mu);
+
+  grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, error, NULL);
+
+  gpr_mu_unlock(&fd->mu);
+  UNREF_BY(fd, 2, reason); /* Drop the reference */
+  GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
+}
+
+static grpc_error *fd_shutdown_error(bool shutdown) {
+  if (!shutdown) {
+    return GRPC_ERROR_NONE;
+  } else {
+    return GRPC_ERROR_CREATE("FD shutdown");
+  }
+}
+
+static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+                             grpc_closure **st, grpc_closure *closure) {
+  if (fd->shutdown) {
+    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
+                        NULL);
+  } else if (*st == CLOSURE_NOT_READY) {
+    /* not ready ==> switch to a waiting state by setting the closure */
+    *st = closure;
+  } else if (*st == CLOSURE_READY) {
+    /* already ready ==> queue the closure to run immediately */
+    *st = CLOSURE_NOT_READY;
+    grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
+                        NULL);
+  } else {
+    /* upcallptr was set to a different closure.  This is an error! */
+    gpr_log(GPR_ERROR,
+            "User called a notify_on function with a previous callback still "
+            "pending");
+    abort();
+  }
+}
+
+/* returns 1 if state becomes not ready */
+static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+                            grpc_closure **st) {
+  if (*st == CLOSURE_READY) {
+    /* duplicate ready ==> ignore */
+    return 0;
+  } else if (*st == CLOSURE_NOT_READY) {
+    /* not ready, and not waiting ==> flag ready */
+    *st = CLOSURE_READY;
+    return 0;
+  } else {
+    /* waiting ==> queue closure */
+    grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
+    *st = CLOSURE_NOT_READY;
+    return 1;
+  }
+}
+
+static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
+                                                  grpc_fd *fd) {
+  grpc_pollset *notifier = NULL;
+
+  gpr_mu_lock(&fd->mu);
+  notifier = fd->read_notifier_pollset;
+  gpr_mu_unlock(&fd->mu);
+
+  return notifier;
+}
+
+static bool fd_is_shutdown(grpc_fd *fd) {
+  gpr_mu_lock(&fd->mu);
+  const bool r = fd->shutdown;
+  gpr_mu_unlock(&fd->mu);
+  return r;
+}
+
+/* Might be called multiple times */
+static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+  gpr_mu_lock(&fd->mu);
+  /* Do the actual shutdown only once */
+  if (!fd->shutdown) {
+    fd->shutdown = true;
+
+    shutdown(fd->fd, SHUT_RDWR);
+    /* Flush any pending read and write closures. Since fd->shutdown is 'true'
+       at this point, the closures would be called with 'success = false' */
+    set_ready_locked(exec_ctx, fd, &fd->read_closure);
+    set_ready_locked(exec_ctx, fd, &fd->write_closure);
+  }
+  gpr_mu_unlock(&fd->mu);
+}
+
+static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+                              grpc_closure *closure) {
+  gpr_mu_lock(&fd->mu);
+  notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
+  gpr_mu_unlock(&fd->mu);
+}
+
+static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+                               grpc_closure *closure) {
+  gpr_mu_lock(&fd->mu);
+  notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
+  gpr_mu_unlock(&fd->mu);
+}
+
+/*******************************************************************************
+ * Pollset Definitions
+ */
+GPR_TLS_DECL(g_current_thread_pollset);
+GPR_TLS_DECL(g_current_thread_worker);
+static __thread bool g_initialized_sigmask;
+static __thread sigset_t g_orig_sigmask;
+
+static void sig_handler(int sig_num) {
+#ifdef GRPC_EPOLL_DEBUG
+  gpr_log(GPR_INFO, "Received signal %d", sig_num);
+#endif
+}
+
+static void poller_kick_init() { signal(grpc_wakeup_signal, sig_handler); }
+
+/* Global state management */
+static grpc_error *pollset_global_init(void) {
+  gpr_tls_init(&g_current_thread_pollset);
+  gpr_tls_init(&g_current_thread_worker);
+  poller_kick_init();
+  return grpc_wakeup_fd_init(&grpc_global_wakeup_fd);
+}
+
+static void pollset_global_shutdown(void) {
+  grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
+  gpr_tls_destroy(&g_current_thread_pollset);
+  gpr_tls_destroy(&g_current_thread_worker);
+}
+
+static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
+  grpc_error *err = GRPC_ERROR_NONE;
+
+  /* Kick the worker only if it was not already kicked */
+  if (gpr_atm_no_barrier_cas(&worker->is_kicked, (gpr_atm)0, (gpr_atm)1)) {
+    GRPC_POLLING_TRACE(
+        "pollset_worker_kick: Kicking worker: %p (thread id: %ld)",
+        (void *)worker, worker->pt_id);
+    int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal);
+    if (err_num != 0) {
+      err = GRPC_OS_ERROR(err_num, "pthread_kill");
+    }
+  }
+  return err;
+}
+
+/* Return 1 if the pollset has active threads in pollset_work (pollset must
+ * be locked) */
+static int pollset_has_workers(grpc_pollset *p) {
+  return p->root_worker.next != &p->root_worker;
+}
+
+static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+  worker->prev->next = worker->next;
+  worker->next->prev = worker->prev;
+}
+
+static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
+  if (pollset_has_workers(p)) {
+    grpc_pollset_worker *w = p->root_worker.next;
+    remove_worker(p, w);
+    return w;
+  } else {
+    return NULL;
+  }
+}
+
+static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+  worker->next = &p->root_worker;
+  worker->prev = worker->next->prev;
+  worker->prev->next = worker->next->prev = worker;
+}
+
+static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+  worker->prev = &p->root_worker;
+  worker->next = worker->prev->next;
+  worker->prev->next = worker->next->prev = worker;
+}
+
+/* p->mu must be held before calling this function */
+static grpc_error *pollset_kick(grpc_pollset *p,
+                                grpc_pollset_worker *specific_worker) {
+  GPR_TIMER_BEGIN("pollset_kick", 0);
+  grpc_error *error = GRPC_ERROR_NONE;
+  const char *err_desc = "Kick Failure";
+  grpc_pollset_worker *worker = specific_worker;
+  if (worker != NULL) {
+    if (worker == GRPC_POLLSET_KICK_BROADCAST) {
+      if (pollset_has_workers(p)) {
+        GPR_TIMER_BEGIN("pollset_kick.broadcast", 0);
+        for (worker = p->root_worker.next; worker != &p->root_worker;
+             worker = worker->next) {
+          if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
+            append_error(&error, pollset_worker_kick(worker), err_desc);
+          }
+        }
+        GPR_TIMER_END("pollset_kick.broadcast", 0);
+      } else {
+        p->kicked_without_pollers = true;
+      }
+    } else {
+      GPR_TIMER_MARK("kicked_specifically", 0);
+      if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
+        append_error(&error, pollset_worker_kick(worker), err_desc);
+      }
+    }
+  } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) {
+    /* Since worker == NULL, it means that we can kick "any" worker on this
+       pollset 'p'. If 'p' happens to be the same pollset this thread is
+       currently polling (i.e in pollset_work() function), then there is no need
+       to kick any other worker since the current thread can just absorb the
+       kick. This is the reason why we enter this case only when
+       g_current_thread_pollset is != p */
+
+    GPR_TIMER_MARK("kick_anonymous", 0);
+    worker = pop_front_worker(p);
+    if (worker != NULL) {
+      GPR_TIMER_MARK("finally_kick", 0);
+      push_back_worker(p, worker);
+      append_error(&error, pollset_worker_kick(worker), err_desc);
+    } else {
+      GPR_TIMER_MARK("kicked_no_pollers", 0);
+      p->kicked_without_pollers = true;
+    }
+  }
+
+  GPR_TIMER_END("pollset_kick", 0);
+  GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error));
+  return error;
+}
+
+static grpc_error *kick_poller(void) {
+  return grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd);
+}
+
+static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+  gpr_mu_init(&pollset->mu);
+  *mu = &pollset->mu;
+
+  pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
+  pollset->kicked_without_pollers = false;
+
+  pollset->shutting_down = false;
+  pollset->finish_shutdown_called = false;
+  pollset->shutdown_done = NULL;
+
+  pollset->polling_island = NULL;
+}
+
+/* Convert a timespec to milliseconds:
+   - Very small or negative poll times are clamped to zero to do a non-blocking
+     poll (which becomes spin polling)
+   - Other small values are rounded up to one millisecond
+   - Polls longer than a millisecond are rounded up to the next whole
+     millisecond to avoid spinning
+   - Infinite timeouts are converted to -1 */
+static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
+                                           gpr_timespec now) {
+  gpr_timespec timeout;
+  static const int64_t max_spin_polling_us = 10;
+  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
+    return -1;
+  }
+
+  if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
+                                                   max_spin_polling_us,
+                                                   GPR_TIMESPAN))) <= 0) {
+    return 0;
+  }
+  timeout = gpr_time_sub(deadline, now);
+  return gpr_time_to_millis(gpr_time_add(
+      timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
+}
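[Editor's note] A worked example of the rounding, computed with the same gpr_time helpers the function uses (a sketch only; 'now' is taken as the zero of the monotonic clock purely for illustration):

    #include <grpc/support/time.h>

    /* A 2.5ms deadline becomes a 3ms epoll timeout: 2.5ms + (1ms - 1ns) is
       3.499999ms, and gpr_time_to_millis truncates that to 3, i.e. the value
       is rounded up to the next whole millisecond rather than down to 2. */
    static int example_timeout_ms(void) {
      gpr_timespec now = gpr_time_0(GPR_CLOCK_MONOTONIC);
      gpr_timespec deadline =
          gpr_time_add(now, gpr_time_from_micros(2500, GPR_TIMESPAN));
      return poll_deadline_to_millis_timeout(deadline, now); /* -> 3 */
    }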
+
+static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+                               grpc_pollset *notifier) {
+  /* Need the fd->mu since we might be racing with fd_notify_on_read */
+  gpr_mu_lock(&fd->mu);
+  set_ready_locked(exec_ctx, fd, &fd->read_closure);
+  fd->read_notifier_pollset = notifier;
+  gpr_mu_unlock(&fd->mu);
+}
+
+static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+  /* Need the fd->mu since we might be racing with fd_notify_on_write */
+  gpr_mu_lock(&fd->mu);
+  set_ready_locked(exec_ctx, fd, &fd->write_closure);
+  gpr_mu_unlock(&fd->mu);
+}
+
+static void pollset_release_polling_island(grpc_pollset *ps, char *reason) {
+  if (ps->polling_island != NULL) {
+    PI_UNREF(ps->polling_island, reason);
+  }
+  ps->polling_island = NULL;
+}
+
+static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
+                                   grpc_pollset *pollset) {
+  /* The pollset cannot have any workers if we are at this stage */
+  GPR_ASSERT(!pollset_has_workers(pollset));
+
+  pollset->finish_shutdown_called = true;
+
+  /* Release the ref and set pollset->polling_island to NULL */
+  pollset_release_polling_island(pollset, "ps_shutdown");
+  grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
+}
+
+/* pollset->mu lock must be held by the caller before calling this */
+static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+                             grpc_closure *closure) {
+  GPR_TIMER_BEGIN("pollset_shutdown", 0);
+  GPR_ASSERT(!pollset->shutting_down);
+  pollset->shutting_down = true;
+  pollset->shutdown_done = closure;
+  pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+
+  /* If the pollset has any workers, we cannot call finish_shutdown_locked()
+     because it would release the underlying polling island. In such a case, we
+     let the last worker call finish_shutdown_locked() from pollset_work() */
+  if (!pollset_has_workers(pollset)) {
+    GPR_ASSERT(!pollset->finish_shutdown_called);
+    GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
+    finish_shutdown_locked(exec_ctx, pollset);
+  }
+  GPR_TIMER_END("pollset_shutdown", 0);
+}
+
+/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
+ * than destroying the mutexes, there is nothing special that needs to be done
+ * here */
+static void pollset_destroy(grpc_pollset *pollset) {
+  GPR_ASSERT(!pollset_has_workers(pollset));
+  gpr_mu_destroy(&pollset->mu);
+}
+
+static void pollset_reset(grpc_pollset *pollset) {
+  GPR_ASSERT(pollset->shutting_down);
+  GPR_ASSERT(!pollset_has_workers(pollset));
+  pollset->shutting_down = false;
+  pollset->finish_shutdown_called = false;
+  pollset->kicked_without_pollers = false;
+  pollset->shutdown_done = NULL;
+  pollset_release_polling_island(pollset, "ps_reset");
+}
+
+#define GRPC_EPOLL_MAX_EVENTS 1000
+/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
+static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
+                                    grpc_pollset *pollset,
+                                    grpc_pollset_worker *worker, int timeout_ms,
+                                    sigset_t *sig_mask, grpc_error **error) {
+  struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
+  int epoll_fd = -1;
+  int ep_rv;
+  polling_island *pi = NULL;
+  char *err_msg;
+  const char *err_desc = "pollset_work_and_unlock";
+  GPR_TIMER_BEGIN("pollset_work_and_unlock", 0);
+
+  /* We need to get the epoll_fd to wait on. The epoll_fd is inside the latest
+     polling island pointed to by pollset->polling_island.
+
+     Since epoll_fd is immutable, we can read it without obtaining the polling
+     island lock. There is however a possibility that the polling island (from
+     which we got the epoll_fd) got merged with another island while we are
+     in this function. This is still okay because in such a case, we will wake
+     up right away from epoll_wait() and pick up the latest polling_island the
+     next time this function (i.e pollset_work_and_unlock()) is called */
+
+  if (pollset->polling_island == NULL) {
+    pollset->polling_island = polling_island_create(NULL, error);
+    if (pollset->polling_island == NULL) {
+      GPR_TIMER_END("pollset_work_and_unlock", 0);
+      return; /* Fatal error. We cannot continue */
+    }
+
+    PI_ADD_REF(pollset->polling_island, "ps");
+    GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p",
+                       (void *)pollset, (void *)pollset->polling_island);
+  }
+
+  pi = polling_island_maybe_get_latest(pollset->polling_island);
+  epoll_fd = pi->epoll_fd;
+
+  /* Update pollset->polling_island since the island currently pointed to by
+     pollset->polling_island may be older than the one pointed to by pi */
+  if (pollset->polling_island != pi) {
+    /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the
+       polling island to be deleted */
+    PI_ADD_REF(pi, "ps");
+    PI_UNREF(pollset->polling_island, "ps");
+    pollset->polling_island = pi;
+  }
+
+  /* Add an extra ref so that the island does not get destroyed (which means
+     the epoll_fd won't be closed) while we are doing an epoll_wait() on the
+     epoll_fd */
+  PI_ADD_REF(pi, "ps_work");
+  gpr_mu_unlock(&pollset->mu);
+
+  do {
+    ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms,
+                        sig_mask);
+    if (ep_rv < 0) {
+      if (errno != EINTR) {
+        gpr_asprintf(&err_msg,
+                     "epoll_wait() epoll fd: %d failed with error: %d (%s)",
+                     epoll_fd, errno, strerror(errno));
+        append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
+      } else {
+        /* We were interrupted. Save an iteration by doing a zero timeout
+           epoll_wait to see if there are any other events of interest */
+        GRPC_POLLING_TRACE(
+            "pollset_work: pollset: %p, worker: %p received kick",
+            (void *)pollset, (void *)worker);
+        ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
+      }
+    }
+
+#ifdef GRPC_TSAN
+    /* See the definition of g_epoll_sync for more details */
+    gpr_atm_acq_load(&g_epoll_sync);
+#endif /* defined(GRPC_TSAN) */
+
+    for (int i = 0; i < ep_rv; ++i) {
+      void *data_ptr = ep_ev[i].data.ptr;
+      if (data_ptr == &grpc_global_wakeup_fd) {
+        append_error(error,
+                     grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd),
+                     err_desc);
+      } else if (data_ptr == &polling_island_wakeup_fd) {
+        GRPC_POLLING_TRACE(
+            "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
+            "%d) got merged",
+            (void *)pollset, (void *)worker, epoll_fd);
+        /* This means that our polling island is merged with a different
+           island. We do not have to do anything here since the subsequent call
+           to the function pollset_work_and_unlock() will pick up the correct
+           epoll_fd */
+      } else {
+        grpc_fd *fd = data_ptr;
+        int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
+        int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
+        int write_ev = ep_ev[i].events & EPOLLOUT;
+        if (read_ev || cancel) {
+          fd_become_readable(exec_ctx, fd, pollset);
+        }
+        if (write_ev || cancel) {
+          fd_become_writable(exec_ctx, fd);
+        }
+      }
+    }
+  } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
+
+  GPR_ASSERT(pi != NULL);
+
+  /* Before leaving, release the extra ref we added to the polling island. It
+     is important to use "pi" here (i.e our old copy of pollset->polling_island
+     that we got before releasing the polling island lock). This is because the
+     pollset->polling_island pointer might get updated in other parts of the
+     code when there is an island merge while we are doing epoll_wait() above */
+  PI_UNREF(pi, "ps_work");
+
+  GPR_TIMER_END("pollset_work_and_unlock", 0);
+}
+
+/* pollset->mu lock must be held by the caller before calling this.
+   The function pollset_work() may temporarily release the lock (pollset->mu)
+   during the course of its execution but it will always re-acquire the lock and
+   ensure that it is held by the time the function returns */
+static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+                                grpc_pollset_worker **worker_hdl,
+                                gpr_timespec now, gpr_timespec deadline) {
+  GPR_TIMER_BEGIN("pollset_work", 0);
+  grpc_error *error = GRPC_ERROR_NONE;
+  int timeout_ms = poll_deadline_to_millis_timeout(deadline, now);
+
+  sigset_t new_mask;
+
+  grpc_pollset_worker worker;
+  worker.next = worker.prev = NULL;
+  worker.pt_id = pthread_self();
+  gpr_atm_no_barrier_store(&worker.is_kicked, (gpr_atm)0);
+
+  *worker_hdl = &worker;
+
+  gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
+  gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
+
+  if (pollset->kicked_without_pollers) {
+    /* If the pollset was kicked without pollers, pretend that the current
+       worker got the kick and skip polling. A kick indicates that there is some
+       work that needs attention like an event on the completion queue or an
+       alarm */
+    GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
+    pollset->kicked_without_pollers = 0;
+  } else if (!pollset->shutting_down) {
+    /* We use the posix-signal with number 'grpc_wakeup_signal' for waking up
+       (i.e 'kicking') a worker in the pollset. A 'kick' is a way to inform the
+       worker that there is some pending work that needs immediate attention
+       (like an event on the completion queue, or a polling island merge that
+       results in a new epoll-fd to wait on) and that the worker should not
+       spend time waiting in epoll_pwait().
+
+       A worker can be kicked anytime from the point it is added to the pollset
+       via push_front_worker() (or push_back_worker()) to the point it is
+       removed via remove_worker().
+       If the worker is kicked before/during it calls epoll_pwait(), it should
+       immediately exit from epoll_wait(). If the worker is kicked after it
+       returns from epoll_wait(), then nothing really needs to be done.
+
+       To accomplish this, we mask 'grpc_wakeup_signal' on this thread at all
+       times *except* when it is in epoll_pwait(). This way, the worker never
+       misses acting on a kick */
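+
+    /* Illustrative sketch of the interplay described above (assuming the kick
+       is delivered as a signal to the worker's thread, e.g.
+       pthread_kill(worker->pt_id, grpc_wakeup_signal), and that a no-op
+       handler is installed for that signal):
+         - kick before epoll_pwait(): the signal stays pending while blocked
+           and is delivered the moment epoll_pwait() installs g_orig_sigmask,
+           so the call returns immediately with EINTR.
+         - kick during epoll_pwait(): the call is interrupted and returns
+           EINTR right away.
+         - kick after epoll_pwait() returns: the worker is already processing
+           events, so nothing more is needed. */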
+
+    if (!g_initialized_sigmask) {
+      sigemptyset(&new_mask);
+      sigaddset(&new_mask, grpc_wakeup_signal);
+      pthread_sigmask(SIG_BLOCK, &new_mask, &g_orig_sigmask);
+      sigdelset(&g_orig_sigmask, grpc_wakeup_signal);
+      g_initialized_sigmask = true;
+      /* new_mask:       The new thread mask which blocks 'grpc_wakeup_signal'.
+                         This is the mask used at all times *except during
+                         epoll_pwait()*.
+         g_orig_sigmask: The thread mask which allows 'grpc_wakeup_signal';
+                         this is the mask to use *during epoll_pwait()*.
+
+         new_mask is set on the worker's thread before the worker is added to
+         the pollset (i.e. before it can be kicked) */
+    }
+
+    push_front_worker(pollset, &worker); /* Add worker to pollset */
+
+    pollset_work_and_unlock(exec_ctx, pollset, &worker, timeout_ms,
+                            &g_orig_sigmask, &error);
+    grpc_exec_ctx_flush(exec_ctx);
+
+    gpr_mu_lock(&pollset->mu);
+
+    /* Note: There is no need to reset worker.is_kicked to 0 since we are no
+       longer going to use this worker */
+    remove_worker(pollset, &worker);
+  }
+
+  /* If we are the last worker on the pollset (i.e. pollset_has_workers() is
+     false at this point) and the pollset is shutting down, we may have to
+     finish the shutdown process by calling finish_shutdown_locked().
+     See pollset_shutdown() for more details.
+
+     Note: Continuing to access pollset here is safe; it is the caller's
+     responsibility to not destroy a pollset when it has outstanding calls to
+     pollset_work() */
+  if (pollset->shutting_down && !pollset_has_workers(pollset) &&
+      !pollset->finish_shutdown_called) {
+    GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
+    finish_shutdown_locked(exec_ctx, pollset);
+
+    gpr_mu_unlock(&pollset->mu);
+    grpc_exec_ctx_flush(exec_ctx);
+    gpr_mu_lock(&pollset->mu);
+  }
+
+  *worker_hdl = NULL;
+
+  gpr_tls_set(&g_current_thread_pollset, (intptr_t)0);
+  gpr_tls_set(&g_current_thread_worker, (intptr_t)0);
+
+  GPR_TIMER_END("pollset_work", 0);
+
+  GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
+  return error;
+}
+
+static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+                           grpc_fd *fd) {
+  grpc_error *error = GRPC_ERROR_NONE;
+
+  gpr_mu_lock(&pollset->mu);
+  gpr_mu_lock(&fd->pi_mu);
+
+  polling_island *pi_new = NULL;
+
+  /* 1) If fd->polling_island and pollset->polling_island are both non-NULL and
+   *    equal, do nothing.
+   * 2) If fd->polling_island and pollset->polling_island are both NULL, create
+   *    a new polling island (with a refcount of 2) and make the polling_island
+   *    fields in both fd and pollset point to the new island
+   * 3) If one of fd->polling_island or pollset->polling_island is NULL, update
+   *    the NULL polling_island field to point to the non-NULL polling_island
+   *    field (ensure that the refcount on the polling island is incremented by
+   *    1 to account for the newly added reference)
+   * 4) Finally, if fd->polling_island and pollset->polling_island are non-NULL
+   *    and different, merge both the polling islands and update the
+   *    polling_island fields in both fd and pollset to point to the merged
+   *    polling island.
+   */
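+  /* In all four cases the reference-count bookkeeping is reconciled by the
+     two "if (... != pi_new)" blocks at the end of this function: whichever of
+     fd->polling_island / pollset->polling_island does not already point to
+     pi_new takes a ref on pi_new (and drops the ref on its old island, if
+     any). */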
+  if (fd->polling_island == pollset->polling_island) {
+    pi_new = fd->polling_island;
+    if (pi_new == NULL) {
+      pi_new = polling_island_create(fd, &error);
+
+      GRPC_POLLING_TRACE(
+          "pollset_add_fd: Created new polling island. pi_new: %p (fd: %d, "
+          "pollset: %p)",
+          (void *)pi_new, fd->fd, (void *)pollset);
+    }
+  } else if (fd->polling_island == NULL) {
+    pi_new = polling_island_lock(pollset->polling_island);
+    polling_island_add_fds_locked(pi_new, &fd, 1, true, &error);
+    gpr_mu_unlock(&pi_new->mu);
+
+    GRPC_POLLING_TRACE(
+        "pollset_add_fd: fd->pi was NULL. pi_new: %p (fd: %d, pollset: %p, "
+        "pollset->pi: %p)",
+        (void *)pi_new, fd->fd, (void *)pollset,
+        (void *)pollset->polling_island);
+  } else if (pollset->polling_island == NULL) {
+    pi_new = polling_island_lock(fd->polling_island);
+    gpr_mu_unlock(&pi_new->mu);
+
+    GRPC_POLLING_TRACE(
+        "pollset_add_fd: pollset->pi was NULL. pi_new: %p (fd: %d, pollset: "
+        "%p, fd->pi: %p",
+        (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island);
+  } else {
+    pi_new = polling_island_merge(fd->polling_island, pollset->polling_island,
+                                  &error);
+    GRPC_POLLING_TRACE(
+        "pollset_add_fd: polling islands merged. pi_new: %p (fd: %d, pollset: "
+        "%p, fd->pi: %p, pollset->pi: %p)",
+        (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island,
+        (void *)pollset->polling_island);
+  }
+
+  /* At this point, pi_new is the polling island that both fd->polling_island
+     and pollset->polling_island must be pointing to */
+
+  if (fd->polling_island != pi_new) {
+    PI_ADD_REF(pi_new, "fd");
+    if (fd->polling_island != NULL) {
+      PI_UNREF(fd->polling_island, "fd");
+    }
+    fd->polling_island = pi_new;
+  }
+
+  if (pollset->polling_island != pi_new) {
+    PI_ADD_REF(pi_new, "ps");
+    if (pollset->polling_island != NULL) {
+      PI_UNREF(pollset->polling_island, "ps");
+    }
+    pollset->polling_island = pi_new;
+  }
+
+  gpr_mu_unlock(&fd->pi_mu);
+  gpr_mu_unlock(&pollset->mu);
+}
+
+/*******************************************************************************
+ * Pollset-set Definitions
+ */
+
+static grpc_pollset_set *pollset_set_create(void) {
+  grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set));
+  memset(pollset_set, 0, sizeof(*pollset_set));
+  gpr_mu_init(&pollset_set->mu);
+  return pollset_set;
+}
+
+static void pollset_set_destroy(grpc_pollset_set *pollset_set) {
+  size_t i;
+  gpr_mu_destroy(&pollset_set->mu);
+  for (i = 0; i < pollset_set->fd_count; i++) {
+    GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
+  }
+  gpr_free(pollset_set->pollsets);
+  gpr_free(pollset_set->pollset_sets);
+  gpr_free(pollset_set->fds);
+  gpr_free(pollset_set);
+}
+
+static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
+                               grpc_pollset_set *pollset_set, grpc_fd *fd) {
+  size_t i;
+  gpr_mu_lock(&pollset_set->mu);
+  if (pollset_set->fd_count == pollset_set->fd_capacity) {
+    pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
+    pollset_set->fds = gpr_realloc(
+        pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
+  }
+  GRPC_FD_REF(fd, "pollset_set");
+  pollset_set->fds[pollset_set->fd_count++] = fd;
+  for (i = 0; i < pollset_set->pollset_count; i++) {
+    pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
+  }
+  for (i = 0; i < pollset_set->pollset_set_count; i++) {
+    pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
+  }
+  gpr_mu_unlock(&pollset_set->mu);
+}
+
+static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
+                               grpc_pollset_set *pollset_set, grpc_fd *fd) {
+  size_t i;
+  gpr_mu_lock(&pollset_set->mu);
+  for (i = 0; i < pollset_set->fd_count; i++) {
+    if (pollset_set->fds[i] == fd) {
+      pollset_set->fd_count--;
+      GPR_SWAP(grpc_fd *, pollset_set->fds[i],
+               pollset_set->fds[pollset_set->fd_count]);
+      GRPC_FD_UNREF(fd, "pollset_set");
+      break;
+    }
+  }
+  for (i = 0; i < pollset_set->pollset_set_count; i++) {
+    pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
+  }
+  gpr_mu_unlock(&pollset_set->mu);
+}
+
+static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
+                                    grpc_pollset_set *pollset_set,
+                                    grpc_pollset *pollset) {
+  size_t i, j;
+  gpr_mu_lock(&pollset_set->mu);
+  if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
+    pollset_set->pollset_capacity =
+        GPR_MAX(8, 2 * pollset_set->pollset_capacity);
+    pollset_set->pollsets =
+        gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
+                                               sizeof(*pollset_set->pollsets));
+  }
+  pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
+  for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
+    if (fd_is_orphaned(pollset_set->fds[i])) {
+      GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
+    } else {
+      pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
+      pollset_set->fds[j++] = pollset_set->fds[i];
+    }
+  }
+  pollset_set->fd_count = j;
+  gpr_mu_unlock(&pollset_set->mu);
+}
+
+static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
+                                    grpc_pollset_set *pollset_set,
+                                    grpc_pollset *pollset) {
+  size_t i;
+  gpr_mu_lock(&pollset_set->mu);
+  for (i = 0; i < pollset_set->pollset_count; i++) {
+    if (pollset_set->pollsets[i] == pollset) {
+      pollset_set->pollset_count--;
+      GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i],
+               pollset_set->pollsets[pollset_set->pollset_count]);
+      break;
+    }
+  }
+  gpr_mu_unlock(&pollset_set->mu);
+}
+
+static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
+                                        grpc_pollset_set *bag,
+                                        grpc_pollset_set *item) {
+  size_t i, j;
+  gpr_mu_lock(&bag->mu);
+  if (bag->pollset_set_count == bag->pollset_set_capacity) {
+    bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
+    bag->pollset_sets =
+        gpr_realloc(bag->pollset_sets,
+                    bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
+  }
+  bag->pollset_sets[bag->pollset_set_count++] = item;
+  for (i = 0, j = 0; i < bag->fd_count; i++) {
+    if (fd_is_orphaned(bag->fds[i])) {
+      GRPC_FD_UNREF(bag->fds[i], "pollset_set");
+    } else {
+      pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
+      bag->fds[j++] = bag->fds[i];
+    }
+  }
+  bag->fd_count = j;
+  gpr_mu_unlock(&bag->mu);
+}
+
+static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
+                                        grpc_pollset_set *bag,
+                                        grpc_pollset_set *item) {
+  size_t i;
+  gpr_mu_lock(&bag->mu);
+  for (i = 0; i < bag->pollset_set_count; i++) {
+    if (bag->pollset_sets[i] == item) {
+      bag->pollset_set_count--;
+      GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i],
+               bag->pollset_sets[bag->pollset_set_count]);
+      break;
+    }
+  }
+  gpr_mu_unlock(&bag->mu);
+}
+
+/* Test helper functions */
+void *grpc_fd_get_polling_island(grpc_fd *fd) {
+  polling_island *pi;
+
+  gpr_mu_lock(&fd->pi_mu);
+  pi = fd->polling_island;
+  gpr_mu_unlock(&fd->pi_mu);
+
+  return pi;
+}
+
+void *grpc_pollset_get_polling_island(grpc_pollset *ps) {
+  polling_island *pi;
+
+  gpr_mu_lock(&ps->mu);
+  pi = ps->polling_island;
+  gpr_mu_unlock(&ps->mu);
+
+  return pi;
+}
+
+bool grpc_are_polling_islands_equal(void *p, void *q) {
+  polling_island *p1 = p;
+  polling_island *p2 = q;
+
+  /* Note: polling_island_lock_pair() may change p1 and p2 to point to the
+     latest polling islands in their respective linked lists */
+  polling_island_lock_pair(&p1, &p2);
+  polling_island_unlock_pair(p1, p2);
+
+  return p1 == p2;
+}
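+
+/* Example (hypothetical test code): after pollset_add_fd(exec_ctx, pollset,
+   fd) returns, the fd and the pollset must share a polling island, so a test
+   can assert:
+
+     GPR_ASSERT(grpc_are_polling_islands_equal(
+         grpc_fd_get_polling_island(fd),
+         grpc_pollset_get_polling_island(pollset)));
+*/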
+
+/*******************************************************************************
+ * Event engine binding
+ */
+
+static void shutdown_engine(void) {
+  fd_global_shutdown();
+  pollset_global_shutdown();
+  polling_island_global_shutdown();
+}
+
+static const grpc_event_engine_vtable vtable = {
+    .pollset_size = sizeof(grpc_pollset),
+
+    .fd_create = fd_create,
+    .fd_wrapped_fd = fd_wrapped_fd,
+    .fd_orphan = fd_orphan,
+    .fd_shutdown = fd_shutdown,
+    .fd_is_shutdown = fd_is_shutdown,
+    .fd_notify_on_read = fd_notify_on_read,
+    .fd_notify_on_write = fd_notify_on_write,
+    .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
+
+    .pollset_init = pollset_init,
+    .pollset_shutdown = pollset_shutdown,
+    .pollset_reset = pollset_reset,
+    .pollset_destroy = pollset_destroy,
+    .pollset_work = pollset_work,
+    .pollset_kick = pollset_kick,
+    .pollset_add_fd = pollset_add_fd,
+
+    .pollset_set_create = pollset_set_create,
+    .pollset_set_destroy = pollset_set_destroy,
+    .pollset_set_add_pollset = pollset_set_add_pollset,
+    .pollset_set_del_pollset = pollset_set_del_pollset,
+    .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
+    .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
+    .pollset_set_add_fd = pollset_set_add_fd,
+    .pollset_set_del_fd = pollset_set_del_fd,
+
+    .kick_poller = kick_poller,
+
+    .shutdown_engine = shutdown_engine,
+};
+
+/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
+ * Create a dummy epoll_fd to make sure epoll support is available */
+static bool is_epoll_available() {
+  int fd = epoll_create1(EPOLL_CLOEXEC);
+  if (fd < 0) {
+    gpr_log(
+        GPR_ERROR,
+        "epoll_create1 failed (returned %d). Not using epoll polling engine",
+        fd);
+    return false;
+  }
+  close(fd);
+  return true;
+}
+
+const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
+  /* If use of signals is disabled, we cannot use the epoll engine */
+  if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) {
+    return NULL;
+  }
+
+  if (!is_epoll_available()) {
+    return NULL;
+  }
+
+  if (!is_grpc_wakeup_signal_initialized) {
+    grpc_use_signal(SIGRTMIN + 2);
+  }
+
+  fd_global_init();
+
+  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
+    return NULL;
+  }
+
+  if (!GRPC_LOG_IF_ERROR("polling_island_global_init",
+                         polling_island_global_init())) {
+    return NULL;
+  }
+
+  return &vtable;
+}
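+
+/* Note: this engine is registered under the name "epoll" in ev_posix.c (the
+   first entry of the g_factories table later in this change) and can be
+   selected explicitly at runtime by setting the GRPC_POLL_STRATEGY=epoll
+   environment variable. */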
+
+#else /* defined(GPR_LINUX_EPOLL) */
+#if defined(GPR_POSIX_SOCKET)
+#include "src/core/lib/iomgr/ev_posix.h"
+/* If GPR_LINUX_EPOLL is not defined, it means epoll is not available. Return
+ * NULL */
+const grpc_event_engine_vtable *grpc_init_epoll_linux(void) { return NULL; }
+#endif /* defined(GPR_POSIX_SOCKET) */
+
+void grpc_use_signal(int signum) {}
+#endif /* !defined(GPR_LINUX_EPOLL) */
diff --git a/include/grpc/grpc_zookeeper.h b/src/core/lib/iomgr/ev_epoll_linux.h
similarity index 71%
rename from include/grpc/grpc_zookeeper.h
rename to src/core/lib/iomgr/ev_epoll_linux.h
index 2b195c18bff42b8e83f0450b661ff741422349e5..7a494aba198531c7ac71f19da61684964b8cc509 100644
--- a/include/grpc/grpc_zookeeper.h
+++ b/src/core/lib/iomgr/ev_epoll_linux.h
@@ -31,29 +31,17 @@
  *
  */
 
-/** Support zookeeper as alternative name system in addition to DNS
- *  Zookeeper name in gRPC is represented as a URI:
- *  zookeeper://host:port/path/service/instance
- *
- *  Where zookeeper is the name system scheme
- *  host:port is the address of a zookeeper server
- *  /path/service/instance is the zookeeper name to be resolved
- *
- *  Refer doc/naming.md for more details
- */
-
-#ifndef GRPC_GRPC_ZOOKEEPER_H
-#define GRPC_GRPC_ZOOKEEPER_H
+#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H
+#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
+#include "src/core/lib/iomgr/ev_posix.h"
 
-/** Register zookeeper name resolver in grpc */
-void grpc_zookeeper_register();
+const grpc_event_engine_vtable *grpc_init_epoll_linux(void);
 
-#ifdef __cplusplus
-}
-#endif
+#ifdef GPR_LINUX_EPOLL
+void *grpc_fd_get_polling_island(grpc_fd *fd);
+void *grpc_pollset_get_polling_island(grpc_pollset *ps);
+bool grpc_are_polling_islands_equal(void *p, void *q);
+#endif /* defined(GPR_LINUX_EPOLL) */
 
-#endif /* GRPC_GRPC_ZOOKEEPER_H */
+#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H */
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index 4c360f13a3261d9f376e68023a9072e0fa57cc98..a3c1e9db9a08b31a84ac97bca201c5ef893f57cd 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -44,6 +44,7 @@
 #include <grpc/support/string_util.h>
 #include <grpc/support/useful.h>
 
+#include "src/core/lib/iomgr/ev_epoll_linux.h"
 #include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h"
 #include "src/core/lib/iomgr/ev_poll_posix.h"
 #include "src/core/lib/support/env.h"
@@ -53,6 +54,7 @@
 grpc_poll_function_type grpc_poll_function = poll;
 
 static const grpc_event_engine_vtable *g_event_engine;
+static const char *g_poll_strategy_name = NULL;
 
 typedef const grpc_event_engine_vtable *(*event_engine_factory_fn)(void);
 
@@ -62,7 +64,9 @@ typedef struct {
 } event_engine_factory;
 
 static const event_engine_factory g_factories[] = {
-    {"poll", grpc_init_poll_posix}, {"legacy", grpc_init_poll_and_epoll_posix},
+    {"epoll", grpc_init_epoll_linux},
+    {"poll", grpc_init_poll_posix},
+    {"legacy", grpc_init_poll_and_epoll_posix},
 };
 
 static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
@@ -98,6 +102,7 @@ static void try_engine(const char *engine) {
   for (size_t i = 0; i < GPR_ARRAY_SIZE(g_factories); i++) {
     if (is(engine, g_factories[i].name)) {
       if ((g_event_engine = g_factories[i].factory())) {
+        g_poll_strategy_name = g_factories[i].name;
         gpr_log(GPR_DEBUG, "Using polling engine: %s", g_factories[i].name);
         return;
       }
@@ -105,6 +110,9 @@ static void try_engine(const char *engine) {
   }
 }
 
+/* Call this only after calling grpc_event_engine_init() */
+const char *grpc_get_poll_strategy_name() { return g_poll_strategy_name; }
+
 void grpc_event_engine_init(void) {
   char *s = gpr_getenv("GRPC_POLL_STRATEGY");
   if (s == NULL) {
@@ -167,11 +175,6 @@ void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
   g_event_engine->fd_notify_on_write(exec_ctx, fd, closure);
 }
 
-grpc_pollset *grpc_fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
-                                                grpc_fd *fd) {
-  return g_event_engine->fd_get_read_notifier_pollset(exec_ctx, fd);
-}
-
 size_t grpc_pollset_size(void) { return g_event_engine->pollset_size; }
 
 void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 87e9bc4d46f016cce031f979c93f95e522812c8d..579c84ef70725e5a00510627082f27bb72dc1da5 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -99,6 +99,9 @@ typedef struct grpc_event_engine_vtable {
 void grpc_event_engine_init(void);
 void grpc_event_engine_shutdown(void);
 
+/* Return the name of the poll strategy */
+const char *grpc_get_poll_strategy_name();
+
 /* Create a wrapped file descriptor.
    Requires fd is a non-blocking file descriptor.
    This takes ownership of closing fd. */
diff --git a/src/core/lib/iomgr/network_status_tracker.c b/src/core/lib/iomgr/network_status_tracker.c
new file mode 100644
index 0000000000000000000000000000000000000000..ccbe136db92767bed79a6e33e9a988a64ddd6c80
--- /dev/null
+++ b/src/core/lib/iomgr/network_status_tracker.c
@@ -0,0 +1,118 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include "src/core/lib/iomgr/endpoint.h"
+
+typedef struct endpoint_ll_node {
+  grpc_endpoint *ep;
+  struct endpoint_ll_node *next;
+} endpoint_ll_node;
+
+static endpoint_ll_node *head = NULL;
+static gpr_mu g_endpoint_mutex;
+static gpr_once g_once_init = GPR_ONCE_INIT;
+
+static void destroy_network_status_monitor(void) {
+  if (head != NULL) {
+    gpr_log(GPR_ERROR,
+            "Memory leaked: not all network endpoints were shut down");
+  }
+  gpr_mu_destroy(&g_endpoint_mutex);
+}
+
+static void initialize_network_status_monitor(void) {
+  gpr_mu_init(&g_endpoint_mutex);
+  atexit(destroy_network_status_monitor);
+  // TODO(makarandd): Install callback with OS to monitor network status.
+}
+
+void grpc_network_status_register_endpoint(grpc_endpoint *ep) {
+  gpr_once_init(&g_once_init, initialize_network_status_monitor);
+  gpr_mu_lock(&g_endpoint_mutex);
+  if (head == NULL) {
+    head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node));
+    head->ep = ep;
+    head->next = NULL;
+  } else {
+    endpoint_ll_node *prev_head = head;
+    head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node));
+    head->ep = ep;
+    head->next = prev_head;
+  }
+  gpr_mu_unlock(&g_endpoint_mutex);
+}
+
+void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) {
+  gpr_mu_lock(&g_endpoint_mutex);
+  GPR_ASSERT(head);
+  bool found = false;
+  endpoint_ll_node *prev = head;
+  // if we're unregistering the head, just move head to the next
+  if (ep == head->ep) {
+    head = head->next;
+    gpr_free(prev);
+    found = true;
+  } else {
+    for (endpoint_ll_node *curr = head->next; curr != NULL; curr = curr->next) {
+      if (ep == curr->ep) {
+        prev->next = curr->next;
+        gpr_free(curr);
+        found = true;
+        break;
+      }
+      prev = curr;
+    }
+  }
+  gpr_mu_unlock(&g_endpoint_mutex);
+  GPR_ASSERT(found);
+}
+
+// Walk the linked-list from head and execute shutdown. It is possible that
+// other threads might be in the process of shutdown as well, but that has
+// no side effect since endpoint shutdown is idempotent.
+void grpc_network_status_shutdown_all_endpoints() {
+  gpr_mu_lock(&g_endpoint_mutex);
+  if (head == NULL) {
+    gpr_mu_unlock(&g_endpoint_mutex);
+    return;
+  }
+  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+  for (endpoint_ll_node *curr = head; curr != NULL; curr = curr->next) {
+    curr->ep->vtable->shutdown(&exec_ctx, curr->ep);
+  }
+  gpr_mu_unlock(&g_endpoint_mutex);
+  grpc_exec_ctx_finish(&exec_ctx);
+}
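+
+/* Typical usage (see the tcp_posix.c and tcp_windows.c changes in this diff):
+   grpc_tcp_create() registers each new endpoint, the endpoint's destroy path
+   unregisters it, and a platform network-status callback (still a TODO above)
+   would invoke grpc_network_status_shutdown_all_endpoints() to tear down all
+   live TCP endpoints when connectivity is lost. */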
diff --git a/test/core/profiling/timers_test.c b/src/core/lib/iomgr/network_status_tracker.h
similarity index 55%
rename from test/core/profiling/timers_test.c
rename to src/core/lib/iomgr/network_status_tracker.h
index 284589af1e173429b3bb6aa5ee5ded6d44e5a813..74a1aa8135fb2c46014dc91086ffaa2d5b8a13ee 100644
--- a/test/core/profiling/timers_test.c
+++ b/src/core/lib/iomgr/network_status_tracker.h
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -31,53 +31,11 @@
  *
  */
 
-#include "src/core/lib/profiling/timers.h"
-#include <stdlib.h>
-#include "test/core/util/test_config.h"
+#ifndef GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H
+#define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H
+#include "src/core/lib/iomgr/endpoint.h"
 
-void test_log_events(size_t num_seqs) {
-  size_t start = 0;
-  size_t *state;
-  state = calloc(num_seqs, sizeof(state[0]));
-  while (start < num_seqs) {
-    size_t i;
-    size_t row;
-    if (state[start] == 3) { /* Already done with this posn */
-      start++;
-      continue;
-    }
-
-    row = (size_t)rand() % 10; /* how many in a row */
-    for (i = start; (i < start + row) && (i < num_seqs); i++) {
-      size_t j;
-      size_t advance = 1 + (size_t)rand() % 3; /* how many to advance by */
-      for (j = 0; j < advance; j++) {
-        switch (state[i]) {
-          case 0:
-            GPR_TIMER_MARK(STATE_0, i);
-            state[i]++;
-            break;
-          case 1:
-            GPR_TIMER_MARK(STATE_1, i);
-            state[i]++;
-            break;
-          case 2:
-            GPR_TIMER_MARK(STATE_2, i);
-            state[i]++;
-            break;
-          case 3:
-            break;
-        }
-      }
-    }
-  }
-  free(state);
-}
-
-int main(int argc, char **argv) {
-  grpc_test_init(argc, argv);
-  gpr_timers_global_init();
-  test_log_events(1000000);
-  gpr_timers_global_destroy();
-  return 0;
-}
+void grpc_network_status_register_endpoint(grpc_endpoint *ep);
+void grpc_network_status_unregister_endpoint(grpc_endpoint *ep);
+void grpc_network_status_shutdown_all_endpoints();
+#endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.c b/src/core/lib/iomgr/socket_utils_common_posix.c
index 3a1371617e5d5ff2e93bedf41d181de0a18e4c78..d2f6261e2abb11e49b9a4cdfb5977649afc9ccd0 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.c
+++ b/src/core/lib/iomgr/socket_utils_common_posix.c
@@ -169,6 +169,28 @@ grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse) {
   return GRPC_ERROR_NONE;
 }
 
+/* set SO_REUSEPORT: allow multiple sockets to bind to the same port */
+grpc_error *grpc_set_socket_reuse_port(int fd, int reuse) {
+#ifndef SO_REUSEPORT
+  return GRPC_ERROR_CREATE("SO_REUSEPORT unavailable on compiling system");
+#else
+  int val = (reuse != 0);
+  int newval;
+  socklen_t intlen = sizeof(newval);
+  if (0 != setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val))) {
+    return GRPC_OS_ERROR(errno, "setsockopt(SO_REUSEPORT)");
+  }
+  if (0 != getsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &newval, &intlen)) {
+    return GRPC_OS_ERROR(errno, "getsockopt(SO_REUSEPORT)");
+  }
+  if ((newval != 0) != val) {
+    return GRPC_ERROR_CREATE("Failed to set SO_REUSEPORT");
+  }
+
+  return GRPC_ERROR_NONE;
+#endif
+}
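+
+/* Note: used by tcp_server_posix.c (prepare_socket() and clone_port() in this
+   change) so that several listening fds, one per pollset, can bind to the
+   same port when SO_REUSEPORT is available. */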
+
 /* disable nagle */
 grpc_error *grpc_set_socket_low_latency(int fd, int low_latency) {
   int val = (low_latency != 0);
diff --git a/src/core/lib/iomgr/socket_utils_posix.h b/src/core/lib/iomgr/socket_utils_posix.h
index 30ff39dfa3f5899de5fa53b78fb6ff20c6d0dbb1..7bcc2219aeabf5748da6ff9da516b92d6b4f3eaa 100644
--- a/src/core/lib/iomgr/socket_utils_posix.h
+++ b/src/core/lib/iomgr/socket_utils_posix.h
@@ -55,6 +55,9 @@ grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse);
 /* disable nagle */
 grpc_error *grpc_set_socket_low_latency(int fd, int low_latency);
 
+/* set SO_REUSEPORT */
+grpc_error *grpc_set_socket_reuse_port(int fd, int reuse);
+
 /* Returns true if this system can create AF_INET6 sockets bound to ::1.
    The value is probed once, and cached for the life of the process.
 
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 017f52c367748a56dca38bb2230255a718b61cca..2ab45e33ce3fd1f79f03960ef6d1a0a2eab1ca09 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -35,6 +35,7 @@
 
 #ifdef GPR_POSIX_SOCKET
 
+#include "src/core/lib/iomgr/network_status_tracker.h"
 #include "src/core/lib/iomgr/tcp_posix.h"
 
 #include <errno.h>
@@ -152,6 +153,7 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
 #endif
 
 static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+  grpc_network_status_unregister_endpoint(ep);
   grpc_tcp *tcp = (grpc_tcp *)ep;
   TCP_UNREF(exec_ctx, tcp, "destroy");
 }
@@ -160,7 +162,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
                          grpc_error *error) {
   grpc_closure *cb = tcp->read_cb;
 
-  if (false && grpc_tcp_trace) {
+  if (grpc_tcp_trace) {
     size_t i;
     const char *str = grpc_error_string(error);
     gpr_log(GPR_DEBUG, "read: error=%s", str);
@@ -394,7 +396,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
   grpc_tcp *tcp = (grpc_tcp *)ep;
   grpc_error *error = GRPC_ERROR_NONE;
 
-  if (false && grpc_tcp_trace) {
+  if (grpc_tcp_trace) {
     size_t i;
 
     for (i = 0; i < buf->count; i++) {
@@ -474,6 +476,8 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
   tcp->write_closure.cb = tcp_handle_write;
   tcp->write_closure.cb_arg = tcp;
   gpr_slice_buffer_init(&tcp->last_read_buffer);
+  /* Tell network status tracker about new endpoint */
+  grpc_network_status_register_endpoint(&tcp->base);
 
   return &tcp->base;
 }
diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h
index 1d6e70dbe921aea3ee845eede6743d5ebd53d412..875a6ca14a7a8f12199c4729a1dceed24015aac7 100644
--- a/src/core/lib/iomgr/tcp_server.h
+++ b/src/core/lib/iomgr/tcp_server.h
@@ -34,6 +34,8 @@
 #ifndef GRPC_CORE_LIB_IOMGR_TCP_SERVER_H
 #define GRPC_CORE_LIB_IOMGR_TCP_SERVER_H
 
+#include <grpc/grpc.h>
+
 #include "src/core/lib/iomgr/closure.h"
 #include "src/core/lib/iomgr/endpoint.h"
 
@@ -59,6 +61,7 @@ typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
    If shutdown_complete is not NULL, it will be used by
    grpc_tcp_server_unref() when the ref count reaches zero. */
 grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+                                   const grpc_channel_args *args,
                                    grpc_tcp_server **server);
 
 /* Start listening to bound ports */
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 6fb547bb367b5b01bff564f4a7656b550d5698d3..d3803c3bd0e4d856107038fa93eea6168eaefd9e 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -112,8 +112,10 @@ struct grpc_tcp_server {
   /* destroyed port count: how many ports are completely destroyed */
   size_t destroyed_ports;
 
-  /* is this server shutting down? (boolean) */
-  int shutdown;
+  /* is this server shutting down? */
+  bool shutdown;
+  /* use SO_REUSEPORT */
+  bool so_reuseport;
 
   /* linked list of server ports */
   grpc_tcp_listener *head;
@@ -132,17 +134,45 @@ struct grpc_tcp_server {
   size_t pollset_count;
 
   /* next pollset to assign a channel to */
-  size_t next_pollset_to_assign;
+  gpr_atm next_pollset_to_assign;
 };
 
+static gpr_once check_init = GPR_ONCE_INIT;
+static bool has_so_reuseport;
+
+static void init(void) {
+  int s = socket(AF_INET, SOCK_STREAM, 0);
+  if (s >= 0) {
+    has_so_reuseport = GRPC_LOG_IF_ERROR("check for SO_REUSEPORT",
+                                         grpc_set_socket_reuse_port(s, 1));
+    close(s);
+  }
+}
+
 grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+                                   const grpc_channel_args *args,
                                    grpc_tcp_server **server) {
+  gpr_once_init(&check_init, init);
+
   grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+  s->so_reuseport = has_so_reuseport;
+  for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
+    if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) {
+      if (args->args[i].type == GRPC_ARG_INTEGER) {
+        s->so_reuseport =
+            has_so_reuseport && (args->args[i].value.integer != 0);
+      } else {
+        gpr_free(s);
+        return GRPC_ERROR_CREATE(GRPC_ARG_ALLOW_REUSEPORT
+                                 " must be an integer");
+      }
+    }
+  }
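+  /* Illustrative sketch (hypothetical caller code): to opt out of
+     SO_REUSEPORT, pass a channel arg such as
+         arg.type = GRPC_ARG_INTEGER;
+         arg.key = GRPC_ARG_ALLOW_REUSEPORT;
+         arg.value.integer = 0;
+     Any non-integer value for this key is rejected above. */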
   gpr_ref_init(&s->refs, 1);
   gpr_mu_init(&s->mu);
   s->active_ports = 0;
   s->destroyed_ports = 0;
-  s->shutdown = 0;
+  s->shutdown = false;
   s->shutdown_starting.head = NULL;
   s->shutdown_starting.tail = NULL;
   s->shutdown_complete = shutdown_complete;
@@ -151,7 +181,7 @@ grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
   s->head = NULL;
   s->tail = NULL;
   s->nports = 0;
-  s->next_pollset_to_assign = 0;
+  gpr_atm_no_barrier_store(&s->next_pollset_to_assign, 0);
   *server = s;
   return GRPC_ERROR_NONE;
 }
@@ -218,7 +248,7 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
   gpr_mu_lock(&s->mu);
 
   GPR_ASSERT(!s->shutdown);
-  s->shutdown = 1;
+  s->shutdown = true;
 
   /* shutdown all fd's */
   if (s->active_ports) {
@@ -268,13 +298,19 @@ static int get_max_accept_queue_size(void) {
 
 /* Prepare a recently-created socket for listening. */
 static grpc_error *prepare_socket(int fd, const struct sockaddr *addr,
-                                  size_t addr_len, int *port) {
+                                  size_t addr_len, bool so_reuseport,
+                                  int *port) {
   struct sockaddr_storage sockname_temp;
   socklen_t sockname_len;
   grpc_error *err = GRPC_ERROR_NONE;
 
   GPR_ASSERT(fd >= 0);
 
+  if (so_reuseport) {
+    err = grpc_set_socket_reuse_port(fd, 1);
+    if (err != GRPC_ERROR_NONE) goto error;
+  }
+
   err = grpc_set_socket_nonblocking(fd, 1);
   if (err != GRPC_ERROR_NONE) goto error;
   err = grpc_set_socket_cloexec(fd, 1);
@@ -333,7 +369,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
   }
 
   read_notifier_pollset =
-      sp->server->pollsets[(sp->server->next_pollset_to_assign++) %
+      sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
+                               &sp->server->next_pollset_to_assign, 1) %
                            sp->server->pollset_count];
 
   /* loop until accept4 returns EAGAIN, and then re-arm notification */
@@ -407,7 +444,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
   char *addr_str;
   char *name;
 
-  grpc_error *err = prepare_socket(fd, addr, addr_len, &port);
+  grpc_error *err = prepare_socket(fd, addr, addr_len, s->so_reuseport, &port);
   if (err == GRPC_ERROR_NONE) {
     GPR_ASSERT(port > 0);
     grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
@@ -443,6 +480,52 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
   return err;
 }
 
+static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
+  grpc_tcp_listener *sp = NULL;
+  char *addr_str;
+  char *name;
+  grpc_error *err;
+
+  for (grpc_tcp_listener *l = listener->next; l && l->is_sibling; l = l->next) {
+    l->fd_index += count;
+  }
+
+  for (unsigned i = 0; i < count; i++) {
+    int fd, port;
+    grpc_dualstack_mode dsmode;
+    err = grpc_create_dualstack_socket(&listener->addr.sockaddr, SOCK_STREAM, 0,
+                                       &dsmode, &fd);
+    if (err != GRPC_ERROR_NONE) return err;
+    err = prepare_socket(fd, &listener->addr.sockaddr, listener->addr_len, true,
+                         &port);
+    if (err != GRPC_ERROR_NONE) return err;
+    listener->server->nports++;
+    grpc_sockaddr_to_string(&addr_str, &listener->addr.sockaddr, 1);
+    gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
+    sp = gpr_malloc(sizeof(grpc_tcp_listener));
+    sp->next = listener->next;
+    listener->next = sp;
+    sp->server = listener->server;
+    sp->fd = fd;
+    sp->emfd = grpc_fd_create(fd, name);
+    memcpy(sp->addr.untyped, listener->addr.untyped, listener->addr_len);
+    sp->addr_len = listener->addr_len;
+    sp->port = port;
+    sp->port_index = listener->port_index;
+    sp->fd_index = listener->fd_index + count - i;
+    sp->is_sibling = 1;
+    sp->sibling = listener->is_sibling ? listener->sibling : listener;
+    GPR_ASSERT(sp->emfd);
+    while (listener->server->tail->next != NULL) {
+      listener->server->tail = listener->server->tail->next;
+    }
+    gpr_free(addr_str);
+    gpr_free(name);
+  }
+
+  return GRPC_ERROR_NONE;
+}
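+
+/* clone_port() above is used by grpc_tcp_server_start() (below): with
+   SO_REUSEPORT, each listener is cloned (pollset_count - 1) times so that
+   every pollset polls its own fd bound to the same port, letting the kernel
+   spread incoming connections across the listening fds. */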
+
 grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
                                      size_t addr_len, int *out_port) {
   grpc_tcp_listener *sp;
@@ -599,14 +682,29 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
   s->on_accept_cb_arg = on_accept_cb_arg;
   s->pollsets = pollsets;
   s->pollset_count = pollset_count;
-  for (sp = s->head; sp; sp = sp->next) {
-    for (i = 0; i < pollset_count; i++) {
-      grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
+  sp = s->head;
+  while (sp != NULL) {
+    if (s->so_reuseport && pollset_count > 1) {
+      GPR_ASSERT(GRPC_LOG_IF_ERROR(
+          "clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
+      for (i = 0; i < pollset_count; i++) {
+        grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
+        sp->read_closure.cb = on_read;
+        sp->read_closure.cb_arg = sp;
+        grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+        s->active_ports++;
+        sp = sp->next;
+      }
+    } else {
+      for (i = 0; i < pollset_count; i++) {
+        grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
+      }
+      sp->read_closure.cb = on_read;
+      sp->read_closure.cb_arg = sp;
+      grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+      s->active_ports++;
+      sp = sp->next;
     }
-    sp->read_closure.cb = on_read;
-    sp->read_closure.cb_arg = sp;
-    grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
-    s->active_ports++;
   }
   gpr_mu_unlock(&s->mu);
 }
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index 2a51671ec71a4d8d7016a43af46220714c51f51a..7b0966704c2bf01e05ed897a8b0a7ab208e77eb6 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -103,6 +103,7 @@ struct grpc_tcp_server {
 /* Public function. Allocates the proper data structures to hold a
    grpc_tcp_server. */
 grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+                                   const grpc_channel_args *args,
                                    grpc_tcp_server **server) {
   grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
   gpr_ref_init(&s->refs, 1);
@@ -396,7 +397,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
                                         size_t addr_len, unsigned port_index,
                                         grpc_tcp_listener **listener) {
   grpc_tcp_listener *sp = NULL;
-  int port;
+  int port = -1;
   int status;
   GUID guid = WSAID_ACCEPTEX;
   DWORD ioctl_num_bytes;
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index b2af8030aaa5face892825d2456c8775449d7ffd..37ab59021e367f14c86aa1f5e2a2235913b16623 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -37,6 +37,7 @@
 
 #include <limits.h>
 
+#include "src/core/lib/iomgr/network_status_tracker.h"
 #include "src/core/lib/iomgr/sockaddr_windows.h"
 
 #include <grpc/support/alloc.h>
@@ -378,6 +379,7 @@ static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
 }
 
 static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+  grpc_network_status_unregister_endpoint(ep);
   grpc_tcp *tcp = (grpc_tcp *)ep;
   TCP_UNREF(tcp, "destroy");
 }
@@ -401,6 +403,9 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
   grpc_closure_init(&tcp->on_read, on_read, tcp);
   grpc_closure_init(&tcp->on_write, on_write, tcp);
   tcp->peer_string = gpr_strdup(peer_string);
+  /* Tell network status tracking code about the new endpoint */
+  grpc_network_status_register_endpoint(&tcp->base);
+
   return &tcp->base;
 }
 
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index 16150687d3da715f046f990bd57e895381236e86..1ebccf2ee2e05955eb6a88f526dcff4ca828db96 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -243,13 +243,13 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
 
   if (!grpc_set_socket_sndbuf(fd, buffer_size_bytes)) {
     gpr_log(GPR_ERROR, "Failed to set send buffer size to %d bytes",
-            buf_size_bytes);
+            buffer_size_bytes);
     goto error;
   }
 
   if (!grpc_set_socket_rcvbuf(fd, buffer_size_bytes)) {
     gpr_log(GPR_ERROR, "Failed to set receive buffer size to %d bytes",
-            buf_size_bytes);
+            buffer_size_bytes);
     goto error;
   }
 
diff --git a/src/core/lib/iomgr/wakeup_fd_eventfd.c b/src/core/lib/iomgr/wakeup_fd_eventfd.c
index 667b4a5f901b933bc5d1f20d08bc2d3d39456565..95f61023306ab9e1f503df3ea747efdbec76723d 100644
--- a/src/core/lib/iomgr/wakeup_fd_eventfd.c
+++ b/src/core/lib/iomgr/wakeup_fd_eventfd.c
@@ -84,8 +84,10 @@ static void eventfd_destroy(grpc_wakeup_fd* fd_info) {
 }
 
 static int eventfd_check_availability(void) {
-  /* TODO(klempner): Actually check if eventfd is available */
-  return 1;
+  const int efd = eventfd(0, 0);
+  const int is_available = efd >= 0;
+  if (is_available) close(efd);
+  return is_available;
 }
 
 const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
diff --git a/src/core/lib/profiling/basic_timers.c b/src/core/lib/profiling/basic_timers.c
index 50082cd7ee40329f1c03402b7bab4057cf7473a3..51813d0461743341f5eadf8e6b133ce8621e79b1 100644
--- a/src/core/lib/profiling/basic_timers.c
+++ b/src/core/lib/profiling/basic_timers.c
@@ -141,11 +141,11 @@ static void write_log(gpr_timer_log *log) {
       entry->tm = gpr_time_0(entry->tm.clock_type);
     }
     fprintf(output_file,
-            "{\"t\": %lld.%09d, \"thd\": \"%d\", \"type\": \"%c\", \"tag\": "
+            "{\"t\": %" PRId64
+            ".%09d, \"thd\": \"%d\", \"type\": \"%c\", \"tag\": "
             "\"%s\", \"file\": \"%s\", \"line\": %d, \"imp\": %d}\n",
-            (long long)entry->tm.tv_sec, (int)entry->tm.tv_nsec, entry->thd,
-            entry->type, entry->tagstr, entry->file, entry->line,
-            entry->important);
+            entry->tm.tv_sec, entry->tm.tv_nsec, entry->thd, entry->type,
+            entry->tagstr, entry->file, entry->line, entry->important);
   }
 }
 
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.c b/src/core/lib/security/credentials/composite/composite_credentials.c
index 07db8bfd751160f91b80be15167a3210323ff878..850e41e6460b41bfc40251fd7606281fbc781c8c 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.c
+++ b/src/core/lib/security/credentials/composite/composite_credentials.c
@@ -72,11 +72,12 @@ static void composite_call_md_context_destroy(
 static void composite_call_metadata_cb(grpc_exec_ctx *exec_ctx, void *user_data,
                                        grpc_credentials_md *md_elems,
                                        size_t num_md,
-                                       grpc_credentials_status status) {
+                                       grpc_credentials_status status,
+                                       const char *error_details) {
   grpc_composite_call_credentials_metadata_context *ctx =
       (grpc_composite_call_credentials_metadata_context *)user_data;
   if (status != GRPC_CREDENTIALS_OK) {
-    ctx->cb(exec_ctx, ctx->user_data, NULL, 0, status);
+    ctx->cb(exec_ctx, ctx->user_data, NULL, 0, status, error_details);
     return;
   }
 
@@ -101,7 +102,7 @@ static void composite_call_metadata_cb(grpc_exec_ctx *exec_ctx, void *user_data,
 
   /* We're done!. */
   ctx->cb(exec_ctx, ctx->user_data, ctx->md_elems->entries,
-          ctx->md_elems->num_entries, GRPC_CREDENTIALS_OK);
+          ctx->md_elems->num_entries, GRPC_CREDENTIALS_OK, NULL);
   composite_call_md_context_destroy(ctx);
 }
 
diff --git a/src/core/lib/security/credentials/credentials.c b/src/core/lib/security/credentials/credentials.c
index 0eadaec1912ae89940084811416b91f3836c4492..029a357261644dbf001078992ad10d47cda440f0 100644
--- a/src/core/lib/security/credentials/credentials.c
+++ b/src/core/lib/security/credentials/credentials.c
@@ -117,7 +117,7 @@ void grpc_call_credentials_get_request_metadata(
     grpc_credentials_metadata_cb cb, void *user_data) {
   if (creds == NULL || creds->vtable->get_request_metadata == NULL) {
     if (cb != NULL) {
-      cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_OK);
+      cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_OK, NULL);
     }
     return;
   }
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index ce235e3a1df4a76cc60c9454b52678dec37d8e60..8e9d842eaddddcc483625a2d41c892b135a2842a 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -156,11 +156,10 @@ void grpc_credentials_md_store_unref(grpc_credentials_md_store *store);
 
 /* --- grpc_call_credentials. --- */
 
-typedef void (*grpc_credentials_metadata_cb)(grpc_exec_ctx *exec_ctx,
-                                             void *user_data,
-                                             grpc_credentials_md *md_elems,
-                                             size_t num_md,
-                                             grpc_credentials_status status);
+/* error_details must be NULL if status is GRPC_CREDENTIALS_OK. */
+typedef void (*grpc_credentials_metadata_cb)(
+    grpc_exec_ctx *exec_ctx, void *user_data, grpc_credentials_md *md_elems,
+    size_t num_md, grpc_credentials_status status, const char *error_details);
 
 typedef struct {
   void (*destruct)(grpc_call_credentials *c);
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.c b/src/core/lib/security/credentials/fake/fake_credentials.c
index ee6d964de178806d2bdd6aced8e97d9ba9654f3e..51cafd986fb89f6196e4d01d123786c4f2d48956 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.c
+++ b/src/core/lib/security/credentials/fake/fake_credentials.c
@@ -100,7 +100,7 @@ static void on_simulated_token_fetch_done(grpc_exec_ctx *exec_ctx,
       (grpc_credentials_metadata_request *)user_data;
   grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)r->creds;
   r->cb(exec_ctx, r->user_data, c->md_store->entries, c->md_store->num_entries,
-        GRPC_CREDENTIALS_OK);
+        GRPC_CREDENTIALS_OK, NULL);
   grpc_credentials_metadata_request_destroy(r);
 }
 
@@ -117,7 +117,7 @@ static void md_only_test_get_request_metadata(
         grpc_closure_create(on_simulated_token_fetch_done, cb_arg),
         GRPC_ERROR_NONE);
   } else {
-    cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK);
+    cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK, NULL);
   }
 }
 
diff --git a/src/core/lib/security/credentials/iam/iam_credentials.c b/src/core/lib/security/credentials/iam/iam_credentials.c
index 64d58718449d137d1803592591609d7bcb1b4e75..370a384d0e9127ab4a78bbd20bbce603995ce7cd 100644
--- a/src/core/lib/security/credentials/iam/iam_credentials.c
+++ b/src/core/lib/security/credentials/iam/iam_credentials.c
@@ -55,7 +55,7 @@ static void iam_get_request_metadata(grpc_exec_ctx *exec_ctx,
                                      void *user_data) {
   grpc_google_iam_credentials *c = (grpc_google_iam_credentials *)creds;
   cb(exec_ctx, user_data, c->iam_md->entries, c->iam_md->num_entries,
-     GRPC_CREDENTIALS_OK);
+     GRPC_CREDENTIALS_OK, NULL);
 }
 
 static grpc_call_credentials_vtable iam_vtable = {iam_destruct,
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.c b/src/core/lib/security/credentials/jwt/jwt_credentials.c
index 973fb75eaab472953cedad5b11e6fb2b176b9e1d..f87ba0ce8d1d8a0f1b371f2694c1e37d04ead636 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.c
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.c
@@ -113,10 +113,11 @@ static void jwt_get_request_metadata(grpc_exec_ctx *exec_ctx,
 
   if (jwt_md != NULL) {
     cb(exec_ctx, user_data, jwt_md->entries, jwt_md->num_entries,
-       GRPC_CREDENTIALS_OK);
+       GRPC_CREDENTIALS_OK, NULL);
     grpc_credentials_md_store_unref(jwt_md);
   } else {
-    cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_ERROR);
+    cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_ERROR,
+       "Could not generate JWT.");
   }
 }
 
@@ -149,11 +150,11 @@ grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
       "grpc_service_account_jwt_access_credentials_create("
       "json_key=%s, "
       "token_lifetime="
-      "gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+      "gpr_timespec { tv_sec: %" PRId64
+      ", tv_nsec: %d, clock_type: %d }, "
       "reserved=%p)",
-      5,
-      (json_key, (long long)token_lifetime.tv_sec, (int)token_lifetime.tv_nsec,
-       (int)token_lifetime.clock_type, reserved));
+      5, (json_key, token_lifetime.tv_sec, token_lifetime.tv_nsec,
+          (int)token_lifetime.clock_type, reserved));
   GPR_ASSERT(reserved == NULL);
   return grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
       grpc_auth_json_key_create_from_string(json_key), token_lifetime);
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
index 1102553dd391bb95101fb35a4b38e0a7c22fae5f..c22ea5c468b43ecbeedfee1d38652191dc294dff 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
@@ -235,10 +235,11 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
     c->token_expiration =
         gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), token_lifetime);
     r->cb(exec_ctx, r->user_data, c->access_token_md->entries,
-          c->access_token_md->num_entries, status);
+          c->access_token_md->num_entries, GRPC_CREDENTIALS_OK, NULL);
   } else {
     c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
-    r->cb(exec_ctx, r->user_data, NULL, 0, status);
+    r->cb(exec_ctx, r->user_data, NULL, 0, status,
+          "Error occured when fetching oauth2 token.");
   }
   gpr_mu_unlock(&c->mu);
   grpc_credentials_metadata_request_destroy(r);
@@ -266,7 +267,7 @@ static void oauth2_token_fetcher_get_request_metadata(
   }
   if (cached_access_token_md != NULL) {
     cb(exec_ctx, user_data, cached_access_token_md->entries,
-       cached_access_token_md->num_entries, GRPC_CREDENTIALS_OK);
+       cached_access_token_md->num_entries, GRPC_CREDENTIALS_OK, NULL);
     grpc_credentials_md_store_unref(cached_access_token_md);
   } else {
     c->fetch_func(
@@ -404,7 +405,8 @@ static void access_token_get_request_metadata(
     grpc_polling_entity *pollent, grpc_auth_metadata_context context,
     grpc_credentials_metadata_cb cb, void *user_data) {
   grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds;
-  cb(exec_ctx, user_data, c->access_token_md->entries, 1, GRPC_CREDENTIALS_OK);
+  cb(exec_ctx, user_data, c->access_token_md->entries, 1, GRPC_CREDENTIALS_OK,
+     NULL);
 }
 
 static grpc_call_credentials_vtable access_token_vtable = {
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.c b/src/core/lib/security/credentials/plugin/plugin_credentials.c
index 9fb55e8466c0aad93438ba55fbb91a13148a0e82..824ff081dc0ea06abf394a88601cf3184db6e901 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.c
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.c
@@ -67,7 +67,8 @@ static void plugin_md_request_metadata_ready(void *request,
       gpr_log(GPR_ERROR, "Getting metadata from plugin failed with error: %s",
               error_details);
     }
-    r->cb(&exec_ctx, r->user_data, NULL, 0, GRPC_CREDENTIALS_ERROR);
+    r->cb(&exec_ctx, r->user_data, NULL, 0, GRPC_CREDENTIALS_ERROR,
+          error_details);
   } else {
     size_t i;
     grpc_credentials_md *md_array = NULL;
@@ -79,7 +80,7 @@ static void plugin_md_request_metadata_ready(void *request,
             gpr_slice_from_copied_buffer(md[i].value, md[i].value_length);
       }
     }
-    r->cb(&exec_ctx, r->user_data, md_array, num_md, GRPC_CREDENTIALS_OK);
+    r->cb(&exec_ctx, r->user_data, md_array, num_md, GRPC_CREDENTIALS_OK, NULL);
     if (md_array != NULL) {
       for (i = 0; i < num_md; i++) {
         gpr_slice_unref(md_array[i].key);
@@ -107,7 +108,7 @@ static void plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
     c->plugin.get_metadata(c->plugin.state, context,
                            plugin_md_request_metadata_ready, request);
   } else {
-    cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_OK);
+    cb(exec_ctx, user_data, NULL, 0, GRPC_CREDENTIALS_OK, NULL);
   }
 }
 
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index 76be2acd728f25dd9631c6e592a49d030b066cd6..ed7929aa27e9d52fd9401f6ba4d67ced00a4a757 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -91,14 +91,16 @@ static void bubble_up_error(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                             grpc_status_code status, const char *error_msg) {
   call_data *calld = elem->call_data;
   gpr_log(GPR_ERROR, "Client side authentication failure: %s", error_msg);
-  grpc_transport_stream_op_add_cancellation(&calld->op, status);
+  gpr_slice error_slice = gpr_slice_from_copied_string(error_msg);
+  grpc_transport_stream_op_add_close(&calld->op, status, &error_slice);
   grpc_call_next_op(exec_ctx, elem, &calld->op);
 }
 
 static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
                                     grpc_credentials_md *md_elems,
                                     size_t num_md,
-                                    grpc_credentials_status status) {
+                                    grpc_credentials_status status,
+                                    const char *error_details) {
   grpc_call_element *elem = (grpc_call_element *)user_data;
   call_data *calld = elem->call_data;
   grpc_transport_stream_op *op = &calld->op;
@@ -107,7 +109,9 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
   reset_auth_metadata_context(&calld->auth_md_context);
   if (status != GRPC_CREDENTIALS_OK) {
     bubble_up_error(exec_ctx, elem, GRPC_STATUS_UNAUTHENTICATED,
-                    "Credentials failed to get metadata.");
+                    (error_details != NULL && strlen(error_details) > 0)
+                        ? error_details
+                        : "Credentials failed to get metadata.");
     return;
   }
   GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
@@ -172,7 +176,7 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
     calld->creds = grpc_composite_call_credentials_create(channel_call_creds,
                                                           ctx->creds, NULL);
     if (calld->creds == NULL) {
-      bubble_up_error(exec_ctx, elem, GRPC_STATUS_INTERNAL,
+      bubble_up_error(exec_ctx, elem, GRPC_STATUS_UNAUTHENTICATED,
                       "Incompatible credentials set on channel and call.");
       return;
     }
@@ -201,7 +205,7 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *user_data,
     char *error_msg;
     gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.",
                  grpc_mdstr_as_c_string(calld->host));
-    bubble_up_error(exec_ctx, elem, GRPC_STATUS_INTERNAL, error_msg);
+    bubble_up_error(exec_ctx, elem, GRPC_STATUS_UNAUTHENTICATED, error_msg);
     gpr_free(error_msg);
   }
 }
@@ -220,8 +224,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
   grpc_linked_mdelem *l;
   grpc_client_security_context *sec_ctx = NULL;
 
-  if (calld->security_context_set == 0 &&
-      op->cancel_with_status == GRPC_STATUS_OK) {
+  if (calld->security_context_set == 0 && op->cancel_error == GRPC_ERROR_NONE) {
     calld->security_context_set = 1;
     GPR_ASSERT(op->context);
     if (op->context[GRPC_CONTEXT_SECURITY].value == NULL) {
diff --git a/src/core/lib/support/time.c b/src/core/lib/support/time.c
index 57f83311945e11ac191d8a15a1fa3c2f555b08a7..5a7d043aed8f352fb0b10b6b42cd561a4bc44357 100644
--- a/src/core/lib/support/time.c
+++ b/src/core/lib/support/time.c
@@ -80,103 +80,67 @@ gpr_timespec gpr_inf_past(gpr_clock_type type) {
   return out;
 }
 
-/* TODO(ctiller): consider merging _nanos, _micros, _millis into a single
-   function for maintainability. Similarly for _seconds, _minutes, and _hours */
-
-gpr_timespec gpr_time_from_nanos(int64_t ns, gpr_clock_type type) {
-  gpr_timespec result;
-  result.clock_type = type;
-  if (ns == INT64_MAX) {
-    result = gpr_inf_future(type);
-  } else if (ns == INT64_MIN) {
-    result = gpr_inf_past(type);
-  } else if (ns >= 0) {
-    result.tv_sec = ns / GPR_NS_PER_SEC;
-    result.tv_nsec = (int32_t)(ns - result.tv_sec * GPR_NS_PER_SEC);
+static gpr_timespec to_seconds_from_sub_second_time(int64_t time_in_units,
+                                                    int64_t units_per_sec,
+                                                    gpr_clock_type type) {
+  gpr_timespec out;
+  if (time_in_units == INT64_MAX) {
+    out = gpr_inf_future(type);
+  } else if (time_in_units == INT64_MIN) {
+    out = gpr_inf_past(type);
   } else {
-    /* Calculation carefully formulated to avoid any possible under/overflow. */
-    result.tv_sec = (-(999999999 - (ns + GPR_NS_PER_SEC)) / GPR_NS_PER_SEC) - 1;
-    result.tv_nsec = (int32_t)(ns - result.tv_sec * GPR_NS_PER_SEC);
+    if (time_in_units >= 0) {
+      out.tv_sec = time_in_units / units_per_sec;
+    } else {
+      out.tv_sec = (-((units_per_sec - 1) - (time_in_units + units_per_sec)) /
+                    units_per_sec) -
+                   1;
+    }
+    out.tv_nsec = (int32_t)((time_in_units - out.tv_sec * units_per_sec) *
+                            GPR_NS_PER_SEC / units_per_sec);
+    out.clock_type = type;
   }
-  return result;
+  return out;
 }
 
-gpr_timespec gpr_time_from_micros(int64_t us, gpr_clock_type type) {
-  gpr_timespec result;
-  result.clock_type = type;
-  if (us == INT64_MAX) {
-    result = gpr_inf_future(type);
-  } else if (us == INT64_MIN) {
-    result = gpr_inf_past(type);
-  } else if (us >= 0) {
-    result.tv_sec = us / 1000000;
-    result.tv_nsec = (int32_t)((us - result.tv_sec * 1000000) * 1000);
+static gpr_timespec to_seconds_from_above_second_time(int64_t time_in_units,
+                                                      int64_t secs_per_unit,
+                                                      gpr_clock_type type) {
+  gpr_timespec out;
+  if (time_in_units >= INT64_MAX / secs_per_unit) {
+    out = gpr_inf_future(type);
+  } else if (time_in_units <= INT64_MIN / secs_per_unit) {
+    out = gpr_inf_past(type);
   } else {
-    /* Calculation carefully formulated to avoid any possible under/overflow. */
-    result.tv_sec = (-(999999 - (us + 1000000)) / 1000000) - 1;
-    result.tv_nsec = (int32_t)((us - result.tv_sec * 1000000) * 1000);
+    out.tv_sec = time_in_units * secs_per_unit;
+    out.tv_nsec = 0;
+    out.clock_type = type;
   }
-  return result;
+  return out;
+}
+
+gpr_timespec gpr_time_from_nanos(int64_t ns, gpr_clock_type type) {
+  return to_seconds_from_sub_second_time(ns, GPR_NS_PER_SEC, type);
+}
+
+gpr_timespec gpr_time_from_micros(int64_t us, gpr_clock_type type) {
+  return to_seconds_from_sub_second_time(us, GPR_US_PER_SEC, type);
 }
 
 gpr_timespec gpr_time_from_millis(int64_t ms, gpr_clock_type type) {
-  gpr_timespec result;
-  result.clock_type = type;
-  if (ms == INT64_MAX) {
-    result = gpr_inf_future(type);
-  } else if (ms == INT64_MIN) {
-    result = gpr_inf_past(type);
-  } else if (ms >= 0) {
-    result.tv_sec = ms / 1000;
-    result.tv_nsec = (int32_t)((ms - result.tv_sec * 1000) * 1000000);
-  } else {
-    /* Calculation carefully formulated to avoid any possible under/overflow. */
-    result.tv_sec = (-(999 - (ms + 1000)) / 1000) - 1;
-    result.tv_nsec = (int32_t)((ms - result.tv_sec * 1000) * 1000000);
-  }
-  return result;
+  return to_seconds_from_sub_second_time(ms, GPR_MS_PER_SEC, type);
 }
 
 gpr_timespec gpr_time_from_seconds(int64_t s, gpr_clock_type type) {
-  gpr_timespec result;
-  result.clock_type = type;
-  if (s == INT64_MAX) {
-    result = gpr_inf_future(type);
-  } else if (s == INT64_MIN) {
-    result = gpr_inf_past(type);
-  } else {
-    result.tv_sec = s;
-    result.tv_nsec = 0;
-  }
-  return result;
+  return to_seconds_from_sub_second_time(s, 1, type);
 }
 
 gpr_timespec gpr_time_from_minutes(int64_t m, gpr_clock_type type) {
-  gpr_timespec result;
-  result.clock_type = type;
-  if (m >= INT64_MAX / 60) {
-    result = gpr_inf_future(type);
-  } else if (m <= INT64_MIN / 60) {
-    result = gpr_inf_past(type);
-  } else {
-    result.tv_sec = m * 60;
-    result.tv_nsec = 0;
-  }
-  return result;
+  return to_seconds_from_above_second_time(m, 60, type);
 }
 
 gpr_timespec gpr_time_from_hours(int64_t h, gpr_clock_type type) {
-  gpr_timespec result;
-  result.clock_type = type;
-  if (h >= INT64_MAX / 3600) {
-    result = gpr_inf_future(type);
-  } else if (h <= INT64_MIN / 3600) {
-    result = gpr_inf_past(type);
-  } else {
-    result.tv_sec = h * 3600;
-    result.tv_nsec = 0;
-  }
-  return result;
+  return to_seconds_from_above_second_time(h, 3600, type);
 }
 
 gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
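The refactored helper keeps the original floor-division formulation for negative inputs. A minimal standalone sketch (not part of gRPC; it only re-derives the arithmetic of to_seconds_from_sub_second_time) shows how a negative millisecond count splits into a floored tv_sec and a non-negative tv_nsec:

```c
/* Standalone illustration of the flooring arithmetic used by
   to_seconds_from_sub_second_time for negative inputs (assumes nothing from
   gRPC beyond the formula visible in the patch). */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int64_t ns_per_sec = 1000000000;
  int64_t ms = -1500;           /* -1.5 seconds, expressed in milliseconds */
  int64_t units_per_sec = 1000; /* milliseconds per second */

  /* Same overflow-safe formulation as the patch. */
  int64_t sec =
      (-((units_per_sec - 1) - (ms + units_per_sec)) / units_per_sec) - 1;
  int32_t nsec =
      (int32_t)((ms - sec * units_per_sec) * ns_per_sec / units_per_sec);

  /* Prints tv_sec=-2 tv_nsec=500000000, i.e. -2s + 0.5s == -1.5s. */
  printf("tv_sec=%lld tv_nsec=%d\n", (long long)sec, nsec);
  return 0;
}
```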
diff --git a/src/core/lib/surface/byte_buffer_reader.c b/src/core/lib/surface/byte_buffer_reader.c
index c97079f6385bfbb96375bf4d6950ec6cecdd3b0c..310bacb2c944ed5fe3d8091b16377d4b2b47e42f 100644
--- a/src/core/lib/surface/byte_buffer_reader.c
+++ b/src/core/lib/surface/byte_buffer_reader.c
@@ -54,8 +54,8 @@ static int is_compressed(grpc_byte_buffer *buffer) {
   return 1 /* GPR_TRUE */;
 }
 
-void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
-                                  grpc_byte_buffer *buffer) {
+int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
+                                 grpc_byte_buffer *buffer) {
   gpr_slice_buffer decompressed_slices_buffer;
   reader->buffer_in = buffer;
   switch (reader->buffer_in->type) {
@@ -67,9 +67,10 @@ void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
                                 &decompressed_slices_buffer) == 0) {
           gpr_log(GPR_ERROR,
                   "Unexpected error decompressing data for algorithm with enum "
-                  "value '%d'. Reading data as if it were uncompressed.",
+                  "value '%d'.",
                   reader->buffer_in->data.raw.compression);
-          reader->buffer_out = reader->buffer_in;
+          memset(reader, 0, sizeof(*reader));
+          return 0;
         } else { /* all fine */
           reader->buffer_out =
               grpc_raw_byte_buffer_create(decompressed_slices_buffer.slices,
@@ -82,6 +83,7 @@ void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
       reader->current.index = 0;
       break;
   }
+  return 1;
 }
 
 void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) {
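Because grpc_byte_buffer_reader_init can now fail — it returns 0 and zeroes the reader when decompression fails — callers must check its result before iterating. A hedged sketch of the adjusted calling pattern, using only the reader functions already present in the public API; the logging and return convention are illustrative:

```c
/* Hedged sketch: caller-side handling of the new int return value of
   grpc_byte_buffer_reader_init. Only public reader/slice functions are used;
   the error handling around them is illustrative. */
#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>

static int dump_slices(grpc_byte_buffer *bb) {
  grpc_byte_buffer_reader reader;
  gpr_slice slice;
  if (!grpc_byte_buffer_reader_init(&reader, bb)) {
    /* Decompression failed; the reader was zeroed and must not be used. */
    gpr_log(GPR_ERROR, "could not initialize byte buffer reader");
    return 0;
  }
  while (grpc_byte_buffer_reader_next(&reader, &slice)) {
    /* ... consume the slice here ... */
    gpr_slice_unref(slice);
  }
  grpc_byte_buffer_reader_destroy(&reader);
  return 1;
}
```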
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 04291b0ee0a83d155796554f85981856a71286eb..fc9df76dc186418e57e6526bf217a8290d7b6418 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -259,7 +259,8 @@ grpc_call *grpc_call_create(
       call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
     }
   }
-  call->send_deadline = send_deadline;
+  call->send_deadline =
+      gpr_convert_clock_type(send_deadline, GPR_CLOCK_MONOTONIC);
   GRPC_CHANNEL_INTERNAL_REF(channel, "call");
   /* initial refcount dropped by grpc_call_destroy */
   grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
@@ -402,8 +403,51 @@ static void set_status_code(grpc_call *call, status_source source,
 
   call->status[source].is_set = 1;
   call->status[source].code = (grpc_status_code)status;
+}
 
-  /* TODO(ctiller): what to do about the flush that was previously here */
+static void set_status_details(grpc_call *call, status_source source,
+                               grpc_mdstr *status) {
+  if (call->status[source].details != NULL) {
+    GRPC_MDSTR_UNREF(status);
+  } else {
+    call->status[source].details = status;
+  }
+}
+
+static void get_final_status(grpc_call *call,
+                             void (*set_value)(grpc_status_code code,
+                                               void *user_data),
+                             void *set_value_user_data) {
+  int i;
+  for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
+    if (call->status[i].is_set) {
+      set_value(call->status[i].code, set_value_user_data);
+      return;
+    }
+  }
+  if (call->is_client) {
+    set_value(GRPC_STATUS_UNKNOWN, set_value_user_data);
+  } else {
+    set_value(GRPC_STATUS_OK, set_value_user_data);
+  }
+}
+
+static void set_status_from_error(grpc_call *call, status_source source,
+                                  grpc_error *error) {
+  intptr_t status;
+  if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status)) {
+    set_status_code(call, source, (uint32_t)status);
+  } else {
+    set_status_code(call, source, GRPC_STATUS_INTERNAL);
+  }
+  const char *msg = grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE);
+  bool free_msg = false;
+  if (msg == NULL) {
+    free_msg = true;
+    msg = grpc_error_string(error);
+  }
+  set_status_details(call, source, grpc_mdstr_from_string(msg));
+  if (free_msg) grpc_error_free_string(msg);
 }
 
 static void set_incoming_compression_algorithm(
@@ -492,32 +536,6 @@ uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
   return encodings_accepted_by_peer;
 }
 
-static void set_status_details(grpc_call *call, status_source source,
-                               grpc_mdstr *status) {
-  if (call->status[source].details != NULL) {
-    GRPC_MDSTR_UNREF(call->status[source].details);
-  }
-  call->status[source].details = status;
-}
-
-static void get_final_status(grpc_call *call,
-                             void (*set_value)(grpc_status_code code,
-                                               void *user_data),
-                             void *set_value_user_data) {
-  int i;
-  for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
-    if (call->status[i].is_set) {
-      set_value(call->status[i].code, set_value_user_data);
-      return;
-    }
-  }
-  if (call->is_client) {
-    set_value(GRPC_STATUS_UNKNOWN, set_value_user_data);
-  } else {
-    set_value(GRPC_STATUS_OK, set_value_user_data);
-  }
-}
-
 static void get_final_details(grpc_call *call, char **out_details,
                               size_t *out_details_capacity) {
   int i;
@@ -741,8 +759,7 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
 typedef struct termination_closure {
   grpc_closure closure;
   grpc_call *call;
-  grpc_status_code status;
-  gpr_slice optional_message;
+  grpc_error *error;
   grpc_closure *op_closure;
   enum { TC_CANCEL, TC_CLOSE } type;
 } termination_closure;
@@ -758,7 +775,7 @@ static void done_termination(grpc_exec_ctx *exec_ctx, void *tcp,
       GRPC_CALL_INTERNAL_UNREF(exec_ctx, tc->call, "close");
       break;
   }
-  gpr_slice_unref(tc->optional_message);
+  GRPC_ERROR_UNREF(tc->error);
   grpc_exec_ctx_sched(exec_ctx, tc->op_closure, GRPC_ERROR_NONE, NULL);
   gpr_free(tc);
 }
@@ -767,7 +784,7 @@ static void send_cancel(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
   grpc_transport_stream_op op;
   termination_closure *tc = tcp;
   memset(&op, 0, sizeof(op));
-  op.cancel_with_status = tc->status;
+  op.cancel_error = tc->error;
   /* reuse closure to catch completion */
   grpc_closure_init(&tc->closure, done_termination, tc);
   op.on_complete = &tc->closure;
@@ -778,8 +795,7 @@ static void send_close(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
   grpc_transport_stream_op op;
   termination_closure *tc = tcp;
   memset(&op, 0, sizeof(op));
-  tc->optional_message = gpr_slice_ref(tc->optional_message);
-  grpc_transport_stream_op_add_close(&op, tc->status, &tc->optional_message);
+  op.close_error = tc->error;
   /* reuse closure to catch completion */
   grpc_closure_init(&tc->closure, done_termination, tc);
   tc->op_closure = op.on_complete;
@@ -789,14 +805,7 @@ static void send_close(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
 
 static grpc_call_error terminate_with_status(grpc_exec_ctx *exec_ctx,
                                              termination_closure *tc) {
-  grpc_mdstr *details = NULL;
-  if (GPR_SLICE_LENGTH(tc->optional_message) > 0) {
-    tc->optional_message = gpr_slice_ref(tc->optional_message);
-    details = grpc_mdstr_from_slice(tc->optional_message);
-  }
-
-  set_status_code(tc->call, STATUS_FROM_API_OVERRIDE, (uint32_t)tc->status);
-  set_status_details(tc->call, STATUS_FROM_API_OVERRIDE, details);
+  set_status_from_error(tc->call, STATUS_FROM_API_OVERRIDE, tc->error);
 
   if (tc->type == TC_CANCEL) {
     grpc_closure_init(&tc->closure, send_cancel, tc);
@@ -812,13 +821,15 @@ static grpc_call_error terminate_with_status(grpc_exec_ctx *exec_ctx,
 static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
                                           grpc_status_code status,
                                           const char *description) {
+  GPR_ASSERT(status != GRPC_STATUS_OK);
   termination_closure *tc = gpr_malloc(sizeof(*tc));
   memset(tc, 0, sizeof(termination_closure));
   tc->type = TC_CANCEL;
   tc->call = c;
-  tc->optional_message = gpr_slice_from_copied_string(description);
-  GPR_ASSERT(status != GRPC_STATUS_OK);
-  tc->status = status;
+  tc->error = grpc_error_set_int(
+      grpc_error_set_str(GRPC_ERROR_CREATE(description),
+                         GRPC_ERROR_STR_GRPC_MESSAGE, description),
+      GRPC_ERROR_INT_GRPC_STATUS, status);
 
   return terminate_with_status(exec_ctx, tc);
 }
@@ -826,13 +837,15 @@ static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
 static grpc_call_error close_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
                                          grpc_status_code status,
                                          const char *description) {
+  GPR_ASSERT(status != GRPC_STATUS_OK);
   termination_closure *tc = gpr_malloc(sizeof(*tc));
   memset(tc, 0, sizeof(termination_closure));
   tc->type = TC_CLOSE;
   tc->call = c;
-  tc->optional_message = gpr_slice_from_copied_string(description);
-  GPR_ASSERT(status != GRPC_STATUS_OK);
-  tc->status = status;
+  tc->error = grpc_error_set_int(
+      grpc_error_set_str(GRPC_ERROR_CREATE(description),
+                         GRPC_ERROR_STR_GRPC_MESSAGE, description),
+      GRPC_ERROR_INT_GRPC_STATUS, status);
 
   return terminate_with_status(exec_ctx, tc);
 }
@@ -1194,7 +1207,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
     } else if (grpc_compression_options_is_algorithm_enabled(
                    &compression_options, algo) == 0) {
       /* check if algorithm is supported by current channel config */
-      char *algo_name;
+      char *algo_name = NULL;
       grpc_compression_algorithm_name(algo, &algo_name);
       gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
                    algo_name);
@@ -1213,7 +1226,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
                   call->incoming_compression_algorithm)) {
     extern int grpc_compression_trace;
     if (grpc_compression_trace) {
-      char *algo_name;
+      char *algo_name = NULL;
       grpc_compression_algorithm_name(call->incoming_compression_algorithm,
                                       &algo_name);
       gpr_log(GPR_ERROR,
@@ -1414,7 +1427,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
           const grpc_compression_algorithm calgo =
               compression_algorithm_for_level_locked(
                   call, effective_compression_level);
-          char *calgo_name;
+          char *calgo_name = NULL;
           grpc_compression_algorithm_name(calgo, &calgo_name);
           // the following will be picked up by the compress filter and used as
           // the call's compression algorithm.
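The termination path now carries a grpc_error instead of a (status, slice) pair. The composition used by cancel_with_status and close_with_status, and the way set_status_from_error reads it back, can be condensed into a small hedged sketch (internal API; the error.h include path and the sample status are assumptions):

```c
/* Hedged sketch of the grpc_error round trip above: attach a grpc-status and
   a grpc-message to an error, then query them the way set_status_from_error
   does. Internal API; header path and sample status are assumptions. */
#include <grpc/status.h>
#include "src/core/lib/iomgr/error.h"

static void demo_error_round_trip(void) {
  grpc_error *error = grpc_error_set_int(
      grpc_error_set_str(GRPC_ERROR_CREATE("Deadline Exceeded"),
                         GRPC_ERROR_STR_GRPC_MESSAGE, "Deadline Exceeded"),
      GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);

  intptr_t status;
  if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status)) {
    /* status == GRPC_STATUS_DEADLINE_EXCEEDED */
  }
  const char *msg = grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE);
  (void)msg; /* "Deadline Exceeded" */
  GRPC_ERROR_UNREF(error);
}
```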
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index b640345c21d23d78f231ac37d1e6f68e12f6dc61..3a78fe3aa36b8baf90825911c2762791dbe7f5ba 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -37,7 +37,6 @@
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/channel/context.h"
 #include "src/core/lib/surface/api_trace.h"
-#include "src/core/lib/surface/surface_trace.h"
 
 #include <grpc/grpc.h>
 #include <grpc/impl/codegen/compression_types.h>
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 8936e7164f418ab6e5701fe27e61f71427011c02..6d2b1c4935273b0394e6e9ae1a60f73ca9353f14 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -81,7 +81,7 @@ struct grpc_channel {
   CHANNEL_FROM_CHANNEL_STACK(grpc_channel_stack_from_top_element(top_elem))
 
 /* the protobuf library will (by default) start warning at 100megs */
-#define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024)
+#define DEFAULT_MAX_MESSAGE_LENGTH (4 * 1024 * 1024)
 
 static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error);
@@ -223,11 +223,12 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
       "grpc_channel_create_call("
       "channel=%p, parent_call=%p, propagation_mask=%x, cq=%p, method=%s, "
       "host=%s, "
-      "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+      "deadline=gpr_timespec { tv_sec: %" PRId64
+      ", tv_nsec: %d, clock_type: %d }, "
       "reserved=%p)",
-      10, (channel, parent_call, (unsigned)propagation_mask, cq, method, host,
-           (long long)deadline.tv_sec, (int)deadline.tv_nsec,
-           (int)deadline.clock_type, reserved));
+      10,
+      (channel, parent_call, (unsigned)propagation_mask, cq, method, host,
+       deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type, reserved));
   GPR_ASSERT(!reserved);
   return grpc_channel_create_call_internal(
       channel, parent_call, propagation_mask, cq, NULL,
@@ -282,11 +283,12 @@ grpc_call *grpc_channel_create_registered_call(
       "grpc_channel_create_registered_call("
       "channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, "
       "registered_call_handle=%p, "
-      "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+      "deadline=gpr_timespec { tv_sec: %" PRId64
+      ", tv_nsec: %d, clock_type: %d }, "
       "reserved=%p)",
       9, (channel, parent_call, (unsigned)propagation_mask, completion_queue,
-          registered_call_handle, (long long)deadline.tv_sec,
-          (int)deadline.tv_nsec, (int)deadline.clock_type, reserved));
+          registered_call_handle, deadline.tv_sec, deadline.tv_nsec,
+          (int)deadline.clock_type, reserved));
   GPR_ASSERT(!reserved);
   return grpc_channel_create_call_internal(
       channel, parent_call, propagation_mask, completion_queue, NULL,
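Lowering DEFAULT_MAX_MESSAGE_LENGTH from 100 MB to 4 MB changes behavior for callers that relied on the old ceiling. A hedged sketch of raising the limit per channel; it assumes the standard GRPC_ARG_MAX_MESSAGE_LENGTH channel-arg key from grpc_types.h, which this patch does not modify:

```c
/* Sketch (assumption: GRPC_ARG_MAX_MESSAGE_LENGTH from
   <grpc/impl/codegen/grpc_types.h> is the key consulted for this limit):
   create a channel whose message size limit exceeds the new 4 MB default. */
#include <grpc/grpc.h>

grpc_channel *create_channel_with_larger_limit(const char *target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = GRPC_ARG_MAX_MESSAGE_LENGTH;
  arg.value.integer = 16 * 1024 * 1024; /* 16 MB */
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create(target, &args, NULL);
}
```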
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 2cc6aa74e02aeeac9f1da96f4b95dd6a8961e1ed..5978884db83ee079681b0dd8c4897d64254456c4 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -48,7 +48,6 @@
 #include "src/core/lib/surface/api_trace.h"
 #include "src/core/lib/surface/call.h"
 #include "src/core/lib/surface/event_string.h"
-#include "src/core/lib/surface/surface_trace.h"
 
 int grpc_trace_operation_failures;
 
@@ -93,6 +92,17 @@ struct grpc_completion_queue {
 static gpr_mu g_freelist_mu;
 static grpc_completion_queue *g_freelist;
 
+int grpc_cq_pluck_trace;
+int grpc_cq_event_timeout_trace;
+
+#define GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, event)                  \
+  if (grpc_api_trace &&                                               \
+      (grpc_cq_pluck_trace || (event)->type != GRPC_QUEUE_TIMEOUT)) { \
+    char *_ev = grpc_event_string(event);                             \
+    gpr_log(GPR_INFO, "RETURN_EVENT[%p]: %s", cq, _ev);               \
+    gpr_free(_ev);                                                    \
+  }
+
 static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cc,
                                      grpc_error *error);
 
@@ -240,7 +250,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
         "grpc_cq_end_op(exec_ctx=%p, cc=%p, tag=%p, error=%s, done=%p, "
         "done_arg=%p, storage=%p)",
         7, (exec_ctx, cc, tag, errmsg, done, done_arg, storage));
-    if (grpc_trace_operation_failures) {
+    if (grpc_trace_operation_failures && error != GRPC_ERROR_NONE) {
       gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
     }
     grpc_error_free_string(errmsg);
@@ -316,10 +326,11 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
   GRPC_API_TRACE(
       "grpc_completion_queue_next("
       "cc=%p, "
-      "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
+      "deadline=gpr_timespec { tv_sec: %" PRId64
+      ", tv_nsec: %d, clock_type: %d }, "
       "reserved=%p)",
-      5, (cc, (long long)deadline.tv_sec, (int)deadline.tv_nsec,
-          (int)deadline.clock_type, reserved));
+      5, (cc, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
+          reserved));
   GPR_ASSERT(!reserved);
 
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -425,13 +436,16 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
 
   GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
 
-  GRPC_API_TRACE(
-      "grpc_completion_queue_pluck("
-      "cc=%p, tag=%p, "
-      "deadline=gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
-      "reserved=%p)",
-      6, (cc, tag, (long long)deadline.tv_sec, (int)deadline.tv_nsec,
-          (int)deadline.clock_type, reserved));
+  if (grpc_cq_pluck_trace) {
+    GRPC_API_TRACE(
+        "grpc_completion_queue_pluck("
+        "cc=%p, tag=%p, "
+        "deadline=gpr_timespec { tv_sec: %" PRId64
+        ", tv_nsec: %d, clock_type: %d }, "
+        "reserved=%p)",
+        6, (cc, tag, deadline.tv_sec, deadline.tv_nsec,
+            (int)deadline.clock_type, reserved));
+  }
   GPR_ASSERT(!reserved);
 
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index b9dd1092b68b46fd3653e375f5d32800a55b8b64..3049284f68620f8f15490433b79726fbd291e35a 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -39,6 +39,10 @@
 #include <grpc/grpc.h>
 #include "src/core/lib/iomgr/pollset.h"
 
+/* These trace flags default to 1. The corresponding lines are only traced
+   if grpc_api_trace is also nonzero. */
+extern int grpc_cq_pluck_trace;
+extern int grpc_cq_event_timeout_trace;
 extern int grpc_trace_operation_failures;
 
 typedef struct grpc_cq_completion {
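These flags are consulted together with grpc_api_trace, as the GRPC_SURFACE_TRACE_RETURNED_EVENT macro in completion_queue.c shows. A minimal sketch of that gating pattern (the helper function itself is illustrative, not part of the patch):

```c
/* Illustrative only: a log line gated the same way as
   GRPC_SURFACE_TRACE_RETURNED_EVENT -- both grpc_api_trace and the specific
   completion-queue flag must be nonzero for anything to be emitted. */
#include <grpc/support/log.h>

extern int grpc_api_trace;      /* from src/core/lib/surface/api_trace.h */
extern int grpc_cq_pluck_trace; /* declared above in completion_queue.h */

static void maybe_trace_pluck(void *cc, void *tag) {
  if (grpc_api_trace && grpc_cq_pluck_trace) {
    gpr_log(GPR_INFO, "pluck on cc=%p tag=%p", cc, tag);
  }
}
```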
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index f07039cb94c135b61e869085b0d9e7a981d09612..5397913a21ff6d460c4294bd1af094b8a454c4f0 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -57,7 +57,6 @@
 #include "src/core/lib/surface/init.h"
 #include "src/core/lib/surface/lame_client.h"
 #include "src/core/lib/surface/server.h"
-#include "src/core/lib/surface/surface_trace.h"
 #include "src/core/lib/transport/connectivity_state.h"
 #include "src/core/lib/transport/transport_impl.h"
 
@@ -165,6 +164,12 @@ void grpc_init(void) {
                          &grpc_trace_channel_stack_builder);
     grpc_register_tracer("http1", &grpc_http1_trace);
     grpc_register_tracer("compression", &grpc_compression_trace);
+    grpc_register_tracer("queue_pluck", &grpc_cq_pluck_trace);
+    // Default pluck trace to 1
+    grpc_cq_pluck_trace = 1;
+    grpc_register_tracer("queue_timeout", &grpc_cq_event_timeout_trace);
+    // Default timeout trace to 1
+    grpc_cq_event_timeout_trace = 1;
     grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
     grpc_security_pre_init();
     grpc_iomgr_init();
diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c
index aca76d2bb79a0516bafa3c2c9645d912a9f7b322..19420750545037ae9b40bb49829273b2e50a8d15 100644
--- a/src/core/lib/surface/version.c
+++ b/src/core/lib/surface/version.c
@@ -36,4 +36,4 @@
 
 #include <grpc/grpc.h>
 
-const char *grpc_version_string(void) { return "0.15.0-dev"; }
+const char *grpc_version_string(void) { return "1.1.0-dev"; }
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 1105494a85cd1dc98e6c897e65b312da13245fa4..857c3909d26475236f002e21c1f0dee4edbd466d 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -36,6 +36,7 @@
 #include <grpc/support/atm.h>
 #include <grpc/support/log.h>
 #include <grpc/support/sync.h>
+#include "src/core/lib/support/string.h"
 #include "src/core/lib/transport/transport_impl.h"
 
 #ifdef GRPC_STREAM_REFCOUNT_DEBUG
@@ -162,55 +163,87 @@ void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
   grpc_exec_ctx_sched(exec_ctx, op->on_complete, error, NULL);
 }
 
-void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
-                                               grpc_status_code status) {
-  GPR_ASSERT(status != GRPC_STATUS_OK);
-  if (op->cancel_with_status == GRPC_STATUS_OK) {
-    op->cancel_with_status = status;
-  }
-  if (op->close_with_status != GRPC_STATUS_OK) {
-    op->close_with_status = GRPC_STATUS_OK;
-    if (op->optional_close_message != NULL) {
-      gpr_slice_unref(*op->optional_close_message);
-      op->optional_close_message = NULL;
-    }
-  }
-}
-
 typedef struct {
-  gpr_slice message;
+  grpc_error *error;
   grpc_closure *then_call;
   grpc_closure closure;
 } close_message_data;
 
 static void free_message(grpc_exec_ctx *exec_ctx, void *p, grpc_error *error) {
   close_message_data *cmd = p;
-  gpr_slice_unref(cmd->message);
+  GRPC_ERROR_UNREF(cmd->error);
   if (cmd->then_call != NULL) {
-    cmd->then_call->cb(exec_ctx, cmd->then_call->cb_arg, GRPC_ERROR_REF(error));
+    cmd->then_call->cb(exec_ctx, cmd->then_call->cb_arg, error);
   }
   gpr_free(cmd);
 }
 
+static void add_error(grpc_transport_stream_op *op, grpc_error **which,
+                      grpc_error *error) {
+  close_message_data *cmd;
+  cmd = gpr_malloc(sizeof(*cmd));
+  cmd->error = error;
+  cmd->then_call = op->on_complete;
+  grpc_closure_init(&cmd->closure, free_message, cmd);
+  op->on_complete = &cmd->closure;
+  *which = error;
+}
+
+void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
+                                               grpc_status_code status) {
+  GPR_ASSERT(status != GRPC_STATUS_OK);
+  if (op->cancel_error == GRPC_ERROR_NONE) {
+    op->cancel_error = grpc_error_set_int(GRPC_ERROR_CANCELLED,
+                                          GRPC_ERROR_INT_GRPC_STATUS, status);
+    op->close_error = GRPC_ERROR_NONE;
+  }
+}
+
+void grpc_transport_stream_op_add_cancellation_with_message(
+    grpc_transport_stream_op *op, grpc_status_code status,
+    gpr_slice *optional_message) {
+  GPR_ASSERT(status != GRPC_STATUS_OK);
+  if (op->cancel_error != GRPC_ERROR_NONE) {
+    if (optional_message) {
+      gpr_slice_unref(*optional_message);
+    }
+    return;
+  }
+  grpc_error *error;
+  if (optional_message != NULL) {
+    char *msg = gpr_dump_slice(*optional_message, GPR_DUMP_ASCII);
+    error = grpc_error_set_str(GRPC_ERROR_CREATE(msg),
+                               GRPC_ERROR_STR_GRPC_MESSAGE, msg);
+    gpr_free(msg);
+    gpr_slice_unref(*optional_message);
+  } else {
+    error = GRPC_ERROR_CREATE("Call cancelled");
+  }
+  error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, status);
+  add_error(op, &op->close_error, error);
+}
+
 void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
                                         grpc_status_code status,
                                         gpr_slice *optional_message) {
-  close_message_data *cmd;
   GPR_ASSERT(status != GRPC_STATUS_OK);
-  if (op->cancel_with_status != GRPC_STATUS_OK ||
-      op->close_with_status != GRPC_STATUS_OK) {
+  if (op->cancel_error != GRPC_ERROR_NONE ||
+      op->close_error != GRPC_ERROR_NONE) {
     if (optional_message) {
       gpr_slice_unref(*optional_message);
     }
     return;
   }
-  if (optional_message) {
-    cmd = gpr_malloc(sizeof(*cmd));
-    cmd->message = *optional_message;
-    cmd->then_call = op->on_complete;
-    grpc_closure_init(&cmd->closure, free_message, cmd);
-    op->on_complete = &cmd->closure;
-    op->optional_close_message = &cmd->message;
+  grpc_error *error;
+  if (optional_message != NULL) {
+    char *msg = gpr_dump_slice(*optional_message, GPR_DUMP_ASCII);
+    error = grpc_error_set_str(GRPC_ERROR_CREATE(msg),
+                               GRPC_ERROR_STR_GRPC_MESSAGE, msg);
+    gpr_free(msg);
+    gpr_slice_unref(*optional_message);
+  } else {
+    error = GRPC_ERROR_CREATE("Call force closed");
   }
-  op->close_with_status = status;
+  error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, status);
+  add_error(op, &op->close_error, error);
 }
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index a46ccb643ce5f1feddf0c72f384f964aeee3cde6..08c0a237c977fa5d4ec680739a37a0b3adc918ac 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -135,13 +135,12 @@ typedef struct grpc_transport_stream_op {
   /** Collect any stats into provided buffer, zero internal stat counters */
   grpc_transport_stream_stats *collect_stats;
 
-  /** If != GRPC_STATUS_OK, cancel this stream */
-  grpc_status_code cancel_with_status;
+  /** If != GRPC_ERROR_NONE, cancel this stream */
+  grpc_error *cancel_error;
 
-  /** If != GRPC_STATUS_OK, send grpc-status, grpc-message, and close this
+  /** If != GRPC_ERROR_NONE, send grpc-status, grpc-message, and close this
       stream for both reading and writing */
-  grpc_status_code close_with_status;
-  gpr_slice *optional_close_message;
+  grpc_error *close_error;
 
   /* Indexes correspond to grpc_context_index enum values */
   grpc_call_context_element *context;
@@ -222,6 +221,10 @@ void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
 void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
                                                grpc_status_code status);
 
+void grpc_transport_stream_op_add_cancellation_with_message(
+    grpc_transport_stream_op *op, grpc_status_code status,
+    gpr_slice *optional_message);
+
 void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
                                         grpc_status_code status,
                                         gpr_slice *optional_message);
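Code that previously compared cancel_with_status against GRPC_STATUS_OK now compares cancel_error against GRPC_ERROR_NONE (as the client_auth_filter hunk above does) and, when it needs a status code, recovers it from the error. A hedged sketch under those assumptions:

```c
/* Hedged sketch: reading the new cancel_error field instead of a
   grpc_status_code. Internal headers and the fallback status are
   illustrative, not prescribed by the patch. */
#include <grpc/status.h>
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/transport/transport.h"

static grpc_status_code cancellation_status(grpc_transport_stream_op *op) {
  if (op->cancel_error == GRPC_ERROR_NONE) {
    return GRPC_STATUS_OK; /* the stream is not being cancelled */
  }
  intptr_t status;
  if (grpc_error_get_int(op->cancel_error, GRPC_ERROR_INT_GRPC_STATUS,
                         &status)) {
    return (grpc_status_code)status;
  }
  return GRPC_STATUS_CANCELLED; /* no explicit status attached to the error */
}
```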
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index df04c6112703bce21250ac42d02966bc59236a03..138591db2a39592397febd5cc890b9e8022ae85e 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -63,8 +63,8 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
   }
   if (gpr_time_cmp(md.deadline, gpr_inf_future(md.deadline.clock_type)) != 0) {
     char *tmp;
-    gpr_asprintf(&tmp, " deadline=%lld.%09d", (long long)md.deadline.tv_sec,
-                 (int)md.deadline.tv_nsec);
+    gpr_asprintf(&tmp, " deadline=%" PRId64 ".%09d", md.deadline.tv_sec,
+                 md.deadline.tv_nsec);
     gpr_strvec_add(b, tmp);
   }
 }
@@ -119,10 +119,21 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
     gpr_strvec_add(&b, gpr_strdup("RECV_TRAILING_METADATA"));
   }
 
-  if (op->cancel_with_status != GRPC_STATUS_OK) {
+  if (op->cancel_error != GRPC_ERROR_NONE) {
     if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
     first = 0;
-    gpr_asprintf(&tmp, "CANCEL:%d", op->cancel_with_status);
+    const char *msg = grpc_error_string(op->cancel_error);
+    gpr_asprintf(&tmp, "CANCEL:%s", msg);
+    grpc_error_free_string(msg);
+    gpr_strvec_add(&b, tmp);
+  }
+
+  if (op->close_error != GRPC_ERROR_NONE) {
+    if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+    first = 0;
+    const char *msg = grpc_error_string(op->close_error);
+    gpr_asprintf(&tmp, "CLOSE:%s", msg);
+    grpc_error_free_string(msg);
     gpr_strvec_add(&b, tmp);
   }
 
diff --git a/src/cpp/README.md b/src/cpp/README.md
index f2935e52d9e50b3489836bf32c6fb299b3f0ebca..8c0f85e5ff795445478c9dc56d227dbc1f14d2be 100644
--- a/src/cpp/README.md
+++ b/src/cpp/README.md
@@ -51,7 +51,7 @@ below.
 #Build from Source
 
 ```sh
- $ git clone https://github.com/grpc/grpc.git
+ $ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
  $ cd grpc
  $ git submodule update --init
  $ make
diff --git a/src/cpp/common/core_codegen.cc b/src/cpp/common/core_codegen.cc
index cc35aa69bab12c7b1980c3eb4761decf96ff8d8c..3d6780bcb8c4f8a573510e31062f480432460e92 100644
--- a/src/cpp/common/core_codegen.cc
+++ b/src/cpp/common/core_codegen.cc
@@ -74,9 +74,9 @@ void CoreCodegen::grpc_byte_buffer_destroy(grpc_byte_buffer* bb) {
   ::grpc_byte_buffer_destroy(bb);
 }
 
-void CoreCodegen::grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
-                                               grpc_byte_buffer* buffer) {
-  ::grpc_byte_buffer_reader_init(reader, buffer);
+int CoreCodegen::grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
+                                              grpc_byte_buffer* buffer) {
+  return ::grpc_byte_buffer_reader_init(reader, buffer);
 }
 
 void CoreCodegen::grpc_byte_buffer_reader_destroy(
diff --git a/src/cpp/util/byte_buffer.cc b/src/cpp/util/byte_buffer.cc
index c0a14de418af5022bf67ff52dc9350104a960f4a..c2cd20ee07f0a07af63a2b8b918c3da7e685dbf0 100644
--- a/src/cpp/util/byte_buffer.cc
+++ b/src/cpp/util/byte_buffer.cc
@@ -58,18 +58,22 @@ void ByteBuffer::Clear() {
   }
 }
 
-void ByteBuffer::Dump(std::vector<Slice>* slices) const {
+Status ByteBuffer::Dump(std::vector<Slice>* slices) const {
   slices->clear();
   if (!buffer_) {
-    return;
+    return Status(StatusCode::FAILED_PRECONDITION, "Buffer not initialized");
   }
   grpc_byte_buffer_reader reader;
-  grpc_byte_buffer_reader_init(&reader, buffer_);
+  if (!grpc_byte_buffer_reader_init(&reader, buffer_)) {
+    return Status(StatusCode::INTERNAL,
+                  "Couldn't initialize byte buffer reader");
+  }
   gpr_slice s;
   while (grpc_byte_buffer_reader_next(&reader, &s)) {
     slices->push_back(Slice(s, Slice::STEAL_REF));
   }
   grpc_byte_buffer_reader_destroy(&reader);
+  return Status::OK;
 }
 
 size_t ByteBuffer::Length() const {
diff --git a/src/csharp/.gitignore b/src/csharp/.gitignore
index 0f96a4822194b96c319fe9e62982b22b0e2272f4..fc2875a1dd510d72630c1f29b2170b672a9d4278 100644
--- a/src/csharp/.gitignore
+++ b/src/csharp/.gitignore
@@ -1,5 +1,7 @@
+*.xproj.user
 *.userprefs
 *.csproj.user
+*.lock.json
 StyleCop.Cache
 test-results
 packages
diff --git a/src/csharp/Grpc.Auth/Grpc.Auth.csproj b/src/csharp/Grpc.Auth/Grpc.Auth.csproj
index 3acea7d2f83882b4274ee13489fd62f84f4eca89..1fa14fc3dfb2031a69ce22ab27cb5b4e924cdf5c 100644
--- a/src/csharp/Grpc.Auth/Grpc.Auth.csproj
+++ b/src/csharp/Grpc.Auth/Grpc.Auth.csproj
@@ -81,6 +81,7 @@
   </ItemGroup>
   <ItemGroup>
     <None Include="Grpc.Auth.nuspec" />
+    <None Include="Grpc.Auth.project.json" />
     <None Include="packages.config" />
   </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Auth/Grpc.Auth.project.json b/src/csharp/Grpc.Auth/Grpc.Auth.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.Auth/Grpc.Auth.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.Auth/Grpc.Auth.xproj b/src/csharp/Grpc.Auth/Grpc.Auth.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..dd3d94c574aab0ab6b5ca69bb7a82d889b75807c
--- /dev/null
+++ b/src/csharp/Grpc.Auth/Grpc.Auth.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>c82631ed-06d1-4458-87bc-8257d12307a8</ProjectGuid>
+    <RootNamespace>Grpc.Auth</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\Grpc.Core\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Auth/project.json b/src/csharp/Grpc.Auth/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..08429f1d466065b07ecd806ab9ba643e3a107deb
--- /dev/null
+++ b/src/csharp/Grpc.Auth/project.json
@@ -0,0 +1,41 @@
+{
+  "version": "1.1.0-dev",
+  "title": "gRPC C# Auth",
+  "authors": [ "Google Inc." ],
+  "copyright": "Copyright 2015, Google Inc.",
+  "packOptions": {
+    "summary": "Auth library for C# implementation of gRPC - an RPC library and framework",
+    "description": "Auth library for C# implementation of gRPC - an RPC library and framework. See project site for more info.",
+    "owners": [ "grpc-packages" ],
+    "licenseUrl": "https://github.com/grpc/grpc/blob/master/LICENSE",
+    "projectUrl": "https://github.com/grpc/grpc",
+    "requireLicenseAcceptance": false,
+    "tags": [ "gRPC RPC Protocol HTTP/2 Auth OAuth2" ],
+  },
+  "buildOptions": {
+    "define": [ "SIGNED" ],
+    "keyFile": "../keys/Grpc.snk",
+    "publicSign": true,
+    "xmlDoc": true,
+    "compile": {
+      "includeFiles": [ "../Grpc.Core/Version.cs" ]
+    }
+  },
+  "dependencies": {
+    "Grpc.Core": "1.1.0-dev",
+    "Google.Apis.Auth": "1.11.1"
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "net45"
+      ],
+      "dependencies": {
+        "Microsoft.NETCore.Portable.Compatibility": "1.0.1-rc2-24027",
+        "NETStandard.Library": "1.5.0-rc2-24027",
+        "System.Threading.Tasks": "4.0.11-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs b/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs
index e605a310f9ef40122dbd70ec5d3937267b4b1c5d..064bc13cabb4ee7033137cf13c67b7562cc16f6c 100644
--- a/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs
+++ b/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs
@@ -32,13 +32,7 @@
 #endregion
 
 using System;
-using System.Diagnostics;
-using System.Linq;
-using System.Reflection;
-using System.Threading;
 using System.Threading.Tasks;
-using Grpc.Core;
-using Grpc.Core.Internal;
 using Grpc.Core.Utils;
 using NUnit.Framework;
 
@@ -46,6 +40,13 @@ namespace Grpc.Core.Tests
 {
     public class AppDomainUnloadTest
     {
+#if NETSTANDARD1_5
+        [Test]
+        [Ignore("Not supported for CoreCLR")]
+        public void AppDomainUnloadHookCanCleanupAbandonedCall()
+        {
+        }
+#else
         [Test]
         public void AppDomainUnloadHookCanCleanupAbandonedCall()
         {
@@ -86,5 +87,6 @@ namespace Grpc.Core.Tests
                 readyToShutdown.Task.Wait();  // make sure handler is running
             }
         }
+#endif
     }
 }
diff --git a/src/csharp/Grpc.Core.Tests/CompressionTest.cs b/src/csharp/Grpc.Core.Tests/CompressionTest.cs
index 378c81851c0b561bf6d441d92790e3b1bec997a1..16be210508ac042f9a1827efef8fa695502661e1 100644
--- a/src/csharp/Grpc.Core.Tests/CompressionTest.cs
+++ b/src/csharp/Grpc.Core.Tests/CompressionTest.cs
@@ -34,6 +34,7 @@
 using System;
 using System.Diagnostics;
 using System.Linq;
+using System.Text;
 using System.Threading;
 using System.Threading.Tasks;
 using Grpc.Core;
@@ -118,5 +119,30 @@ namespace Grpc.Core.Tests
 
             await call.ResponseStream.ToListAsync();
         }
+
+        [Test]
+        public void CanReadCompressedMessages()
+        {
+            var compressionMetadata = new Metadata
+            {
+                { new Metadata.Entry(Metadata.CompressionRequestAlgorithmMetadataKey, "gzip") }
+            };
+
+            helper.UnaryHandler = new UnaryServerMethod<string, string>(async (req, context) =>
+            {
+                await context.WriteResponseHeadersAsync(compressionMetadata);
+                return req;
+            });
+
+            var stringBuilder = new StringBuilder();
+            for (int i = 0; i < 200000; i++)
+            {
+                stringBuilder.Append('a');
+            }
+            var request = stringBuilder.ToString();
+            var response = Calls.BlockingUnaryCall(helper.CreateUnaryCall(new CallOptions(compressionMetadata)), request);
+
+            Assert.AreEqual(request, response);
+        }
     }
 }
diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
index 074c9603dcf6e4fd62d95bdb4cebab24ed5e7a97..f6c226567d973a38f47ea35a567da7362e419eab 100644
--- a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
+++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
@@ -99,6 +99,7 @@
     </ProjectReference>
   </ItemGroup>
   <ItemGroup>
+    <None Include="Grpc.Core.Tests.project.json" />
     <None Include="packages.config">
       <SubType>Designer</SubType>
     </None>
diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.project.json b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.xproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..05823291542728459acbb90d04b099c42f12a295
--- /dev/null
+++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>759e23b2-fc04-4695-902d-b073cded3599</ProjectGuid>
+    <RootNamespace>Grpc.Core.Tests</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Core.Tests/NUnitMain.cs b/src/csharp/Grpc.Core.Tests/NUnitMain.cs
index 9c1d7bf3c8ae016b080d6983a96920b85759e977..24a9f846d10891e18273afbd110c71c3b35daf0d 100644
--- a/src/csharp/Grpc.Core.Tests/NUnitMain.cs
+++ b/src/csharp/Grpc.Core.Tests/NUnitMain.cs
@@ -49,7 +49,7 @@ namespace Grpc.Core.Tests
         {
             // Make logger immune to NUnit capturing stdout and stderr to workaround https://github.com/nunit/nunit/issues/1406.
             GrpcEnvironment.SetLogger(new TextWriterLogger(Console.Error));
-#if DOTNET5_4
+#if NETSTANDARD1_5
             return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args, new ExtendedTextWrapper(Console.Out), Console.In);
 #else
             return new AutoRun().Execute(args);
diff --git a/src/csharp/Grpc.Core.Tests/NUnitVersionTest.cs b/src/csharp/Grpc.Core.Tests/NUnitVersionTest.cs
index 3fa6ad09c02ac7d3dd0acf6abf70d467512751ed..1a9e441611d39e38cb9d54a603ce13b41ee52e80 100644
--- a/src/csharp/Grpc.Core.Tests/NUnitVersionTest.cs
+++ b/src/csharp/Grpc.Core.Tests/NUnitVersionTest.cs
@@ -56,7 +56,7 @@ namespace Grpc.Core.Tests
                 Console.Error.WriteLine("You are using and old version of NUnit that doesn't support async tests and skips them instead. " +
                 "This test has failed to indicate that.");
                 Console.Error.Flush();
-                Environment.Exit(1);
+                throw new Exception("NUnitVersionTest has failed.");
             }
         }
 
diff --git a/src/csharp/Grpc.Core.Tests/SanityTest.cs b/src/csharp/Grpc.Core.Tests/SanityTest.cs
index 3830f0cbacf4f34280c485906735f81f18efb0d5..501992c56959372f22cd0a05f5cf9e4af99706a3 100644
--- a/src/csharp/Grpc.Core.Tests/SanityTest.cs
+++ b/src/csharp/Grpc.Core.Tests/SanityTest.cs
@@ -45,6 +45,8 @@ namespace Grpc.Core.Tests
 {
     public class SanityTest
     {
+        // TODO: make sanity test work for CoreCLR as well
+#if !NETSTANDARD1_5
         /// <summary>
         /// Because we depend on a native library, sometimes when things go wrong, the
         /// entire NUnit test process crashes. To be able to track down problems better,
@@ -121,5 +123,6 @@ namespace Grpc.Core.Tests
             }
             return result;
         }
+#endif
     }
 }
diff --git a/src/csharp/Grpc.Core.Tests/project.json b/src/csharp/Grpc.Core.Tests/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..f58bcbb5159e7ddd38f6e8016bab5a8c9cdb23b8
--- /dev/null
+++ b/src/csharp/Grpc.Core.Tests/project.json
@@ -0,0 +1,70 @@
+{
+  "buildOptions": {
+    "emitEntryPoint": true
+  },
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+
+  "dependencies": {
+    "Grpc.Core": {
+      "target": "project"
+    },
+    "Newtonsoft.Json": "8.0.3",
+    "NUnit": "3.2.0",
+    "NUnitLite": "3.2.0-*"
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj
index a796911b99fb61aac4c9f131ae55d3328e4b4d3d..1952ee37121de6ea862772c16db5d0cc42a81742 100644
--- a/src/csharp/Grpc.Core/Grpc.Core.csproj
+++ b/src/csharp/Grpc.Core/Grpc.Core.csproj
@@ -141,6 +141,7 @@
   </ItemGroup>
   <ItemGroup>
     <None Include="Grpc.Core.nuspec" />
+    <None Include="Grpc.Core.project.json" />
     <None Include="packages.config" />
   </ItemGroup>
   <Import Project="NativeDeps.targets" />
@@ -148,7 +149,7 @@
   <ItemGroup />
   <ItemGroup>
     <EmbeddedResource Include="..\..\..\etc\roots.pem">
-      <Link>Resources\roots.pem</Link>
+      <Link>roots.pem</Link>
     </EmbeddedResource>
   </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Core/Grpc.Core.nuspec b/src/csharp/Grpc.Core/Grpc.Core.nuspec
index 0ada0049c2abe9d62ec841909e4a538541d1248e..47593f787b1a2e084cf06d950ca0d3775940e5bd 100644
--- a/src/csharp/Grpc.Core/Grpc.Core.nuspec
+++ b/src/csharp/Grpc.Core/Grpc.Core.nuspec
@@ -24,11 +24,12 @@
     <file src="bin/ReleaseSigned/Grpc.Core.xml" target="lib/net45" />
     <file src="**\*.cs" target="src" />
     <file src="Grpc.Core.targets" target="\build\net45\Grpc.Core.targets" />
-    <file src="windows_x86/grpc_csharp_ext.dll" target="/build/native/bin/windows_x86/grpc_csharp_ext.dll" />
-    <file src="windows_x64/grpc_csharp_ext.dll" target="/build/native/bin/windows_x64/grpc_csharp_ext.dll" />
-    <file src="linux_x86/libgrpc_csharp_ext.so" target="/build/native/bin/linux_x86/libgrpc_csharp_ext.so" />
-    <file src="linux_x64/libgrpc_csharp_ext.so" target="/build/native/bin/linux_x64/libgrpc_csharp_ext.so" />
-    <file src="macosx_x86/libgrpc_csharp_ext.dylib" target="/build/native/bin/macosx_x86/libgrpc_csharp_ext.dylib" />
-    <file src="macosx_x64/libgrpc_csharp_ext.dylib" target="/build/native/bin/macosx_x64/libgrpc_csharp_ext.dylib" />
+    <!-- without backslashes in the source path, nuget won't copy the files -->
+    <file src="..\nativelibs\windows_x86\grpc_csharp_ext.dll" target="/build/native/bin/windows_x86/grpc_csharp_ext.dll" />
+    <file src="..\nativelibs\windows_x64\grpc_csharp_ext.dll" target="/build/native/bin/windows_x64/grpc_csharp_ext.dll" />
+    <file src="..\nativelibs\linux_x86\libgrpc_csharp_ext.so" target="/build/native/bin/linux_x86/libgrpc_csharp_ext.so" />
+    <file src="..\nativelibs\linux_x64\libgrpc_csharp_ext.so" target="/build/native/bin/linux_x64/libgrpc_csharp_ext.so" />
+    <file src="..\nativelibs\macosx_x86\libgrpc_csharp_ext.dylib" target="/build/native/bin/macosx_x86/libgrpc_csharp_ext.dylib" />
+    <file src="..\nativelibs\macosx_x64\libgrpc_csharp_ext.dylib" target="/build/native/bin/macosx_x64/libgrpc_csharp_ext.dylib" />
   </files>
 </package>
diff --git a/src/csharp/Grpc.Core/Grpc.Core.project.json b/src/csharp/Grpc.Core/Grpc.Core.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.Core/Grpc.Core.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.Core/Grpc.Core.xproj b/src/csharp/Grpc.Core/Grpc.Core.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..137236ffdb61daa99a602132898d869bb2c35f14
--- /dev/null
+++ b/src/csharp/Grpc.Core/Grpc.Core.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>dc9908b6-f291-4fc8-a46d-2ea2551790ec</ProjectGuid>
+    <RootNamespace>Grpc.Core</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Core/GrpcEnvironment.cs b/src/csharp/Grpc.Core/GrpcEnvironment.cs
index e9e4cb4cbb303395c5f888f8c0bd3cc948331156..eeed6997123fdb455981434faee6e120ce133a44 100644
--- a/src/csharp/Grpc.Core/GrpcEnvironment.cs
+++ b/src/csharp/Grpc.Core/GrpcEnvironment.cs
@@ -105,7 +105,7 @@ namespace Grpc.Core
 
             if (instanceToShutdown != null)
             {
-                await instanceToShutdown.ShutdownAsync();
+                await instanceToShutdown.ShutdownAsync().ConfigureAwait(false);
             }
         }
 
@@ -352,8 +352,12 @@ namespace Grpc.Core
                 {
                     if (!hooksRegistered)
                     {
+                        // TODO(jtattermusch): register shutdown hooks for CoreCLR as well
+#if !NETSTANDARD1_5
+
                         AppDomain.CurrentDomain.ProcessExit += ShutdownHookHandler;
                         AppDomain.CurrentDomain.DomainUnload += ShutdownHookHandler;
+#endif
                     }
                     hooksRegistered = true;
                 }
diff --git a/src/csharp/Grpc.Core/Internal/DefaultSslRootsOverride.cs b/src/csharp/Grpc.Core/Internal/DefaultSslRootsOverride.cs
index aa4dafd7f2aa03ba72bc0540ad3756043eda26a3..2a96e9920c06f11ba949ae78bbe48258a1efd64c 100644
--- a/src/csharp/Grpc.Core/Internal/DefaultSslRootsOverride.cs
+++ b/src/csharp/Grpc.Core/Internal/DefaultSslRootsOverride.cs
@@ -46,7 +46,7 @@ namespace Grpc.Core.Internal
     /// </summary>
     internal static class DefaultSslRootsOverride
     {
-        const string RootsPemResourceName = "Grpc.Core.Resources.roots.pem";
+        const string RootsPemResourceName = "Grpc.Core.roots.pem";
         static object staticLock = new object();
 
         /// <summary>
diff --git a/src/csharp/Grpc.Core/Internal/NativeExtension.cs b/src/csharp/Grpc.Core/Internal/NativeExtension.cs
index b45ba19c24d798518993ec0b9eeffc6bc47dd39c..a6d792581627ffd81e048adfc89e0c795f0b274e 100644
--- a/src/csharp/Grpc.Core/Internal/NativeExtension.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeExtension.cs
@@ -117,8 +117,8 @@ namespace Grpc.Core.Internal
         private static string GetAssemblyPath()
         {
             var assembly = typeof(NativeExtension).GetTypeInfo().Assembly;
-#if DOTNET5_4
-            // Assembly.EscapedCodeBase does not exit under CoreCLR, but assemblies imported from a nuget package
+#if NETSTANDARD1_5
+            // Assembly.EscapedCodeBase does not exist under CoreCLR, but assemblies imported from a nuget package
             // don't seem to be shadowed by DNX-based projects at all.
             return assembly.Location;
 #else
@@ -136,7 +136,7 @@ namespace Grpc.Core.Internal
 #endif
         }
 
-#if !DOTNET5_4
+#if !NETSTANDARD1_5
         private static bool IsFileUri(string uri)
         {
             return uri.ToLowerInvariant().StartsWith(Uri.UriSchemeFile);
diff --git a/src/csharp/Grpc.Core/Internal/PlatformApis.cs b/src/csharp/Grpc.Core/Internal/PlatformApis.cs
index 5d8c44b589e848e402594ddcd6cc9cf93b2a3f77..15391ddc647ab7889129bf82030c3896a3ec14e8 100644
--- a/src/csharp/Grpc.Core/Internal/PlatformApis.cs
+++ b/src/csharp/Grpc.Core/Internal/PlatformApis.cs
@@ -53,7 +53,7 @@ namespace Grpc.Core.Internal
 
         static PlatformApis()
         {
-#if DNXCORE50
+#if NETSTANDARD1_5
             isLinux = RuntimeInformation.IsOSPlatform(OSPlatform.Linux);
             isMacOSX = RuntimeInformation.IsOSPlatform(OSPlatform.OSX);
             isWindows = RuntimeInformation.IsOSPlatform(OSPlatform.Windows);
diff --git a/src/csharp/Grpc.Core/Internal/SafeHandleZeroIsInvalid.cs b/src/csharp/Grpc.Core/Internal/SafeHandleZeroIsInvalid.cs
index 702aea2883ad828c6f0b8e6675bb2b0c56522f92..230faacff6331ff6c90fad437a1c9434d3d52f39 100644
--- a/src/csharp/Grpc.Core/Internal/SafeHandleZeroIsInvalid.cs
+++ b/src/csharp/Grpc.Core/Internal/SafeHandleZeroIsInvalid.cs
@@ -39,7 +39,7 @@ namespace Grpc.Core.Internal
     /// <summary>
     /// Safe handle to wrap native objects.
     /// </summary>
-    internal abstract class SafeHandleZeroIsInvalid : SafeHandle
+    internal abstract class SafeHandleZeroIsInvalid : System.Runtime.InteropServices.SafeHandle
     {
         public SafeHandleZeroIsInvalid() : base(IntPtr.Zero, true)
         {
diff --git a/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs b/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
index 85813027064f47359be46b25bdb3bf8cf5e0736e..014a8db78f2bb6629b9336377fbf18ee35a5b7e6 100644
--- a/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
@@ -54,7 +54,10 @@ namespace Grpc.Core.Internal
 
         public void RegisterCompletionQueue(CompletionQueueSafeHandle cq)
         {
-            Native.grpcsharp_server_register_completion_queue(this, cq);
+            using (cq.NewScope())
+            {
+                Native.grpcsharp_server_register_completion_queue(this, cq);
+            }
         }
 
         public int AddInsecurePort(string addr)
@@ -74,16 +77,22 @@ namespace Grpc.Core.Internal
     
         public void ShutdownAndNotify(BatchCompletionDelegate callback, CompletionQueueSafeHandle completionQueue)
         {
-            var ctx = BatchContextSafeHandle.Create();
-            completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
-            Native.grpcsharp_server_shutdown_and_notify_callback(this, completionQueue, ctx);
+            using (completionQueue.NewScope())
+            {
+                var ctx = BatchContextSafeHandle.Create();
+                completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
+                Native.grpcsharp_server_shutdown_and_notify_callback(this, completionQueue, ctx);
+            }
         }
 
         public void RequestCall(BatchCompletionDelegate callback, CompletionQueueSafeHandle completionQueue)
         {
-            var ctx = BatchContextSafeHandle.Create();
-            completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
-            Native.grpcsharp_server_request_call(this, completionQueue, ctx).CheckOk();
+            using (completionQueue.NewScope())
+            {
+                var ctx = BatchContextSafeHandle.Create();
+                completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
+                Native.grpcsharp_server_request_call(this, completionQueue, ctx).CheckOk();
+            }
         }
 
         protected override bool ReleaseHandle()
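
The ServerSafeHandle.cs hunks above wrap each native call in using (completionQueue.NewScope()) { ... }. The CompletionQueueSafeHandle internals are not part of this patch; the sketch below only illustrates the general scope-guard shape, with invented names:

    using System;
    using System.Threading;

    // Illustrative stand-in: a handle that must not be released while native
    // calls referencing it are still in flight.
    sealed class UsageScopedHandle
    {
        int activeUsers;

        public IDisposable NewScope()
        {
            Interlocked.Increment(ref activeUsers);
            return new Scope(this);
        }

        public bool CanRelease
        {
            get { return Volatile.Read(ref activeUsers) == 0; }
        }

        sealed class Scope : IDisposable
        {
            readonly UsageScopedHandle owner;
            public Scope(UsageScopedHandle owner) { this.owner = owner; }
            public void Dispose() { Interlocked.Decrement(ref owner.activeUsers); }
        }
    }

Call sites then take the form using (handle.NewScope()) { /* native call */ }, which is the shape the hunks above adopt.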
diff --git a/src/csharp/Grpc.Core/Metadata.cs b/src/csharp/Grpc.Core/Metadata.cs
index f73f720094a39b0d6ca166dbf11547bb4915a812..915bec146c9878d5d7c5646b5e0d46d821ae5cf4 100644
--- a/src/csharp/Grpc.Core/Metadata.cs
+++ b/src/csharp/Grpc.Core/Metadata.cs
@@ -63,6 +63,13 @@ namespace Grpc.Core
         /// </summary>
         public static readonly Metadata Empty = new Metadata().Freeze();
 
+        /// <summary>
+        /// To be used in initial metadata to request a specific compression algorithm
+        /// for a given call. Direct selection of compression algorithms is an internal
+        /// feature and is not part of the public API.
+        /// </summary>
+        internal const string CompressionRequestAlgorithmMetadataKey = "grpc-internal-encoding-request";
+
         readonly List<Entry> entries;
         bool readOnly;
 
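
A hedged usage sketch for the new Metadata key (not from this patch; the comment itself notes the feature is internal, and "gzip" as a value is an assumption):

    using Grpc.Core;

    static class CompressionRequestExample
    {
        // Illustration only: request a compression algorithm via initial metadata.
        // "gzip" as the value is an assumption; the key is not public API.
        public static Metadata BuildHeaders()
        {
            return new Metadata
            {
                { "grpc-internal-encoding-request", "gzip" }
            };
        }
    }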
diff --git a/src/csharp/Grpc.Core/VersionInfo.cs b/src/csharp/Grpc.Core/VersionInfo.cs
index e1609341d9acd4d7b2a02d032cf239155df8b91d..553aeec58ac9716fde2c0b7143a47472418c8939 100644
--- a/src/csharp/Grpc.Core/VersionInfo.cs
+++ b/src/csharp/Grpc.Core/VersionInfo.cs
@@ -48,11 +48,11 @@ namespace Grpc.Core
         /// <summary>
         /// Current <c>AssemblyFileVersion</c> of gRPC C# assemblies
         /// </summary>
-        public const string CurrentAssemblyFileVersion = "0.15.0.0";
+        public const string CurrentAssemblyFileVersion = "1.1.0.0";
 
         /// <summary>
         /// Current version of gRPC C#
         /// </summary>
-        public const string CurrentVersion = "0.15.0-dev";
+        public const string CurrentVersion = "1.1.0-dev";
     }
 }
diff --git a/src/csharp/Grpc.Core/project.json b/src/csharp/Grpc.Core/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..4545d26aa5d230eb7a0f722285bdf3e444a8ca4b
--- /dev/null
+++ b/src/csharp/Grpc.Core/project.json
@@ -0,0 +1,48 @@
+{
+  "version": "1.1.0-dev",
+  "title": "gRPC C# Core",
+  "authors": [ "Google Inc." ],
+  "copyright": "Copyright 2015, Google Inc.",
+  "packOptions": {
+    "summary": "Core C# implementation of gRPC - an RPC library and framework",
+    "description": "Core C# implementation of gRPC - an RPC library and framework. See project site for more info.",
+    "owners": [ "grpc-packages" ],
+    "licenseUrl": "https://github.com/grpc/grpc/blob/master/LICENSE",
+    "projectUrl": "https://github.com/grpc/grpc",
+    "requireLicenseAcceptance": false,
+    "tags": [ "gRPC RPC Protocol HTTP/2" ],
+    "files": {
+      "mappings": {
+        "build/net45/": "Grpc.Core.targets",
+        "build/native/bin/windows_x86/": "../nativelibs/windows_x86/grpc_csharp_ext.dll",
+        "build/native/bin/windows_x64/": "../nativelibs/windows_x64/grpc_csharp_ext.dll",
+        "build/native/bin/linux_x86/": "../nativelibs/linux_x86/libgrpc_csharp_ext.so",
+        "build/native/bin/linux_x64/": "../nativelibs/linux_x64/libgrpc_csharp_ext.so",
+        "build/native/bin/macosx_x86/": "../nativelibs/macosx_x86/libgrpc_csharp_ext.dylib",
+        "build/native/bin/macosx_x64/": "../nativelibs/macosx_x64/libgrpc_csharp_ext.dylib"
+      }
+    }
+  },
+  "buildOptions": {
+    "embed": [ "../../../etc/roots.pem" ],
+    "define": [ "SIGNED" ],
+    "keyFile": "../keys/Grpc.snk",
+    "publicSign": true,
+    "xmlDoc": true
+  },
+  "dependencies": {
+    "Ix-Async": "1.2.5"
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027",
+        "System.Threading.Thread": "4.0.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.Dotnet.sln b/src/csharp/Grpc.Dotnet.sln
new file mode 100644
index 0000000000000000000000000000000000000000..98b3cd54abbe508a1b4e0233cf6107c762e9e692
--- /dev/null
+++ b/src/csharp/Grpc.Dotnet.sln
@@ -0,0 +1,100 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 14
+VisualStudioVersion = 14.0.25123.0
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.Core", "Grpc.Core\Grpc.Core.xproj", "{DC9908B6-F291-4FC8-A46D-2EA2551790EC}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.Auth", "Grpc.Auth\Grpc.Auth.xproj", "{C82631ED-06D1-4458-87BC-8257D12307A8}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.Core.Tests", "Grpc.Core.Tests\Grpc.Core.Tests.xproj", "{759E23B2-FC04-4695-902D-B073CDED3599}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.Examples", "Grpc.Examples\Grpc.Examples.xproj", "{C77B792D-FC78-4CE2-9522-B40B0803C636}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.Examples.MathClient", "Grpc.Examples.MathClient\Grpc.Examples.MathClient.xproj", "{FD48DECA-1622-4173-B1D9-2101CF5E7C5F}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.Examples.MathServer", "Grpc.Examples.MathServer\Grpc.Examples.MathServer.xproj", "{58579368-5372-4E67-ACD6-9B59CB9FA698}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.Examples.Tests", "Grpc.Examples.Tests\Grpc.Examples.Tests.xproj", "{C61714A6-F633-44FB-97F4-C91F425C1D15}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.HealthCheck", "Grpc.HealthCheck\Grpc.HealthCheck.xproj", "{3BE4AD0B-2BF0-4D68-B625-F6018EF0DCFA}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.HealthCheck.Tests", "Grpc.HealthCheck.Tests\Grpc.HealthCheck.Tests.xproj", "{43DAFAC6-5343-4621-960E-A8A977EA3F0B}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.IntegrationTesting", "Grpc.IntegrationTesting\Grpc.IntegrationTesting.xproj", "{20354386-3E71-4046-A269-3BC2A06F3EC8}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.IntegrationTesting.Client", "Grpc.IntegrationTesting.Client\Grpc.IntegrationTesting.Client.xproj", "{48EA5BBE-70E2-4198-869D-D7E59C45F30D}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.IntegrationTesting.QpsWorker", "Grpc.IntegrationTesting.QpsWorker\Grpc.IntegrationTesting.QpsWorker.xproj", "{661B70D7-F56A-46E0-9B81-6227B591B5E7}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.IntegrationTesting.Server", "Grpc.IntegrationTesting.Server\Grpc.IntegrationTesting.Server.xproj", "{881F7AD1-A84E-47A2-9402-115C63C4031E}"
+EndProject
+Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Grpc.IntegrationTesting.StressClient", "Grpc.IntegrationTesting.StressClient\Grpc.IntegrationTesting.StressClient.xproj", "{0EBC910B-8867-4D3E-8686-91F34183D839}"
+EndProject
+Global
+	GlobalSection(SolutionConfigurationPlatforms) = preSolution
+		Debug|Any CPU = Debug|Any CPU
+		Release|Any CPU = Release|Any CPU
+	EndGlobalSection
+	GlobalSection(ProjectConfigurationPlatforms) = postSolution
+		{DC9908B6-F291-4FC8-A46D-2EA2551790EC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{DC9908B6-F291-4FC8-A46D-2EA2551790EC}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{DC9908B6-F291-4FC8-A46D-2EA2551790EC}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{DC9908B6-F291-4FC8-A46D-2EA2551790EC}.Release|Any CPU.Build.0 = Release|Any CPU
+		{C82631ED-06D1-4458-87BC-8257D12307A8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{C82631ED-06D1-4458-87BC-8257D12307A8}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{C82631ED-06D1-4458-87BC-8257D12307A8}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{C82631ED-06D1-4458-87BC-8257D12307A8}.Release|Any CPU.Build.0 = Release|Any CPU
+		{759E23B2-FC04-4695-902D-B073CDED3599}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{759E23B2-FC04-4695-902D-B073CDED3599}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{759E23B2-FC04-4695-902D-B073CDED3599}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{759E23B2-FC04-4695-902D-B073CDED3599}.Release|Any CPU.Build.0 = Release|Any CPU
+		{C77B792D-FC78-4CE2-9522-B40B0803C636}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{C77B792D-FC78-4CE2-9522-B40B0803C636}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{C77B792D-FC78-4CE2-9522-B40B0803C636}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{C77B792D-FC78-4CE2-9522-B40B0803C636}.Release|Any CPU.Build.0 = Release|Any CPU
+		{FD48DECA-1622-4173-B1D9-2101CF5E7C5F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{FD48DECA-1622-4173-B1D9-2101CF5E7C5F}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{FD48DECA-1622-4173-B1D9-2101CF5E7C5F}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{FD48DECA-1622-4173-B1D9-2101CF5E7C5F}.Release|Any CPU.Build.0 = Release|Any CPU
+		{58579368-5372-4E67-ACD6-9B59CB9FA698}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{58579368-5372-4E67-ACD6-9B59CB9FA698}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{58579368-5372-4E67-ACD6-9B59CB9FA698}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{58579368-5372-4E67-ACD6-9B59CB9FA698}.Release|Any CPU.Build.0 = Release|Any CPU
+		{C61714A6-F633-44FB-97F4-C91F425C1D15}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{C61714A6-F633-44FB-97F4-C91F425C1D15}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{C61714A6-F633-44FB-97F4-C91F425C1D15}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{C61714A6-F633-44FB-97F4-C91F425C1D15}.Release|Any CPU.Build.0 = Release|Any CPU
+		{3BE4AD0B-2BF0-4D68-B625-F6018EF0DCFA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{3BE4AD0B-2BF0-4D68-B625-F6018EF0DCFA}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{3BE4AD0B-2BF0-4D68-B625-F6018EF0DCFA}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{3BE4AD0B-2BF0-4D68-B625-F6018EF0DCFA}.Release|Any CPU.Build.0 = Release|Any CPU
+		{43DAFAC6-5343-4621-960E-A8A977EA3F0B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{43DAFAC6-5343-4621-960E-A8A977EA3F0B}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{43DAFAC6-5343-4621-960E-A8A977EA3F0B}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{43DAFAC6-5343-4621-960E-A8A977EA3F0B}.Release|Any CPU.Build.0 = Release|Any CPU
+		{20354386-3E71-4046-A269-3BC2A06F3EC8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{20354386-3E71-4046-A269-3BC2A06F3EC8}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{20354386-3E71-4046-A269-3BC2A06F3EC8}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{20354386-3E71-4046-A269-3BC2A06F3EC8}.Release|Any CPU.Build.0 = Release|Any CPU
+		{48EA5BBE-70E2-4198-869D-D7E59C45F30D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{48EA5BBE-70E2-4198-869D-D7E59C45F30D}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{48EA5BBE-70E2-4198-869D-D7E59C45F30D}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{48EA5BBE-70E2-4198-869D-D7E59C45F30D}.Release|Any CPU.Build.0 = Release|Any CPU
+		{661B70D7-F56A-46E0-9B81-6227B591B5E7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{661B70D7-F56A-46E0-9B81-6227B591B5E7}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{661B70D7-F56A-46E0-9B81-6227B591B5E7}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{661B70D7-F56A-46E0-9B81-6227B591B5E7}.Release|Any CPU.Build.0 = Release|Any CPU
+		{881F7AD1-A84E-47A2-9402-115C63C4031E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{881F7AD1-A84E-47A2-9402-115C63C4031E}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{881F7AD1-A84E-47A2-9402-115C63C4031E}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{881F7AD1-A84E-47A2-9402-115C63C4031E}.Release|Any CPU.Build.0 = Release|Any CPU
+		{0EBC910B-8867-4D3E-8686-91F34183D839}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{0EBC910B-8867-4D3E-8686-91F34183D839}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{0EBC910B-8867-4D3E-8686-91F34183D839}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{0EBC910B-8867-4D3E-8686-91F34183D839}.Release|Any CPU.Build.0 = Release|Any CPU
+	EndGlobalSection
+	GlobalSection(SolutionProperties) = preSolution
+		HideSolutionNode = FALSE
+	EndGlobalSection
+EndGlobal
diff --git a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
index 35c0646a3fd69779bb6c505e0f2db70b6e7ee905..65bf236def6b1207dd3820b7eb51b744bca02c42 100644
--- a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
+++ b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
@@ -57,4 +57,7 @@
       <Name>Grpc.Examples</Name>
     </ProjectReference>
   </ItemGroup>
+  <ItemGroup>
+    <None Include="Grpc.Examples.MathClient.project.json" />
+  </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.project.json b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.xproj b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..4655bd4377432ec5bf52f1884f7f3bf142dfc171
--- /dev/null
+++ b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>fd48deca-1622-4173-b1d9-2101cf5e7c5f</ProjectGuid>
+    <RootNamespace>Grpc.Examples.MathClient</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Examples.MathClient/project.json b/src/csharp/Grpc.Examples.MathClient/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..b865cd5011859f0be9f3a529653a03a6ef113695
--- /dev/null
+++ b/src/csharp/Grpc.Examples.MathClient/project.json
@@ -0,0 +1,67 @@
+{
+  "buildOptions": {
+    "emitEntryPoint": true
+  },
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+
+  "dependencies": {
+    "Grpc.Examples": {
+      "target": "project"
+    }
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
index 74d79b44d98f36a203f71ce2367a153d5a8d8c63..26b42b6936da40bc20bacdfa52be316493be8bf3 100644
--- a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
+++ b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
@@ -57,4 +57,7 @@
       <Name>Grpc.Examples</Name>
     </ProjectReference>
   </ItemGroup>
+  <ItemGroup>
+    <None Include="Grpc.Examples.MathServer.project.json" />
+  </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.project.json b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.xproj b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..38a449e8f29823936d4eebc6523bcd587641fbff
--- /dev/null
+++ b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>58579368-5372-4e67-acd6-9b59cb9fa698</ProjectGuid>
+    <RootNamespace>Grpc.Examples.MathServer</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Examples.MathServer/project.json b/src/csharp/Grpc.Examples.MathServer/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..b865cd5011859f0be9f3a529653a03a6ef113695
--- /dev/null
+++ b/src/csharp/Grpc.Examples.MathServer/project.json
@@ -0,0 +1,67 @@
+{
+  "buildOptions": {
+    "emitEntryPoint": true
+  },
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+
+  "dependencies": {
+    "Grpc.Examples": {
+      "target": "project"
+    }
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
index 3fd28c65282c914a8f6ef9146a6968931276566d..4c7d89309aff8608b53b81701727fa4726756f90 100644
--- a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
+++ b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
@@ -69,6 +69,7 @@
     </ProjectReference>
   </ItemGroup>
   <ItemGroup>
+    <None Include="Grpc.Examples.Tests.project.json" />
     <None Include="packages.config" />
   </ItemGroup>
   <ItemGroup>
diff --git a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.project.json b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.xproj b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..9cecd18b2e4f2a52d92dbc9024c01ec99fe0b367
--- /dev/null
+++ b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>c61714a6-f633-44fb-97f4-c91f425c1d15</ProjectGuid>
+    <RootNamespace>Grpc.Examples.Tests</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Examples.Tests/MathClientServerTests.cs b/src/csharp/Grpc.Examples.Tests/MathClientServerTests.cs
index ee11105efe7f8018f4003a21e63a92b3cf9888f2..50dacc2eaaf258ad28a01518e1afb59653f4e49c 100644
--- a/src/csharp/Grpc.Examples.Tests/MathClientServerTests.cs
+++ b/src/csharp/Grpc.Examples.Tests/MathClientServerTests.cs
@@ -62,7 +62,7 @@ namespace Math.Tests
             };
             server.Start();
             channel = new Channel(Host, server.Ports.Single().BoundPort, ChannelCredentials.Insecure);
-            client = Math.NewClient(channel);
+            client = new Math.MathClient(channel);
         }
 
         [TestFixtureTearDown]
@@ -110,7 +110,7 @@ namespace Math.Tests
             {
                 var responses = await call.ResponseStream.ToListAsync();
                 CollectionAssert.AreEqual(new List<long> { 1, 1, 2, 3, 5, 8 },
-                    responses.ConvertAll((n) => n.Num_));
+                    responses.Select((n) => n.Num_));
             }
         }
 
@@ -162,7 +162,7 @@ namespace Math.Tests
         {
             using (var call = client.Sum())
             {
-                var numbers = new List<long> { 10, 20, 30 }.ConvertAll(n => new Num { Num_ = n });
+                var numbers = new List<long> { 10, 20, 30 }.Select(n => new Num { Num_ = n });
 
                 await call.RequestStream.WriteAllAsync(numbers);
                 var result = await call.ResponseAsync;
@@ -185,8 +185,8 @@ namespace Math.Tests
                 await call.RequestStream.WriteAllAsync(divArgsList);
                 var result = await call.ResponseStream.ToListAsync();
 
-                CollectionAssert.AreEqual(new long[] { 3, 4, 3 }, result.ConvertAll((divReply) => divReply.Quotient));
-                CollectionAssert.AreEqual(new long[] { 1, 16, 1 }, result.ConvertAll((divReply) => divReply.Remainder));
+                CollectionAssert.AreEqual(new long[] { 3, 4, 3 }, result.Select((divReply) => divReply.Quotient));
+                CollectionAssert.AreEqual(new long[] { 1, 16, 1 }, result.Select((divReply) => divReply.Remainder));
             }
         }
     }
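
Small aside on the ConvertAll-to-Select changes above: Select is a LINQ extension method, so the test file is assumed to already contain the corresponding directive (it is not visible in these hunks):

    using System.Linq;   // needed for .Select(...); assumed already present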
diff --git a/src/csharp/Grpc.Examples.Tests/NUnitMain.cs b/src/csharp/Grpc.Examples.Tests/NUnitMain.cs
index ea87802766b02f69c582c8fc4c3ee1b83b45c245..1a522cab932f20502c0e93806cda42b8836d79e7 100644
--- a/src/csharp/Grpc.Examples.Tests/NUnitMain.cs
+++ b/src/csharp/Grpc.Examples.Tests/NUnitMain.cs
@@ -49,7 +49,7 @@ namespace Grpc.Examples.Tests
         {
             // Make logger immune to NUnit capturing stdout and stderr to workaround https://github.com/nunit/nunit/issues/1406.
             GrpcEnvironment.SetLogger(new TextWriterLogger(Console.Error));
-#if DOTNET5_4
+#if NETSTANDARD1_5
             return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args, new ExtendedTextWrapper(Console.Out), Console.In);
 #else
             return new AutoRun().Execute(args);
diff --git a/src/csharp/Grpc.Examples.Tests/project.json b/src/csharp/Grpc.Examples.Tests/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..cc518eb6ff7ce4ec6769f0d3737ce9504cc65831
--- /dev/null
+++ b/src/csharp/Grpc.Examples.Tests/project.json
@@ -0,0 +1,69 @@
+{
+  "buildOptions": {
+    "emitEntryPoint": true
+  },
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+
+  "dependencies": {
+    "Grpc.Examples": {
+      "target": "project"
+    },
+    "NUnit": "3.2.0",
+    "NUnitLite": "3.2.0-*"
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.Examples/Grpc.Examples.csproj b/src/csharp/Grpc.Examples/Grpc.Examples.csproj
index 30170ab03c740de8c9667a18f99955e66cd25a9f..3dfa84e896e26098b61e2a72ef75d30ed70d06db 100644
--- a/src/csharp/Grpc.Examples/Grpc.Examples.csproj
+++ b/src/csharp/Grpc.Examples/Grpc.Examples.csproj
@@ -69,6 +69,7 @@
     </ProjectReference>
   </ItemGroup>
   <ItemGroup>
+    <None Include="Grpc.Examples.project.json" />
     <None Include="packages.config" />
   </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Examples/Grpc.Examples.project.json b/src/csharp/Grpc.Examples/Grpc.Examples.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.Examples/Grpc.Examples.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.Examples/Grpc.Examples.xproj b/src/csharp/Grpc.Examples/Grpc.Examples.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..d1d7e6d981633fad59d3113d581f86a9a91facfd
--- /dev/null
+++ b/src/csharp/Grpc.Examples/Grpc.Examples.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>c77b792d-fc78-4ce2-9522-b40b0803c636</ProjectGuid>
+    <RootNamespace>Grpc.Examples</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.Examples/MathGrpc.cs b/src/csharp/Grpc.Examples/MathGrpc.cs
index 4bbefcbe01a097f9ca21ee63d06b0231b0465af9..25abc514195abcf0d13323ea3cf3dd7048b9d14a 100644
--- a/src/csharp/Grpc.Examples/MathGrpc.cs
+++ b/src/csharp/Grpc.Examples/MathGrpc.cs
@@ -128,17 +128,22 @@ namespace Math {
     /// <summary>Client for Math</summary>
     public class MathClient : ClientBase<MathClient>
     {
+      /// <summary>Creates a new client for Math</summary>
+      /// <param name="channel">The channel to use to make remote calls.</param>
       public MathClient(Channel channel) : base(channel)
       {
       }
+      /// <summary>Creates a new client for Math that uses a custom <c>CallInvoker</c>.</summary>
+      /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
       public MathClient(CallInvoker callInvoker) : base(callInvoker)
       {
       }
-      ///<summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+      /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
       protected MathClient() : base()
       {
       }
-      ///<summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <param name="configuration">The client configuration.</param>
       protected MathClient(ClientBaseConfiguration configuration) : base(configuration)
       {
       }
@@ -235,12 +240,6 @@ namespace Math {
       }
     }
 
-    /// <summary>Creates a new client for Math</summary>
-    public static MathClient NewClient(Channel channel)
-    {
-      return new MathClient(channel);
-    }
-
     /// <summary>Creates service definition that can be registered with a server</summary>
     public static ServerServiceDefinition BindService(MathBase serviceImpl)
     {
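
With the static NewClient factory removed, callers construct the nested client type directly, as the MathClientServerTests.cs change earlier in this patch already shows. Minimal before/after sketch:

    // Before (factory removed by this patch):
    // var client = Math.NewClient(channel);

    // After:
    var client = new Math.MathClient(channel);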
diff --git a/src/csharp/Grpc.Examples/project.json b/src/csharp/Grpc.Examples/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..7d3f4dcbb1e4af55c7617e042e28207573103d1b
--- /dev/null
+++ b/src/csharp/Grpc.Examples/project.json
@@ -0,0 +1,27 @@
+{
+  "buildOptions": {
+  },
+
+  "dependencies": {
+    "Grpc.Core": {
+      "target": "project"
+    },
+    "Google.Protobuf": "3.0.0-beta3"
+  },
+  "frameworks": {
+    "net45": {
+      "frameworkAssemblies": {
+        "System.Runtime": "",
+        "System.IO": ""
+      }
+    },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
index a5ee4fdb46c1285b99513dbe5824de679e6f7fd6..aefacfbcc01814d9ba866a68910260a77be672c8 100644
--- a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
+++ b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
@@ -74,6 +74,7 @@
     </ProjectReference>
   </ItemGroup>
   <ItemGroup>
+    <None Include="Grpc.HealthCheck.Tests.project.json" />
     <None Include="packages.config" />
   </ItemGroup>
   <ItemGroup>
diff --git a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.project.json b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.xproj b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..724c5b2a160503866b01320b904fa151e660b6de
--- /dev/null
+++ b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>43dafac6-5343-4621-960e-a8a977ea3f0b</ProjectGuid>
+    <RootNamespace>Grpc.HealthCheck.Tests</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.HealthCheck.Tests/HealthClientServerTest.cs b/src/csharp/Grpc.HealthCheck.Tests/HealthClientServerTest.cs
index 070674bae9ddb5ef10280562bc4bccf17618edac..25a58fb3864a67281cf0140d9d7ed3e7607b6322 100644
--- a/src/csharp/Grpc.HealthCheck.Tests/HealthClientServerTest.cs
+++ b/src/csharp/Grpc.HealthCheck.Tests/HealthClientServerTest.cs
@@ -65,7 +65,7 @@ namespace Grpc.HealthCheck.Tests
             server.Start();
             channel = new Channel(Host, server.Ports.Single().BoundPort, ChannelCredentials.Insecure);
 
-            client = Grpc.Health.V1.Health.NewClient(channel);
+            client = new Grpc.Health.V1.Health.HealthClient(channel);
         }
 
         [TestFixtureTearDown]
diff --git a/src/csharp/Grpc.HealthCheck.Tests/NUnitMain.cs b/src/csharp/Grpc.HealthCheck.Tests/NUnitMain.cs
index 0820523f35c3ffc19adbdee4b0615b04fcdfd125..44634671ce5764eb562f0b78601113d06cb35d72 100644
--- a/src/csharp/Grpc.HealthCheck.Tests/NUnitMain.cs
+++ b/src/csharp/Grpc.HealthCheck.Tests/NUnitMain.cs
@@ -49,7 +49,7 @@ namespace Grpc.HealthCheck.Tests
         {
             // Make logger immune to NUnit capturing stdout and stderr to workaround https://github.com/nunit/nunit/issues/1406.
             GrpcEnvironment.SetLogger(new TextWriterLogger(Console.Error));
-#if DOTNET5_4
+#if NETSTANDARD1_5
             return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args, new ExtendedTextWrapper(Console.Out), Console.In);
 #else
             return new AutoRun().Execute(args);
diff --git a/src/csharp/Grpc.HealthCheck.Tests/project.json b/src/csharp/Grpc.HealthCheck.Tests/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..fbf8d92f04d20f4b8ab68c58686006a6a8ef4f9f
--- /dev/null
+++ b/src/csharp/Grpc.HealthCheck.Tests/project.json
@@ -0,0 +1,69 @@
+{
+  "buildOptions": {
+    "emitEntryPoint": true
+  },
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+
+  "dependencies": {
+    "Grpc.HealthCheck": {
+      "target": "project"
+    },
+    "NUnit": "3.2.0",
+    "NUnitLite": "3.2.0-*"
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
index 2697b74f59ebd523aa40691f847afff6172c7846..7db8b2d38e2b5200f567b74c3e863655a4f0ff02 100644
--- a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
+++ b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
@@ -65,6 +65,7 @@
   </ItemGroup>
   <ItemGroup>
     <None Include="Grpc.HealthCheck.nuspec" />
+    <None Include="Grpc.HealthCheck.project.json" />
     <None Include="packages.config" />
   </ItemGroup>
   <ItemGroup>
diff --git a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.project.json b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.xproj b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..5806a7af979bb71350fdbb8c3e741387f6c756c4
--- /dev/null
+++ b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>3be4ad0b-2bf0-4d68-b625-f6018ef0dcfa</ProjectGuid>
+    <RootNamespace>Grpc.HealthCheck</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
index d0ade7d02bec8a2211035acd86623971a1f038c0..43eea0f22f596064bb3ce7518b523da83ef4f68c 100644
--- a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
+++ b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
@@ -71,17 +71,22 @@ namespace Grpc.Health.V1 {
     /// <summary>Client for Health</summary>
     public class HealthClient : ClientBase<HealthClient>
     {
+      /// <summary>Creates a new client for Health</summary>
+      /// <param name="channel">The channel to use to make remote calls.</param>
       public HealthClient(Channel channel) : base(channel)
       {
       }
+      /// <summary>Creates a new client for Health that uses a custom <c>CallInvoker</c>.</summary>
+      /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
       public HealthClient(CallInvoker callInvoker) : base(callInvoker)
       {
       }
-      ///<summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+      /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
       protected HealthClient() : base()
       {
       }
-      ///<summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <param name="configuration">The client configuration.</param>
       protected HealthClient(ClientBaseConfiguration configuration) : base(configuration)
       {
       }
@@ -108,12 +113,6 @@ namespace Grpc.Health.V1 {
       }
     }
 
-    /// <summary>Creates a new client for Health</summary>
-    public static HealthClient NewClient(Channel channel)
-    {
-      return new HealthClient(channel);
-    }
-
     /// <summary>Creates service definition that can be registered with a server</summary>
     public static ServerServiceDefinition BindService(HealthBase serviceImpl)
     {
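
The protected parameterless constructor documented above exists so tests can build doubles of the client. A hedged sketch, assuming the generated call methods are virtual (the usual shape of gRPC C# clients; type and overload names follow the generated Grpc.Health.V1 code):

    using Grpc.Core;
    using Grpc.Health.V1;

    // Illustrative test double, not part of this patch.
    class FakeHealthClient : Health.HealthClient
    {
        public override HealthCheckResponse Check(HealthCheckRequest request, CallOptions options)
        {
            return new HealthCheckResponse
            {
                Status = HealthCheckResponse.Types.ServingStatus.Serving
            };
        }
    }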
diff --git a/src/csharp/Grpc.HealthCheck/project.json b/src/csharp/Grpc.HealthCheck/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..0e03e89d6a711034544f46ab1d5279d90df5c359
--- /dev/null
+++ b/src/csharp/Grpc.HealthCheck/project.json
@@ -0,0 +1,44 @@
+{
+  "version": "1.1.0-dev",
+  "title": "gRPC C# Healthchecking",
+  "authors": [ "Google Inc." ],
+  "copyright": "Copyright 2015, Google Inc.",
+  "packOptions": {
+    "summary": "Implementation of gRPC health service",
+    "description": "Example implementation of grpc.health.v1 service that can be used for health-checking.",
+    "owners": [ "grpc-packages" ],
+    "licenseUrl": "https://github.com/grpc/grpc/blob/master/LICENSE",
+    "projectUrl": "https://github.com/grpc/grpc",
+    "requireLicenseAcceptance": false,
+    "tags": [ "gRPC health check" ]
+  },
+  "buildOptions": {
+    "define": [ "SIGNED" ],
+    "keyFile": "../keys/Grpc.snk",
+    "publicSign": true,
+    "xmlDoc": true,
+    "compile": {
+      "includeFiles": [ "../Grpc.Core/Version.cs" ]
+    }
+  },
+  "dependencies": {
+    "Grpc.Core": "1.1.0-dev",
+    "Google.Protobuf": "3.0.0-beta3"
+  },
+  "frameworks": {
+    "net45": {
+      "frameworkAssemblies": {
+        "System.Runtime": "",
+        "System.IO": ""
+      }
+    },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
index 339a754c02cefd8fb47af638ee1f3e0ea8a884bf..91fb3ce5bcf4233c5dc26a49ad626a22a76d3d09 100644
--- a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
@@ -83,6 +83,7 @@
     </ProjectReference>
   </ItemGroup>
   <ItemGroup>
+    <None Include="Grpc.IntegrationTesting.Client.project.json" />
     <None Include="packages.config" />
   </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.project.json b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.xproj b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..7f456cfaef1f6af687091aa05bdb0672c211227c
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>48ea5bbe-70e2-4198-869d-d7e59c45f30d</ProjectGuid>
+    <RootNamespace>Grpc.IntegrationTesting.Client</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.IntegrationTesting.Client/project.json b/src/csharp/Grpc.IntegrationTesting.Client/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..4a2846feea0d40cc3c8ff115994ce5422d8e51bb
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.Client/project.json
@@ -0,0 +1,70 @@
+{
+  "buildOptions": {
+    "emitEntryPoint": true
+  },
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "include": "data/*",
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "include": "data/*",
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+
+  "dependencies": {
+    "Grpc.IntegrationTesting": {
+      "target": "project"
+    }
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45",
+        "net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
index 0dc2751b04564f3e7998a0923a2873496a332334..dda26a68923a18268808d2f838a1278402c10f38 100644
--- a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
@@ -59,5 +59,6 @@
   </ItemGroup>
   <ItemGroup>
     <None Include="app.config" />
+    <None Include="Grpc.IntegrationTesting.QpsWorker.project.json" />
   </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.project.json b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.xproj b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..15bec443d6c0d332e2f17ba80df1cf923aac02ed
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>661b70d7-f56a-46e0-9b81-6227b591b5e7</ProjectGuid>
+    <RootNamespace>Grpc.IntegrationTesting.QpsWorker</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json b/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..4a2846feea0d40cc3c8ff115994ce5422d8e51bb
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json
@@ -0,0 +1,70 @@
+{
+  "buildOptions": {
+    "emitEntryPoint": true
+  },
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "include": "data/*",
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "include": "data/*",
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+
+  "dependencies": {
+    "Grpc.IntegrationTesting": {
+      "target": "project"
+    }
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45",
+        "net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
index 27a5650308698ccbd5e39cef15212ebc5a5336f3..f73d99dbd1a06124c340ff758f1b85c664df0b71 100644
--- a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
@@ -83,6 +83,7 @@
     </ProjectReference>
   </ItemGroup>
   <ItemGroup>
+    <None Include="Grpc.IntegrationTesting.Server.project.json" />
     <None Include="packages.config" />
   </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.project.json b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.xproj b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..689eb0b842544740f2b3d7d2998eb9ff91008c51
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>881f7ad1-a84e-47a2-9402-115c63c4031e</ProjectGuid>
+    <RootNamespace>Grpc.IntegrationTesting.Server</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.IntegrationTesting.Server/project.json b/src/csharp/Grpc.IntegrationTesting.Server/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..4a2846feea0d40cc3c8ff115994ce5422d8e51bb
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.Server/project.json
@@ -0,0 +1,70 @@
+{
+  "buildOptions": {
+    "emitEntryPoint": true
+  },
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "include": "data/*",
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "include": "data/*",
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+
+  "dependencies": {
+    "Grpc.IntegrationTesting": {
+      "target": "project"
+    }
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45",
+        "net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
index d6eba74289ef70ae1b14354c134fedce21d1af96..8bd3d789138a670e4e9e1ebf5c914721e89f42ad 100644
--- a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
 <Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <PropertyGroup>
     <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
@@ -57,4 +57,7 @@
       <Name>Grpc.IntegrationTesting</Name>
     </ProjectReference>
   </ItemGroup>
+  <ItemGroup>
+    <None Include="Grpc.IntegrationTesting.StressClient.project.json" />
+  </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.project.json b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.xproj b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..2f4fdcbb4709a3c652ae16a17004fab733239247
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.xproj
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>0ebc910b-8867-4d3e-8686-91f34183d839</ProjectGuid>
+    <RootNamespace>Grpc.IntegrationTesting.StressClient</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">.\obj</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/project.json b/src/csharp/Grpc.IntegrationTesting.StressClient/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..4a2846feea0d40cc3c8ff115994ce5422d8e51bb
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting.StressClient/project.json
@@ -0,0 +1,70 @@
+{
+  "buildOptions": {
+    "emitEntryPoint": true
+  },
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "include": "data/*",
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "include": "data/*",
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+
+  "dependencies": {
+    "Grpc.IntegrationTesting": {
+      "target": "project"
+    }
+  },
+  "frameworks": {
+    "net45": { },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45",
+        "net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting/ClientRunners.cs b/src/csharp/Grpc.IntegrationTesting/ClientRunners.cs
index 39b9ae08e64ef99fcfc9c3c83764d65c95558034..b9c0fe6d0d847e70c7523f02590b94c02c979e04 100644
--- a/src/csharp/Grpc.IntegrationTesting/ClientRunners.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ClientRunners.cs
@@ -211,7 +211,7 @@ namespace Grpc.IntegrationTesting
 
             bool profilerReset = false;
 
-            var client = BenchmarkService.NewClient(channel);
+            var client = new BenchmarkService.BenchmarkServiceClient(channel);
             var request = CreateSimpleRequest();
             var stopwatch = new Stopwatch();
 
@@ -237,7 +237,7 @@ namespace Grpc.IntegrationTesting
 
         private async Task RunUnaryAsync(Channel channel, IInterarrivalTimer timer)
         {
-            var client = BenchmarkService.NewClient(channel);
+            var client = new BenchmarkService.BenchmarkServiceClient(channel);
             var request = CreateSimpleRequest();
             var stopwatch = new Stopwatch();
 
@@ -256,7 +256,7 @@ namespace Grpc.IntegrationTesting
 
         private async Task RunStreamingPingPongAsync(Channel channel, IInterarrivalTimer timer)
         {
-            var client = BenchmarkService.NewClient(channel);
+            var client = new BenchmarkService.BenchmarkServiceClient(channel);
             var request = CreateSimpleRequest();
             var stopwatch = new Stopwatch();
 
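
The ClientRunners.cs hunks above switch from the removed static factory (BenchmarkService.NewClient) to constructing the nested generated client type directly, matching the updated C# codegen. A minimal sketch of the new construction pattern, assuming a benchmark server is listening on localhost:50051 (the address is illustrative):

using Grpc.Core;
using Grpc.Testing;

class ClientConstructionSketch
{
    static void Main()
    {
        // Construct the nested *Client type directly; the NewClient() factory
        // no longer exists in the generated code.
        var channel = new Channel("localhost", 50051, ChannelCredentials.Insecure);
        var client = new BenchmarkService.BenchmarkServiceClient(channel);
        // ... issue benchmark calls with `client` ...
        channel.ShutdownAsync().Wait();
    }
}
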
diff --git a/src/csharp/Grpc.IntegrationTesting/GeneratedClientTest.cs b/src/csharp/Grpc.IntegrationTesting/GeneratedClientTest.cs
index 37786b6c30a79eb8be5eb2b969f0e0e672b01fb9..eb7b55a28631743d72458356cb53bf327d75ef27 100644
--- a/src/csharp/Grpc.IntegrationTesting/GeneratedClientTest.cs
+++ b/src/csharp/Grpc.IntegrationTesting/GeneratedClientTest.cs
@@ -40,7 +40,6 @@ using System.Threading.Tasks;
 using Grpc.Core;
 using Grpc.Core.Utils;
 using Grpc.Testing;
-using Moq;
 using NUnit.Framework;
 
 namespace Grpc.IntegrationTesting
@@ -49,14 +48,16 @@ namespace Grpc.IntegrationTesting
     {
         TestService.TestServiceClient unimplementedClient = new UnimplementedTestServiceClient();
 
+        // TODO: replace Moq with a mocking library that supports CoreCLR.
+#if !NETSTANDARD1_5
         [Test]
         public void ExpandedParamOverloadCanBeMocked()
         {
             var expected = new SimpleResponse();
 
-            var mockClient = new Mock<TestService.TestServiceClient>();
+            var mockClient = new Moq.Mock<TestService.TestServiceClient>();
             // mocking is relatively clumsy because one needs to specify value for all the optional params.
-            mockClient.Setup(m => m.UnaryCall(It.IsAny<SimpleRequest>(), null, null, CancellationToken.None)).Returns(expected);
+            mockClient.Setup(m => m.UnaryCall(Moq.It.IsAny<SimpleRequest>(), null, null, CancellationToken.None)).Returns(expected);
 
             Assert.AreSame(expected, mockClient.Object.UnaryCall(new SimpleRequest()));
         }
@@ -66,11 +67,12 @@ namespace Grpc.IntegrationTesting
         {
             var expected = new SimpleResponse();
 
-            var mockClient = new Mock<TestService.TestServiceClient>();
-            mockClient.Setup(m => m.UnaryCall(It.IsAny<SimpleRequest>(), It.IsAny<CallOptions>())).Returns(expected);
+            var mockClient = new Moq.Mock<TestService.TestServiceClient>();
+            mockClient.Setup(m => m.UnaryCall(Moq.It.IsAny<SimpleRequest>(), Moq.It.IsAny<CallOptions>())).Returns(expected);
 
             Assert.AreSame(expected, mockClient.Object.UnaryCall(new SimpleRequest(), new CallOptions()));
         }
+#endif
 
         [Test]
         public void DefaultMethodStubThrows_UnaryCall()
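
The diff above drops the file-level `using Moq;`, fully qualifies the Moq types, and fences the mocking tests with #if !NETSTANDARD1_5 because Moq is only referenced by the net45 build. The same stubbing pattern, restated as a standalone sketch; it assumes an NUnit + Moq test project referencing the generated Grpc.Testing types:

using System.Threading;
using Grpc.Testing;
using NUnit.Framework;

public class MockedClientSketch
{
    [Test]
    public void UnaryCallCanBeStubbed()
    {
        var expected = new SimpleResponse();

        // The expanded-parameter overload is (request, headers, deadline, cancellationToken),
        // so the stub pins down every optional argument explicitly.
        var mockClient = new Moq.Mock<TestService.TestServiceClient>();
        mockClient
            .Setup(m => m.UnaryCall(Moq.It.IsAny<SimpleRequest>(), null, null, CancellationToken.None))
            .Returns(expected);

        Assert.AreSame(expected, mockClient.Object.UnaryCall(new SimpleRequest()));
    }
}
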
diff --git a/src/csharp/Grpc.IntegrationTesting/GeneratedServiceBaseTest.cs b/src/csharp/Grpc.IntegrationTesting/GeneratedServiceBaseTest.cs
index 5fd0e14e78ddcca9ec1a359d04202d08b1f8efe5..4216dc1d6be6813646dde7c8c807431c983e9f5d 100644
--- a/src/csharp/Grpc.IntegrationTesting/GeneratedServiceBaseTest.cs
+++ b/src/csharp/Grpc.IntegrationTesting/GeneratedServiceBaseTest.cs
@@ -40,7 +40,6 @@ using System.Threading.Tasks;
 using Grpc.Core;
 using Grpc.Core.Utils;
 using Grpc.Testing;
-using Moq;
 using NUnit.Framework;
 
 namespace Grpc.IntegrationTesting
@@ -62,7 +61,7 @@ namespace Grpc.IntegrationTesting
             };
             server.Start();
             channel = new Channel(Host, server.Ports.Single().BoundPort, ChannelCredentials.Insecure);
-            client = TestService.NewClient(channel);
+            client = new TestService.TestServiceClient(channel);
         }
 
         [TearDown]
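
The SetUp hunk above likewise drops `using Moq;` and switches client construction from TestService.NewClient(channel) to new TestService.TestServiceClient(channel). A condensed sketch of the in-process fixture pattern it uses; InProcessFixtureSketch is a hypothetical name and TestServiceImpl stands in for whatever service implementation the real test binds:

using System.Linq;
using Grpc.Core;
using Grpc.Testing;

class InProcessFixtureSketch
{
    const string Host = "localhost";

    static void Main()
    {
        var server = new Server
        {
            Services = { TestService.BindService(new TestServiceImpl()) },
            Ports = { new ServerPort(Host, ServerPort.PickUnused, ServerCredentials.Insecure) }
        };
        server.Start();

        // Point the new-style generated client at whatever port the server actually bound.
        var channel = new Channel(Host, server.Ports.Single().BoundPort, ChannelCredentials.Insecure);
        var client = new TestService.TestServiceClient(channel);
        // ... run assertions against `client` ...

        channel.ShutdownAsync().Wait();
        server.ShutdownAsync().Wait();
    }
}
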
diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
index 008904940884a2257695f30ef79bd3cb95c58836..3a0764230d6c8fd801043a3a8c428f54abe4c0e0 100644
--- a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
+++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
@@ -129,6 +129,7 @@
     </ProjectReference>
   </ItemGroup>
   <ItemGroup>
+    <None Include="Grpc.IntegrationTesting.project.json" />
     <None Include="packages.config">
       <SubType>Designer</SubType>
     </None>
diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.project.json b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.project.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2f5bcb1637badadff8959d672e04c942cb54d51
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.project.json
@@ -0,0 +1,8 @@
+{
+  "frameworks": {
+    "net45": { }
+  },
+  "runtimes": {
+    "win": { }
+  }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.xproj b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.xproj
new file mode 100644
index 0000000000000000000000000000000000000000..357300ecb9b9a75cf035d0a062a8126727d0ca7d
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.xproj
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0.25123" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup>
+    <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25123</VisualStudioVersion>
+    <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.Props" Condition="'$(VSToolsPath)' != ''" />
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>20354386-3e71-4046-a269-3bc2a06f3ec8</ProjectGuid>
+    <RootNamespace>Grpc.IntegrationTesting</RootNamespace>
+    <BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
+    <OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
+  </PropertyGroup>
+  <PropertyGroup>
+    <SchemaVersion>2.0</SchemaVersion>
+  </PropertyGroup>
+  <Import Project="$(VSToolsPath)\DNX\Microsoft.DNX.targets" Condition="'$(VSToolsPath)' != ''" />
+</Project>
\ No newline at end of file
diff --git a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
index aea40afee2c4fbc5a855fb2d7f697b2f072faf2a..e27fe5b3d80e5abd5046ce0b5e5c005f51685f88 100644
--- a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
+++ b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
@@ -145,16 +145,26 @@ namespace Grpc.IntegrationTesting
 
             if (options.TestCase == "jwt_token_creds")
             {
+#if !NETSTANDARD1_5
                 var googleCredential = await GoogleCredential.GetApplicationDefaultAsync();
                 Assert.IsTrue(googleCredential.IsCreateScopedRequired);
                 credentials = ChannelCredentials.Create(credentials, googleCredential.ToCallCredentials());
+#else
+                // TODO(jtattermusch): implement this
+                throw new NotImplementedException("Not supported on CoreCLR yet");
+#endif
             }
 
             if (options.TestCase == "compute_engine_creds")
             {
+#if !NETSTANDARD1_5
                 var googleCredential = await GoogleCredential.GetApplicationDefaultAsync();
                 Assert.IsFalse(googleCredential.IsCreateScopedRequired);
                 credentials = ChannelCredentials.Create(credentials, googleCredential.ToCallCredentials());
+#else
+                // TODO(jtattermusch): implement this
+                throw new NotImplementedException("Not supported on CoreCLR yet");
+#endif
             }
             return credentials;
         }
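
The hunk above introduces the pattern used throughout this file for features that do not work on CoreCLR yet: compile the Google-auth path only for the full framework and throw on the netstandard1.5 build. A self-contained sketch of that shape (the class and method names are illustrative, not part of the diff):

using System;
using System.Threading.Tasks;
using Google.Apis.Auth.OAuth2;
using Grpc.Auth;
using Grpc.Core;

static class CoreClrGuardSketch
{
    // Mirrors the guards above: augment the channel credentials with Google
    // call credentials on net45, fail loudly on the netstandard1.5 build.
    public static async Task<ChannelCredentials> AugmentAsync(ChannelCredentials credentials)
    {
#if !NETSTANDARD1_5
        var googleCredential = await GoogleCredential.GetApplicationDefaultAsync();
        return ChannelCredentials.Create(credentials, googleCredential.ToCallCredentials());
#else
        // TODO: implement once the auth stack supports CoreCLR.
        throw new NotImplementedException("Not supported on CoreCLR yet");
#endif
    }
}
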
@@ -212,6 +222,12 @@ namespace Grpc.IntegrationTesting
                 case "unimplemented_method":
                     RunUnimplementedMethod(new UnimplementedService.UnimplementedServiceClient(channel));
                     break;
+                case "client_compressed_unary":
+                    RunClientCompressedUnary(client);
+                    break;
+                case "client_compressed_streaming":
+                    await RunClientCompressedStreamingAsync(client);
+                    break;
                 default:
                     throw new ArgumentException("Unknown test case " + options.TestCase);
             }
@@ -230,13 +246,11 @@ namespace Grpc.IntegrationTesting
             Console.WriteLine("running large_unary");
             var request = new SimpleRequest
             {
-                ResponseType = PayloadType.Compressable,
                 ResponseSize = 314159,
                 Payload = CreateZerosPayload(271828)
             };
             var response = client.UnaryCall(request);
 
-            Assert.AreEqual(PayloadType.Compressable, response.Payload.Type);
             Assert.AreEqual(314159, response.Payload.Body.Length);
             Console.WriteLine("Passed!");
         }
@@ -245,7 +259,7 @@ namespace Grpc.IntegrationTesting
         {
             Console.WriteLine("running client_streaming");
 
-            var bodySizes = new List<int> { 27182, 8, 1828, 45904 }.ConvertAll((size) => new StreamingInputCallRequest { Payload = CreateZerosPayload(size) });
+            var bodySizes = new List<int> { 27182, 8, 1828, 45904 }.Select((size) => new StreamingInputCallRequest { Payload = CreateZerosPayload(size) });
 
             using (var call = client.StreamingInputCall())
             {
@@ -265,18 +279,13 @@ namespace Grpc.IntegrationTesting
 
             var request = new StreamingOutputCallRequest
             {
-                ResponseType = PayloadType.Compressable,
-                ResponseParameters = { bodySizes.ConvertAll((size) => new ResponseParameters { Size = size }) }
+                ResponseParameters = { bodySizes.Select((size) => new ResponseParameters { Size = size }) }
             };
 
             using (var call = client.StreamingOutputCall(request))
             {
                 var responseList = await call.ResponseStream.ToListAsync();
-                foreach (var res in responseList)
-                {
-                    Assert.AreEqual(PayloadType.Compressable, res.Payload.Type);
-                }
-                CollectionAssert.AreEqual(bodySizes, responseList.ConvertAll((item) => item.Payload.Body.Length));
+                CollectionAssert.AreEqual(bodySizes, responseList.Select((item) => item.Payload.Body.Length));
             }
             Console.WriteLine("Passed!");
         }
@@ -289,46 +298,38 @@ namespace Grpc.IntegrationTesting
             {
                 await call.RequestStream.WriteAsync(new StreamingOutputCallRequest
                 {
-                    ResponseType = PayloadType.Compressable,
                     ResponseParameters = { new ResponseParameters { Size = 31415 } },
                     Payload = CreateZerosPayload(27182)
                 });
 
                 Assert.IsTrue(await call.ResponseStream.MoveNext());
-                Assert.AreEqual(PayloadType.Compressable, call.ResponseStream.Current.Payload.Type);
                 Assert.AreEqual(31415, call.ResponseStream.Current.Payload.Body.Length);
 
                 await call.RequestStream.WriteAsync(new StreamingOutputCallRequest
                 {
-                    ResponseType = PayloadType.Compressable,
                     ResponseParameters = { new ResponseParameters { Size = 9 } },
                     Payload = CreateZerosPayload(8)
                 });
 
                 Assert.IsTrue(await call.ResponseStream.MoveNext());
-                Assert.AreEqual(PayloadType.Compressable, call.ResponseStream.Current.Payload.Type);
                 Assert.AreEqual(9, call.ResponseStream.Current.Payload.Body.Length);
 
                 await call.RequestStream.WriteAsync(new StreamingOutputCallRequest
                 {
-                    ResponseType = PayloadType.Compressable,
                     ResponseParameters = { new ResponseParameters { Size = 2653 } },
                     Payload = CreateZerosPayload(1828)
                 });
 
                 Assert.IsTrue(await call.ResponseStream.MoveNext());
-                Assert.AreEqual(PayloadType.Compressable, call.ResponseStream.Current.Payload.Type);
                 Assert.AreEqual(2653, call.ResponseStream.Current.Payload.Body.Length);
 
                 await call.RequestStream.WriteAsync(new StreamingOutputCallRequest
                 {
-                    ResponseType = PayloadType.Compressable,
                     ResponseParameters = { new ResponseParameters { Size = 58979 } },
                     Payload = CreateZerosPayload(45904)
                 });
 
                 Assert.IsTrue(await call.ResponseStream.MoveNext());
-                Assert.AreEqual(PayloadType.Compressable, call.ResponseStream.Current.Payload.Type);
                 Assert.AreEqual(58979, call.ResponseStream.Current.Payload.Body.Length);
 
                 await call.RequestStream.CompleteAsync();
@@ -357,7 +358,6 @@ namespace Grpc.IntegrationTesting
 
             var request = new SimpleRequest
             {
-                ResponseType = PayloadType.Compressable,
                 ResponseSize = 314159,
                 Payload = CreateZerosPayload(271828),
                 FillUsername = true,
@@ -367,7 +367,6 @@ namespace Grpc.IntegrationTesting
             // not setting credentials here because they were set on channel already
             var response = client.UnaryCall(request);
 
-            Assert.AreEqual(PayloadType.Compressable, response.Payload.Type);
             Assert.AreEqual(314159, response.Payload.Body.Length);
             Assert.False(string.IsNullOrEmpty(response.OauthScope));
             Assert.True(oauthScope.Contains(response.OauthScope));
@@ -381,7 +380,6 @@ namespace Grpc.IntegrationTesting
            
             var request = new SimpleRequest
             {
-                ResponseType = PayloadType.Compressable,
                 ResponseSize = 314159,
                 Payload = CreateZerosPayload(271828),
                 FillUsername = true,
@@ -390,7 +388,6 @@ namespace Grpc.IntegrationTesting
             // not setting credentials here because they were set on channel already
             var response = client.UnaryCall(request);
 
-            Assert.AreEqual(PayloadType.Compressable, response.Payload.Type);
             Assert.AreEqual(314159, response.Payload.Body.Length);
             Assert.AreEqual(GetEmailFromServiceAccountFile(), response.Username);
             Console.WriteLine("Passed!");
@@ -398,6 +395,7 @@ namespace Grpc.IntegrationTesting
 
         public static async Task RunOAuth2AuthTokenAsync(TestService.TestServiceClient client, string oauthScope)
         {
+#if !NETSTANDARD1_5
             Console.WriteLine("running oauth2_auth_token");
             ITokenAccess credential = (await GoogleCredential.GetApplicationDefaultAsync()).CreateScoped(new[] { oauthScope });
             string oauth2Token = await credential.GetAccessTokenForRequestAsync();
@@ -415,10 +413,15 @@ namespace Grpc.IntegrationTesting
             Assert.True(oauthScope.Contains(response.OauthScope));
             Assert.AreEqual(GetEmailFromServiceAccountFile(), response.Username);
             Console.WriteLine("Passed!");
+#else
+            // TODO(jtattermusch): implement this
+            throw new NotImplementedException("Not supported on CoreCLR yet");
+#endif
         }
 
         public static async Task RunPerRpcCredsAsync(TestService.TestServiceClient client, string oauthScope)
         {
+#if !NETSTANDARD1_5
             Console.WriteLine("running per_rpc_creds");
             ITokenAccess googleCredential = await GoogleCredential.GetApplicationDefaultAsync();
 
@@ -432,6 +435,10 @@ namespace Grpc.IntegrationTesting
 
             Assert.AreEqual(GetEmailFromServiceAccountFile(), response.Username);
             Console.WriteLine("Passed!");
+#else
+            // TODO(jtattermusch): implement this
+            throw new NotImplementedException("Not supported on CoreCLR yet");
+#endif
         }
 
         public static async Task RunCancelAfterBeginAsync(TestService.TestServiceClient client)
@@ -460,13 +467,11 @@ namespace Grpc.IntegrationTesting
             {
                 await call.RequestStream.WriteAsync(new StreamingOutputCallRequest
                 {
-                    ResponseType = PayloadType.Compressable,
                     ResponseParameters = { new ResponseParameters { Size = 31415 } },
                     Payload = CreateZerosPayload(27182)
                 });
 
                 Assert.IsTrue(await call.ResponseStream.MoveNext());
-                Assert.AreEqual(PayloadType.Compressable, call.ResponseStream.Current.Payload.Type);
                 Assert.AreEqual(31415, call.ResponseStream.Current.Payload.Body.Length);
 
                 cts.Cancel();
@@ -526,7 +531,6 @@ namespace Grpc.IntegrationTesting
                 // step 1: test unary call
                 var request = new SimpleRequest
                 {
-                    ResponseType = PayloadType.Compressable,
                     ResponseSize = 314159,
                     Payload = CreateZerosPayload(271828)
                 };
@@ -545,7 +549,6 @@ namespace Grpc.IntegrationTesting
                 // step 2: test full duplex call
                 var request = new StreamingOutputCallRequest
                 {
-                    ResponseType = PayloadType.Compressable,
                     ResponseParameters = { new ResponseParameters { Size = 31415 } },
                     Payload = CreateZerosPayload(27182)
                 };
@@ -618,21 +621,127 @@ namespace Grpc.IntegrationTesting
             Console.WriteLine("Passed!");
         }
 
+        public static void RunClientCompressedUnary(TestService.TestServiceClient client)
+        {
+            Console.WriteLine("running client_compressed_unary");
+            var probeRequest = new SimpleRequest
+            {
+                ExpectCompressed = new BoolValue
+                {
+                    Value = true  // lie about compression
+                },
+                ResponseSize = 314159,
+                Payload = CreateZerosPayload(271828)
+            };
+            var e = Assert.Throws<RpcException>(() => client.UnaryCall(probeRequest, CreateClientCompressionMetadata(false)));
+            Assert.AreEqual(StatusCode.InvalidArgument, e.Status.StatusCode);
+
+            var compressedRequest = new SimpleRequest
+            {
+                ExpectCompressed = new BoolValue
+                {
+                    Value = true
+                },
+                ResponseSize = 314159,
+                Payload = CreateZerosPayload(271828)
+            };
+            var response1 = client.UnaryCall(compressedRequest, CreateClientCompressionMetadata(true));
+            Assert.AreEqual(314159, response1.Payload.Body.Length);
+
+            var uncompressedRequest = new SimpleRequest
+            {
+                ExpectCompressed = new BoolValue
+                {
+                    Value = false
+                },
+                ResponseSize = 314159,
+                Payload = CreateZerosPayload(271828)
+            };
+            var response2 = client.UnaryCall(uncompressedRequest, CreateClientCompressionMetadata(false));
+            Assert.AreEqual(314159, response2.Payload.Body.Length);
+
+            Console.WriteLine("Passed!");
+        }
+
+        public static async Task RunClientCompressedStreamingAsync(TestService.TestServiceClient client)
+        {
+            Console.WriteLine("running client_compressed_streaming");
+            try
+            {
+                var probeCall = client.StreamingInputCall(CreateClientCompressionMetadata(false));
+                await probeCall.RequestStream.WriteAsync(new StreamingInputCallRequest
+                {
+                    ExpectCompressed = new BoolValue
+                    {
+                        Value = true
+                    },
+                    Payload = CreateZerosPayload(27182)
+                });
+
+                // cannot use Assert.ThrowsAsync because it uses Task.Wait and would deadlock.
+                await probeCall;
+                Assert.Fail();
+            }
+            catch (RpcException e)
+            {
+                Assert.AreEqual(StatusCode.InvalidArgument, e.Status.StatusCode);
+            }
+
+            var call = client.StreamingInputCall(CreateClientCompressionMetadata(true));
+            await call.RequestStream.WriteAsync(new StreamingInputCallRequest
+            {
+                ExpectCompressed = new BoolValue
+                {
+                    Value = true
+                },
+                Payload = CreateZerosPayload(27182)
+            });
+
+            call.RequestStream.WriteOptions = new WriteOptions(WriteFlags.NoCompress);
+            await call.RequestStream.WriteAsync(new StreamingInputCallRequest
+            {
+                ExpectCompressed = new BoolValue
+                {
+                    Value = false
+                },
+                Payload = CreateZerosPayload(45904)
+            });
+            await call.RequestStream.CompleteAsync();
+
+            var response = await call.ResponseAsync;
+            Assert.AreEqual(73086, response.AggregatedPayloadSize);
+
+            Console.WriteLine("Passed!");
+        }
+
         private static Payload CreateZerosPayload(int size)
         {
             return new Payload { Body = ByteString.CopyFrom(new byte[size]) };
         }
 
+        private static Metadata CreateClientCompressionMetadata(bool compressed)
+        {
+            var algorithmName = compressed ? "gzip" : "identity";
+            return new Metadata
+            {
+                { new Metadata.Entry(Metadata.CompressionRequestAlgorithmMetadataKey, algorithmName) }
+            };
+        }
+
         // extracts the client_email field from service account file used for auth test cases
         private static string GetEmailFromServiceAccountFile()
         {
+#if !NETSTANDARD1_5
             string keyFile = Environment.GetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS");
             Assert.IsNotNull(keyFile);
-
             var jobject = JObject.Parse(File.ReadAllText(keyFile));
             string email = jobject.GetValue("client_email").Value<string>();
             Assert.IsTrue(email.Length > 0);  // spec requires nonempty client email.
             return email;
+#else
+            // TODO(jtattermusch): implement this
+            throw new NotImplementedException("Not supported on CoreCLR yet");
+#endif
         }
 
         private static Metadata CreateTestMetadata()
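
The new client_compressed_* cases above combine two per-call knobs: request metadata built with Metadata.CompressionRequestAlgorithmMetadataKey, which asks the channel to gzip outgoing messages, and WriteOptions(WriteFlags.NoCompress), which exempts an individual message on the same call. A condensed sketch of that interplay (CompressionSketch and SendMixedCompressionAsync are illustrative names; the client is assumed to be already connected):

using System.Threading.Tasks;
using Google.Protobuf;
using Grpc.Core;
using Grpc.Testing;

static class CompressionSketch
{
    // Same shape as CreateClientCompressionMetadata above.
    static Metadata GzipRequestMetadata()
    {
        return new Metadata
        {
            { new Metadata.Entry(Metadata.CompressionRequestAlgorithmMetadataKey, "gzip") }
        };
    }

    public static async Task<int> SendMixedCompressionAsync(TestService.TestServiceClient client)
    {
        using (var call = client.StreamingInputCall(GzipRequestMetadata()))
        {
            // First message goes out compressed, as requested via metadata.
            await call.RequestStream.WriteAsync(new StreamingInputCallRequest
            {
                ExpectCompressed = new BoolValue { Value = true },
                Payload = new Payload { Body = ByteString.CopyFrom(new byte[27182]) }
            });

            // Second message opts out of compression for this write only.
            call.RequestStream.WriteOptions = new WriteOptions(WriteFlags.NoCompress);
            await call.RequestStream.WriteAsync(new StreamingInputCallRequest
            {
                ExpectCompressed = new BoolValue { Value = false },
                Payload = new Payload { Body = ByteString.CopyFrom(new byte[45904]) }
            });

            await call.RequestStream.CompleteAsync();
            var response = await call.ResponseAsync;
            return response.AggregatedPayloadSize;  // 27182 + 45904 = 73086
        }
    }
}
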
diff --git a/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs b/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs
index 4ee1ff5ec8ae8f94a617a58c671d4dab104d1ea4..f907f630dabd3aed25e73d37e09dfb33b7ee7e80 100644
--- a/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs
+++ b/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs
@@ -69,7 +69,7 @@ namespace Grpc.IntegrationTesting
             };
             int port = server.Ports.Single().BoundPort;
             channel = new Channel(Host, port, TestCredentials.CreateSslCredentials(), options);
-            client = TestService.NewClient(channel);
+            client = new TestService.TestServiceClient(channel);
         }
 
         [TestFixtureTearDown]
@@ -148,7 +148,7 @@ namespace Grpc.IntegrationTesting
         [Test]
         public void UnimplementedMethod()
         {
-            InteropClient.RunUnimplementedMethod(UnimplementedService.NewClient(channel));
+            InteropClient.RunUnimplementedMethod(new UnimplementedService.UnimplementedServiceClient(channel));
         }
     }
 }
diff --git a/src/csharp/Grpc.IntegrationTesting/Messages.cs b/src/csharp/Grpc.IntegrationTesting/Messages.cs
index d42501aa5b8ca350e40b94f6cb361687d5e269e2..1240db128b06f954da27274c2edc015dc770b914 100644
--- a/src/csharp/Grpc.IntegrationTesting/Messages.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Messages.cs
@@ -24,46 +24,48 @@ namespace Grpc.Testing {
       byte[] descriptorData = global::System.Convert.FromBase64String(
           string.Concat(
             "CiVzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL21lc3NhZ2VzLnByb3RvEgxncnBj",
-            "LnRlc3RpbmciQAoHUGF5bG9hZBInCgR0eXBlGAEgASgOMhkuZ3JwYy50ZXN0",
-            "aW5nLlBheWxvYWRUeXBlEgwKBGJvZHkYAiABKAwiKwoKRWNob1N0YXR1cxIM",
-            "CgRjb2RlGAEgASgFEg8KB21lc3NhZ2UYAiABKAkioQIKDVNpbXBsZVJlcXVl",
-            "c3QSMAoNcmVzcG9uc2VfdHlwZRgBIAEoDjIZLmdycGMudGVzdGluZy5QYXls",
-            "b2FkVHlwZRIVCg1yZXNwb25zZV9zaXplGAIgASgFEiYKB3BheWxvYWQYAyAB",
-            "KAsyFS5ncnBjLnRlc3RpbmcuUGF5bG9hZBIVCg1maWxsX3VzZXJuYW1lGAQg",
-            "ASgIEhgKEGZpbGxfb2F1dGhfc2NvcGUYBSABKAgSOwoUcmVzcG9uc2VfY29t",
-            "cHJlc3Npb24YBiABKA4yHS5ncnBjLnRlc3RpbmcuQ29tcHJlc3Npb25UeXBl",
-            "EjEKD3Jlc3BvbnNlX3N0YXR1cxgHIAEoCzIYLmdycGMudGVzdGluZy5FY2hv",
-            "U3RhdHVzIl8KDlNpbXBsZVJlc3BvbnNlEiYKB3BheWxvYWQYASABKAsyFS5n",
-            "cnBjLnRlc3RpbmcuUGF5bG9hZBIQCgh1c2VybmFtZRgCIAEoCRITCgtvYXV0",
-            "aF9zY29wZRgDIAEoCSJDChlTdHJlYW1pbmdJbnB1dENhbGxSZXF1ZXN0EiYK",
-            "B3BheWxvYWQYASABKAsyFS5ncnBjLnRlc3RpbmcuUGF5bG9hZCI9ChpTdHJl",
-            "YW1pbmdJbnB1dENhbGxSZXNwb25zZRIfChdhZ2dyZWdhdGVkX3BheWxvYWRf",
-            "c2l6ZRgBIAEoBSI3ChJSZXNwb25zZVBhcmFtZXRlcnMSDAoEc2l6ZRgBIAEo",
-            "BRITCgtpbnRlcnZhbF91cxgCIAEoBSKlAgoaU3RyZWFtaW5nT3V0cHV0Q2Fs",
-            "bFJlcXVlc3QSMAoNcmVzcG9uc2VfdHlwZRgBIAEoDjIZLmdycGMudGVzdGlu",
-            "Zy5QYXlsb2FkVHlwZRI9ChNyZXNwb25zZV9wYXJhbWV0ZXJzGAIgAygLMiAu",
-            "Z3JwYy50ZXN0aW5nLlJlc3BvbnNlUGFyYW1ldGVycxImCgdwYXlsb2FkGAMg",
-            "ASgLMhUuZ3JwYy50ZXN0aW5nLlBheWxvYWQSOwoUcmVzcG9uc2VfY29tcHJl",
-            "c3Npb24YBiABKA4yHS5ncnBjLnRlc3RpbmcuQ29tcHJlc3Npb25UeXBlEjEK",
-            "D3Jlc3BvbnNlX3N0YXR1cxgHIAEoCzIYLmdycGMudGVzdGluZy5FY2hvU3Rh",
-            "dHVzIkUKG1N0cmVhbWluZ091dHB1dENhbGxSZXNwb25zZRImCgdwYXlsb2Fk",
-            "GAEgASgLMhUuZ3JwYy50ZXN0aW5nLlBheWxvYWQiMwoPUmVjb25uZWN0UGFy",
-            "YW1zEiAKGG1heF9yZWNvbm5lY3RfYmFja29mZl9tcxgBIAEoBSIzCg1SZWNv",
-            "bm5lY3RJbmZvEg4KBnBhc3NlZBgBIAEoCBISCgpiYWNrb2ZmX21zGAIgAygF",
-            "Kj8KC1BheWxvYWRUeXBlEhAKDENPTVBSRVNTQUJMRRAAEhIKDlVOQ09NUFJF",
-            "U1NBQkxFEAESCgoGUkFORE9NEAIqMgoPQ29tcHJlc3Npb25UeXBlEggKBE5P",
-            "TkUQABIICgRHWklQEAESCwoHREVGTEFURRACYgZwcm90bzM="));
+            "LnRlc3RpbmciGgoJQm9vbFZhbHVlEg0KBXZhbHVlGAEgASgIIkAKB1BheWxv",
+            "YWQSJwoEdHlwZRgBIAEoDjIZLmdycGMudGVzdGluZy5QYXlsb2FkVHlwZRIM",
+            "CgRib2R5GAIgASgMIisKCkVjaG9TdGF0dXMSDAoEY29kZRgBIAEoBRIPCgdt",
+            "ZXNzYWdlGAIgASgJIs4CCg1TaW1wbGVSZXF1ZXN0EjAKDXJlc3BvbnNlX3R5",
+            "cGUYASABKA4yGS5ncnBjLnRlc3RpbmcuUGF5bG9hZFR5cGUSFQoNcmVzcG9u",
+            "c2Vfc2l6ZRgCIAEoBRImCgdwYXlsb2FkGAMgASgLMhUuZ3JwYy50ZXN0aW5n",
+            "LlBheWxvYWQSFQoNZmlsbF91c2VybmFtZRgEIAEoCBIYChBmaWxsX29hdXRo",
+            "X3Njb3BlGAUgASgIEjQKE3Jlc3BvbnNlX2NvbXByZXNzZWQYBiABKAsyFy5n",
+            "cnBjLnRlc3RpbmcuQm9vbFZhbHVlEjEKD3Jlc3BvbnNlX3N0YXR1cxgHIAEo",
+            "CzIYLmdycGMudGVzdGluZy5FY2hvU3RhdHVzEjIKEWV4cGVjdF9jb21wcmVz",
+            "c2VkGAggASgLMhcuZ3JwYy50ZXN0aW5nLkJvb2xWYWx1ZSJfCg5TaW1wbGVS",
+            "ZXNwb25zZRImCgdwYXlsb2FkGAEgASgLMhUuZ3JwYy50ZXN0aW5nLlBheWxv",
+            "YWQSEAoIdXNlcm5hbWUYAiABKAkSEwoLb2F1dGhfc2NvcGUYAyABKAkidwoZ",
+            "U3RyZWFtaW5nSW5wdXRDYWxsUmVxdWVzdBImCgdwYXlsb2FkGAEgASgLMhUu",
+            "Z3JwYy50ZXN0aW5nLlBheWxvYWQSMgoRZXhwZWN0X2NvbXByZXNzZWQYAiAB",
+            "KAsyFy5ncnBjLnRlc3RpbmcuQm9vbFZhbHVlIj0KGlN0cmVhbWluZ0lucHV0",
+            "Q2FsbFJlc3BvbnNlEh8KF2FnZ3JlZ2F0ZWRfcGF5bG9hZF9zaXplGAEgASgF",
+            "ImQKElJlc3BvbnNlUGFyYW1ldGVycxIMCgRzaXplGAEgASgFEhMKC2ludGVy",
+            "dmFsX3VzGAIgASgFEisKCmNvbXByZXNzZWQYAyABKAsyFy5ncnBjLnRlc3Rp",
+            "bmcuQm9vbFZhbHVlIugBChpTdHJlYW1pbmdPdXRwdXRDYWxsUmVxdWVzdBIw",
+            "Cg1yZXNwb25zZV90eXBlGAEgASgOMhkuZ3JwYy50ZXN0aW5nLlBheWxvYWRU",
+            "eXBlEj0KE3Jlc3BvbnNlX3BhcmFtZXRlcnMYAiADKAsyIC5ncnBjLnRlc3Rp",
+            "bmcuUmVzcG9uc2VQYXJhbWV0ZXJzEiYKB3BheWxvYWQYAyABKAsyFS5ncnBj",
+            "LnRlc3RpbmcuUGF5bG9hZBIxCg9yZXNwb25zZV9zdGF0dXMYByABKAsyGC5n",
+            "cnBjLnRlc3RpbmcuRWNob1N0YXR1cyJFChtTdHJlYW1pbmdPdXRwdXRDYWxs",
+            "UmVzcG9uc2USJgoHcGF5bG9hZBgBIAEoCzIVLmdycGMudGVzdGluZy5QYXls",
+            "b2FkIjMKD1JlY29ubmVjdFBhcmFtcxIgChhtYXhfcmVjb25uZWN0X2JhY2tv",
+            "ZmZfbXMYASABKAUiMwoNUmVjb25uZWN0SW5mbxIOCgZwYXNzZWQYASABKAgS",
+            "EgoKYmFja29mZl9tcxgCIAMoBSofCgtQYXlsb2FkVHlwZRIQCgxDT01QUkVT",
+            "U0FCTEUQAGIGcHJvdG8z"));
       descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
           new pbr::FileDescriptor[] { },
-          new pbr::GeneratedClrTypeInfo(new[] {typeof(global::Grpc.Testing.PayloadType), typeof(global::Grpc.Testing.CompressionType), }, new pbr::GeneratedClrTypeInfo[] {
+          new pbr::GeneratedClrTypeInfo(new[] {typeof(global::Grpc.Testing.PayloadType), }, new pbr::GeneratedClrTypeInfo[] {
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.BoolValue), global::Grpc.Testing.BoolValue.Parser, new[]{ "Value" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.Payload), global::Grpc.Testing.Payload.Parser, new[]{ "Type", "Body" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.EchoStatus), global::Grpc.Testing.EchoStatus.Parser, new[]{ "Code", "Message" }, null, null, null),
-            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.SimpleRequest), global::Grpc.Testing.SimpleRequest.Parser, new[]{ "ResponseType", "ResponseSize", "Payload", "FillUsername", "FillOauthScope", "ResponseCompression", "ResponseStatus" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.SimpleRequest), global::Grpc.Testing.SimpleRequest.Parser, new[]{ "ResponseType", "ResponseSize", "Payload", "FillUsername", "FillOauthScope", "ResponseCompressed", "ResponseStatus", "ExpectCompressed" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.SimpleResponse), global::Grpc.Testing.SimpleResponse.Parser, new[]{ "Payload", "Username", "OauthScope" }, null, null, null),
-            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.StreamingInputCallRequest), global::Grpc.Testing.StreamingInputCallRequest.Parser, new[]{ "Payload" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.StreamingInputCallRequest), global::Grpc.Testing.StreamingInputCallRequest.Parser, new[]{ "Payload", "ExpectCompressed" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.StreamingInputCallResponse), global::Grpc.Testing.StreamingInputCallResponse.Parser, new[]{ "AggregatedPayloadSize" }, null, null, null),
-            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ResponseParameters), global::Grpc.Testing.ResponseParameters.Parser, new[]{ "Size", "IntervalUs" }, null, null, null),
-            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.StreamingOutputCallRequest), global::Grpc.Testing.StreamingOutputCallRequest.Parser, new[]{ "ResponseType", "ResponseParameters", "Payload", "ResponseCompression", "ResponseStatus" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ResponseParameters), global::Grpc.Testing.ResponseParameters.Parser, new[]{ "Size", "IntervalUs", "Compressed" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.StreamingOutputCallRequest), global::Grpc.Testing.StreamingOutputCallRequest.Parser, new[]{ "ResponseType", "ResponseParameters", "Payload", "ResponseStatus" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.StreamingOutputCallResponse), global::Grpc.Testing.StreamingOutputCallResponse.Parser, new[]{ "Payload" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ReconnectParams), global::Grpc.Testing.ReconnectParams.Parser, new[]{ "MaxReconnectBackoffMs" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ReconnectInfo), global::Grpc.Testing.ReconnectInfo.Parser, new[]{ "Passed", "BackoffMs" }, null, null, null)
@@ -74,6 +76,7 @@ namespace Grpc.Testing {
   }
   #region Enums
   /// <summary>
+  ///  DEPRECATED, don't use. To be removed shortly.
   ///  The type of payload that should be returned.
   /// </summary>
   public enum PayloadType {
@@ -81,31 +84,122 @@ namespace Grpc.Testing {
     ///  Compressable text format.
     /// </summary>
     [pbr::OriginalName("COMPRESSABLE")] Compressable = 0,
-    /// <summary>
-    ///  Uncompressable binary format.
-    /// </summary>
-    [pbr::OriginalName("UNCOMPRESSABLE")] Uncompressable = 1,
-    /// <summary>
-    ///  Randomly chosen from all other formats defined in this enum.
-    /// </summary>
-    [pbr::OriginalName("RANDOM")] Random = 2,
   }
 
+  #endregion
+
+  #region Messages
   /// <summary>
-  ///  Compression algorithms
+  ///  TODO(dgq): Go back to using well-known types once
+  ///  https://github.com/grpc/grpc/issues/6980 has been fixed.
+  ///  import "google/protobuf/wrappers.proto";
   /// </summary>
-  public enum CompressionType {
+  [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
+  public sealed partial class BoolValue : pb::IMessage<BoolValue> {
+    private static readonly pb::MessageParser<BoolValue> _parser = new pb::MessageParser<BoolValue>(() => new BoolValue());
+    public static pb::MessageParser<BoolValue> Parser { get { return _parser; } }
+
+    public static pbr::MessageDescriptor Descriptor {
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[0]; }
+    }
+
+    pbr::MessageDescriptor pb::IMessage.Descriptor {
+      get { return Descriptor; }
+    }
+
+    public BoolValue() {
+      OnConstruction();
+    }
+
+    partial void OnConstruction();
+
+    public BoolValue(BoolValue other) : this() {
+      value_ = other.value_;
+    }
+
+    public BoolValue Clone() {
+      return new BoolValue(this);
+    }
+
+    /// <summary>Field number for the "value" field.</summary>
+    public const int ValueFieldNumber = 1;
+    private bool value_;
     /// <summary>
-    ///  No compression
+    ///  The bool value.
     /// </summary>
-    [pbr::OriginalName("NONE")] None = 0,
-    [pbr::OriginalName("GZIP")] Gzip = 1,
-    [pbr::OriginalName("DEFLATE")] Deflate = 2,
-  }
+    public bool Value {
+      get { return value_; }
+      set {
+        value_ = value;
+      }
+    }
 
-  #endregion
+    public override bool Equals(object other) {
+      return Equals(other as BoolValue);
+    }
+
+    public bool Equals(BoolValue other) {
+      if (ReferenceEquals(other, null)) {
+        return false;
+      }
+      if (ReferenceEquals(other, this)) {
+        return true;
+      }
+      if (Value != other.Value) return false;
+      return true;
+    }
+
+    public override int GetHashCode() {
+      int hash = 1;
+      if (Value != false) hash ^= Value.GetHashCode();
+      return hash;
+    }
+
+    public override string ToString() {
+      return pb::JsonFormatter.ToDiagnosticString(this);
+    }
+
+    public void WriteTo(pb::CodedOutputStream output) {
+      if (Value != false) {
+        output.WriteRawTag(8);
+        output.WriteBool(Value);
+      }
+    }
+
+    public int CalculateSize() {
+      int size = 0;
+      if (Value != false) {
+        size += 1 + 1;
+      }
+      return size;
+    }
+
+    public void MergeFrom(BoolValue other) {
+      if (other == null) {
+        return;
+      }
+      if (other.Value != false) {
+        Value = other.Value;
+      }
+    }
+
+    public void MergeFrom(pb::CodedInputStream input) {
+      uint tag;
+      while ((tag = input.ReadTag()) != 0) {
+        switch(tag) {
+          default:
+            input.SkipLastField();
+            break;
+          case 8: {
+            Value = input.ReadBool();
+            break;
+          }
+        }
+      }
+    }
+
+  }
 
-  #region Messages
   /// <summary>
   ///  A block of data, to simply increase gRPC message size.
   /// </summary>
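
BoolValue above is a hand-rolled stand-in for google.protobuf.BoolValue (see the TODO(dgq) note) so the compression fields can be tri-state: left null for "no expectation", or set to an explicit true or false. A short sketch of building a request that way (BuildRequest is a hypothetical helper, not part of the diff):

using Grpc.Testing;

static class BoolValueSketch
{
    public static SimpleRequest BuildRequest(bool? expectCompressed)
    {
        var request = new SimpleRequest { ResponseSize = 314159 };
        // A null message field means "no opinion"; wrapping the bool in the
        // BoolValue message encodes an explicit true or false.
        if (expectCompressed.HasValue)
        {
            request.ExpectCompressed = new BoolValue { Value = expectCompressed.Value };
        }
        return request;
    }
}
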
@@ -115,7 +209,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<Payload> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[0]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[1]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
@@ -141,6 +235,7 @@ namespace Grpc.Testing {
     public const int TypeFieldNumber = 1;
     private global::Grpc.Testing.PayloadType type_ = 0;
     /// <summary>
+    ///  DEPRECATED, don't use. To be removed shortly.
     ///  The type of data in body.
     /// </summary>
     public global::Grpc.Testing.PayloadType Type {
@@ -255,7 +350,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<EchoStatus> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[1]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[2]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
@@ -388,7 +483,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<SimpleRequest> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[2]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[3]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
@@ -407,8 +502,9 @@ namespace Grpc.Testing {
       Payload = other.payload_ != null ? other.Payload.Clone() : null;
       fillUsername_ = other.fillUsername_;
       fillOauthScope_ = other.fillOauthScope_;
-      responseCompression_ = other.responseCompression_;
+      ResponseCompressed = other.responseCompressed_ != null ? other.ResponseCompressed.Clone() : null;
       ResponseStatus = other.responseStatus_ != null ? other.ResponseStatus.Clone() : null;
+      ExpectCompressed = other.expectCompressed_ != null ? other.ExpectCompressed.Clone() : null;
     }
 
     public SimpleRequest Clone() {
@@ -419,6 +515,7 @@ namespace Grpc.Testing {
     public const int ResponseTypeFieldNumber = 1;
     private global::Grpc.Testing.PayloadType responseType_ = 0;
     /// <summary>
+    ///  DEPRECATED, don't use. To be removed shortly.
     ///  Desired payload type in the response from the server.
     ///  If response_type is RANDOM, server randomly chooses one from other formats.
     /// </summary>
@@ -434,7 +531,6 @@ namespace Grpc.Testing {
     private int responseSize_;
     /// <summary>
     ///  Desired payload size in the response from the server.
-    ///  If response_type is COMPRESSABLE, this denotes the size before compression.
     /// </summary>
     public int ResponseSize {
       get { return responseSize_; }
@@ -482,16 +578,19 @@ namespace Grpc.Testing {
       }
     }
 
-    /// <summary>Field number for the "response_compression" field.</summary>
-    public const int ResponseCompressionFieldNumber = 6;
-    private global::Grpc.Testing.CompressionType responseCompression_ = 0;
+    /// <summary>Field number for the "response_compressed" field.</summary>
+    public const int ResponseCompressedFieldNumber = 6;
+    private global::Grpc.Testing.BoolValue responseCompressed_;
     /// <summary>
-    ///  Compression algorithm to be used by the server for the response (stream)
+    ///  Whether to request the server to compress the response. This field is
+    ///  "nullable" in order to interoperate seamlessly with clients not able to
+    ///  implement the full compression tests by introspecting the call to verify
+    ///  the response's compression status.
     /// </summary>
-    public global::Grpc.Testing.CompressionType ResponseCompression {
-      get { return responseCompression_; }
+    public global::Grpc.Testing.BoolValue ResponseCompressed {
+      get { return responseCompressed_; }
       set {
-        responseCompression_ = value;
+        responseCompressed_ = value;
       }
     }
 
@@ -508,6 +607,19 @@ namespace Grpc.Testing {
       }
     }
 
+    /// <summary>Field number for the "expect_compressed" field.</summary>
+    public const int ExpectCompressedFieldNumber = 8;
+    private global::Grpc.Testing.BoolValue expectCompressed_;
+    /// <summary>
+    ///  Whether the server should expect this request to be compressed.
+    /// </summary>
+    public global::Grpc.Testing.BoolValue ExpectCompressed {
+      get { return expectCompressed_; }
+      set {
+        expectCompressed_ = value;
+      }
+    }
+
     public override bool Equals(object other) {
       return Equals(other as SimpleRequest);
     }
@@ -524,8 +636,9 @@ namespace Grpc.Testing {
       if (!object.Equals(Payload, other.Payload)) return false;
       if (FillUsername != other.FillUsername) return false;
       if (FillOauthScope != other.FillOauthScope) return false;
-      if (ResponseCompression != other.ResponseCompression) return false;
+      if (!object.Equals(ResponseCompressed, other.ResponseCompressed)) return false;
       if (!object.Equals(ResponseStatus, other.ResponseStatus)) return false;
+      if (!object.Equals(ExpectCompressed, other.ExpectCompressed)) return false;
       return true;
     }
 
@@ -536,8 +649,9 @@ namespace Grpc.Testing {
       if (payload_ != null) hash ^= Payload.GetHashCode();
       if (FillUsername != false) hash ^= FillUsername.GetHashCode();
       if (FillOauthScope != false) hash ^= FillOauthScope.GetHashCode();
-      if (ResponseCompression != 0) hash ^= ResponseCompression.GetHashCode();
+      if (responseCompressed_ != null) hash ^= ResponseCompressed.GetHashCode();
       if (responseStatus_ != null) hash ^= ResponseStatus.GetHashCode();
+      if (expectCompressed_ != null) hash ^= ExpectCompressed.GetHashCode();
       return hash;
     }
 
@@ -566,14 +680,18 @@ namespace Grpc.Testing {
         output.WriteRawTag(40);
         output.WriteBool(FillOauthScope);
       }
-      if (ResponseCompression != 0) {
-        output.WriteRawTag(48);
-        output.WriteEnum((int) ResponseCompression);
+      if (responseCompressed_ != null) {
+        output.WriteRawTag(50);
+        output.WriteMessage(ResponseCompressed);
       }
       if (responseStatus_ != null) {
         output.WriteRawTag(58);
         output.WriteMessage(ResponseStatus);
       }
+      if (expectCompressed_ != null) {
+        output.WriteRawTag(66);
+        output.WriteMessage(ExpectCompressed);
+      }
     }
 
     public int CalculateSize() {
@@ -593,12 +711,15 @@ namespace Grpc.Testing {
       if (FillOauthScope != false) {
         size += 1 + 1;
       }
-      if (ResponseCompression != 0) {
-        size += 1 + pb::CodedOutputStream.ComputeEnumSize((int) ResponseCompression);
+      if (responseCompressed_ != null) {
+        size += 1 + pb::CodedOutputStream.ComputeMessageSize(ResponseCompressed);
       }
       if (responseStatus_ != null) {
         size += 1 + pb::CodedOutputStream.ComputeMessageSize(ResponseStatus);
       }
+      if (expectCompressed_ != null) {
+        size += 1 + pb::CodedOutputStream.ComputeMessageSize(ExpectCompressed);
+      }
       return size;
     }
 
@@ -624,8 +745,11 @@ namespace Grpc.Testing {
       if (other.FillOauthScope != false) {
         FillOauthScope = other.FillOauthScope;
       }
-      if (other.ResponseCompression != 0) {
-        ResponseCompression = other.ResponseCompression;
+      if (other.responseCompressed_ != null) {
+        if (responseCompressed_ == null) {
+          responseCompressed_ = new global::Grpc.Testing.BoolValue();
+        }
+        ResponseCompressed.MergeFrom(other.ResponseCompressed);
       }
       if (other.responseStatus_ != null) {
         if (responseStatus_ == null) {
@@ -633,6 +757,12 @@ namespace Grpc.Testing {
         }
         ResponseStatus.MergeFrom(other.ResponseStatus);
       }
+      if (other.expectCompressed_ != null) {
+        if (expectCompressed_ == null) {
+          expectCompressed_ = new global::Grpc.Testing.BoolValue();
+        }
+        ExpectCompressed.MergeFrom(other.ExpectCompressed);
+      }
     }
 
     public void MergeFrom(pb::CodedInputStream input) {
@@ -665,8 +795,11 @@ namespace Grpc.Testing {
             FillOauthScope = input.ReadBool();
             break;
           }
-          case 48: {
-            responseCompression_ = (global::Grpc.Testing.CompressionType) input.ReadEnum();
+          case 50: {
+            if (responseCompressed_ == null) {
+              responseCompressed_ = new global::Grpc.Testing.BoolValue();
+            }
+            input.ReadMessage(responseCompressed_);
             break;
           }
           case 58: {
@@ -676,6 +809,13 @@ namespace Grpc.Testing {
             input.ReadMessage(responseStatus_);
             break;
           }
+          case 66: {
+            if (expectCompressed_ == null) {
+              expectCompressed_ = new global::Grpc.Testing.BoolValue();
+            }
+            input.ReadMessage(expectCompressed_);
+            break;
+          }
         }
       }
     }
@@ -691,7 +831,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<SimpleResponse> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[3]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[4]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
@@ -867,7 +1007,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<StreamingInputCallRequest> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[4]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[5]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
@@ -882,6 +1022,7 @@ namespace Grpc.Testing {
 
     public StreamingInputCallRequest(StreamingInputCallRequest other) : this() {
       Payload = other.payload_ != null ? other.Payload.Clone() : null;
+      ExpectCompressed = other.expectCompressed_ != null ? other.ExpectCompressed.Clone() : null;
     }
 
     public StreamingInputCallRequest Clone() {
@@ -901,6 +1042,22 @@ namespace Grpc.Testing {
       }
     }
 
+    /// <summary>Field number for the "expect_compressed" field.</summary>
+    public const int ExpectCompressedFieldNumber = 2;
+    private global::Grpc.Testing.BoolValue expectCompressed_;
+    /// <summary>
+    ///  Whether the server should expect this request to be compressed. This field
+    ///  is "nullable" in order to interoperate seamlessly with servers not able to
+    ///  implement the full compression tests by introspecting the call to verify
+    ///  the request's compression status.
+    /// </summary>
+    public global::Grpc.Testing.BoolValue ExpectCompressed {
+      get { return expectCompressed_; }
+      set {
+        expectCompressed_ = value;
+      }
+    }
+
     public override bool Equals(object other) {
       return Equals(other as StreamingInputCallRequest);
     }
@@ -913,12 +1070,14 @@ namespace Grpc.Testing {
         return true;
       }
       if (!object.Equals(Payload, other.Payload)) return false;
+      if (!object.Equals(ExpectCompressed, other.ExpectCompressed)) return false;
       return true;
     }
 
     public override int GetHashCode() {
       int hash = 1;
       if (payload_ != null) hash ^= Payload.GetHashCode();
+      if (expectCompressed_ != null) hash ^= ExpectCompressed.GetHashCode();
       return hash;
     }
 
@@ -931,6 +1090,10 @@ namespace Grpc.Testing {
         output.WriteRawTag(10);
         output.WriteMessage(Payload);
       }
+      if (expectCompressed_ != null) {
+        output.WriteRawTag(18);
+        output.WriteMessage(ExpectCompressed);
+      }
     }
 
     public int CalculateSize() {
@@ -938,6 +1101,9 @@ namespace Grpc.Testing {
       if (payload_ != null) {
         size += 1 + pb::CodedOutputStream.ComputeMessageSize(Payload);
       }
+      if (expectCompressed_ != null) {
+        size += 1 + pb::CodedOutputStream.ComputeMessageSize(ExpectCompressed);
+      }
       return size;
     }
 
@@ -951,6 +1117,12 @@ namespace Grpc.Testing {
         }
         Payload.MergeFrom(other.Payload);
       }
+      if (other.expectCompressed_ != null) {
+        if (expectCompressed_ == null) {
+          expectCompressed_ = new global::Grpc.Testing.BoolValue();
+        }
+        ExpectCompressed.MergeFrom(other.ExpectCompressed);
+      }
     }
 
     public void MergeFrom(pb::CodedInputStream input) {
@@ -967,6 +1139,13 @@ namespace Grpc.Testing {
             input.ReadMessage(payload_);
             break;
           }
+          case 18: {
+            if (expectCompressed_ == null) {
+              expectCompressed_ = new global::Grpc.Testing.BoolValue();
+            }
+            input.ReadMessage(expectCompressed_);
+            break;
+          }
         }
       }
     }
@@ -982,7 +1161,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<StreamingInputCallResponse> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[5]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[6]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
@@ -1091,7 +1270,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<ResponseParameters> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[6]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[7]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
@@ -1107,6 +1286,7 @@ namespace Grpc.Testing {
     public ResponseParameters(ResponseParameters other) : this() {
       size_ = other.size_;
       intervalUs_ = other.intervalUs_;
+      Compressed = other.compressed_ != null ? other.Compressed.Clone() : null;
     }
 
     public ResponseParameters Clone() {
@@ -1118,7 +1298,6 @@ namespace Grpc.Testing {
     private int size_;
     /// <summary>
     ///  Desired payload sizes in responses from the server.
-    ///  If response_type is COMPRESSABLE, this denotes the size before compression.
     /// </summary>
     public int Size {
       get { return size_; }
@@ -1141,6 +1320,22 @@ namespace Grpc.Testing {
       }
     }
 
+    /// <summary>Field number for the "compressed" field.</summary>
+    public const int CompressedFieldNumber = 3;
+    private global::Grpc.Testing.BoolValue compressed_;
+    /// <summary>
+    ///  Whether to request the server to compress the response. This field is
+    ///  "nullable" in order to interoperate seamlessly with clients not able to
+    ///  implement the full compression tests by introspecting the call to verify
+    ///  the response's compression status.
+    /// </summary>
+    public global::Grpc.Testing.BoolValue Compressed {
+      get { return compressed_; }
+      set {
+        compressed_ = value;
+      }
+    }
+
     public override bool Equals(object other) {
       return Equals(other as ResponseParameters);
     }
@@ -1154,6 +1349,7 @@ namespace Grpc.Testing {
       }
       if (Size != other.Size) return false;
       if (IntervalUs != other.IntervalUs) return false;
+      if (!object.Equals(Compressed, other.Compressed)) return false;
       return true;
     }
 
@@ -1161,6 +1357,7 @@ namespace Grpc.Testing {
       int hash = 1;
       if (Size != 0) hash ^= Size.GetHashCode();
       if (IntervalUs != 0) hash ^= IntervalUs.GetHashCode();
+      if (compressed_ != null) hash ^= Compressed.GetHashCode();
       return hash;
     }
 
@@ -1177,6 +1374,10 @@ namespace Grpc.Testing {
         output.WriteRawTag(16);
         output.WriteInt32(IntervalUs);
       }
+      if (compressed_ != null) {
+        output.WriteRawTag(26);
+        output.WriteMessage(Compressed);
+      }
     }
 
     public int CalculateSize() {
@@ -1187,6 +1388,9 @@ namespace Grpc.Testing {
       if (IntervalUs != 0) {
         size += 1 + pb::CodedOutputStream.ComputeInt32Size(IntervalUs);
       }
+      if (compressed_ != null) {
+        size += 1 + pb::CodedOutputStream.ComputeMessageSize(Compressed);
+      }
       return size;
     }
 
@@ -1200,6 +1404,12 @@ namespace Grpc.Testing {
       if (other.IntervalUs != 0) {
         IntervalUs = other.IntervalUs;
       }
+      if (other.compressed_ != null) {
+        if (compressed_ == null) {
+          compressed_ = new global::Grpc.Testing.BoolValue();
+        }
+        Compressed.MergeFrom(other.Compressed);
+      }
     }
 
     public void MergeFrom(pb::CodedInputStream input) {
@@ -1217,6 +1427,13 @@ namespace Grpc.Testing {
             IntervalUs = input.ReadInt32();
             break;
           }
+          case 26: {
+            if (compressed_ == null) {
+              compressed_ = new global::Grpc.Testing.BoolValue();
+            }
+            input.ReadMessage(compressed_);
+            break;
+          }
         }
       }
     }
@@ -1232,7 +1449,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<StreamingOutputCallRequest> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[7]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[8]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
@@ -1249,7 +1466,6 @@ namespace Grpc.Testing {
       responseType_ = other.responseType_;
       responseParameters_ = other.responseParameters_.Clone();
       Payload = other.payload_ != null ? other.Payload.Clone() : null;
-      responseCompression_ = other.responseCompression_;
       ResponseStatus = other.responseStatus_ != null ? other.ResponseStatus.Clone() : null;
     }
 
@@ -1261,6 +1477,7 @@ namespace Grpc.Testing {
     public const int ResponseTypeFieldNumber = 1;
     private global::Grpc.Testing.PayloadType responseType_ = 0;
     /// <summary>
+    ///  DEPRECATED, don't use. To be removed shortly.
     ///  Desired payload type in the response from the server.
     ///  If response_type is RANDOM, the payload from each response in the stream
     ///  might be of different types. This is to simulate a mixed type of payload
@@ -1298,19 +1515,6 @@ namespace Grpc.Testing {
       }
     }
 
-    /// <summary>Field number for the "response_compression" field.</summary>
-    public const int ResponseCompressionFieldNumber = 6;
-    private global::Grpc.Testing.CompressionType responseCompression_ = 0;
-    /// <summary>
-    ///  Compression algorithm to be used by the server for the response (stream)
-    /// </summary>
-    public global::Grpc.Testing.CompressionType ResponseCompression {
-      get { return responseCompression_; }
-      set {
-        responseCompression_ = value;
-      }
-    }
-
     /// <summary>Field number for the "response_status" field.</summary>
     public const int ResponseStatusFieldNumber = 7;
     private global::Grpc.Testing.EchoStatus responseStatus_;
@@ -1338,7 +1542,6 @@ namespace Grpc.Testing {
       if (ResponseType != other.ResponseType) return false;
       if(!responseParameters_.Equals(other.responseParameters_)) return false;
       if (!object.Equals(Payload, other.Payload)) return false;
-      if (ResponseCompression != other.ResponseCompression) return false;
       if (!object.Equals(ResponseStatus, other.ResponseStatus)) return false;
       return true;
     }
@@ -1348,7 +1551,6 @@ namespace Grpc.Testing {
       if (ResponseType != 0) hash ^= ResponseType.GetHashCode();
       hash ^= responseParameters_.GetHashCode();
       if (payload_ != null) hash ^= Payload.GetHashCode();
-      if (ResponseCompression != 0) hash ^= ResponseCompression.GetHashCode();
       if (responseStatus_ != null) hash ^= ResponseStatus.GetHashCode();
       return hash;
     }
@@ -1367,10 +1569,6 @@ namespace Grpc.Testing {
         output.WriteRawTag(26);
         output.WriteMessage(Payload);
       }
-      if (ResponseCompression != 0) {
-        output.WriteRawTag(48);
-        output.WriteEnum((int) ResponseCompression);
-      }
       if (responseStatus_ != null) {
         output.WriteRawTag(58);
         output.WriteMessage(ResponseStatus);
@@ -1386,9 +1584,6 @@ namespace Grpc.Testing {
       if (payload_ != null) {
         size += 1 + pb::CodedOutputStream.ComputeMessageSize(Payload);
       }
-      if (ResponseCompression != 0) {
-        size += 1 + pb::CodedOutputStream.ComputeEnumSize((int) ResponseCompression);
-      }
       if (responseStatus_ != null) {
         size += 1 + pb::CodedOutputStream.ComputeMessageSize(ResponseStatus);
       }
@@ -1409,9 +1604,6 @@ namespace Grpc.Testing {
         }
         Payload.MergeFrom(other.Payload);
       }
-      if (other.ResponseCompression != 0) {
-        ResponseCompression = other.ResponseCompression;
-      }
       if (other.responseStatus_ != null) {
         if (responseStatus_ == null) {
           responseStatus_ = new global::Grpc.Testing.EchoStatus();
@@ -1442,10 +1634,6 @@ namespace Grpc.Testing {
             input.ReadMessage(payload_);
             break;
           }
-          case 48: {
-            responseCompression_ = (global::Grpc.Testing.CompressionType) input.ReadEnum();
-            break;
-          }
           case 58: {
             if (responseStatus_ == null) {
               responseStatus_ = new global::Grpc.Testing.EchoStatus();
@@ -1468,7 +1656,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<StreamingOutputCallResponse> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[8]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[9]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
@@ -1584,7 +1772,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<ReconnectParams> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[9]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[10]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
@@ -1692,7 +1880,7 @@ namespace Grpc.Testing {
     public static pb::MessageParser<ReconnectInfo> Parser { get { return _parser; } }
 
     public static pbr::MessageDescriptor Descriptor {
-      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[10]; }
+      get { return global::Grpc.Testing.MessagesReflection.Descriptor.MessageTypes[11]; }
     }
 
     pbr::MessageDescriptor pb::IMessage.Descriptor {
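
The regenerated messages above replace the enum-valued `response_compression` field with nullable `BoolValue` wrappers (`response_compressed` and `expect_compressed` on `SimpleRequest`, `expect_compressed` on `StreamingInputCallRequest`, `compressed` on `ResponseParameters`). A minimal sketch of how an interop client might populate the new fields; the payload sizes are illustrative only:

```csharp
using Google.Protobuf;
using Grpc.Testing;

// Ask the server to compress its response, and declare that this request body
// itself is not compressed. Wrapping the booleans in BoolValue keeps the fields
// nullable, so clients that cannot verify compression can simply leave them unset.
var request = new SimpleRequest
{
    ResponseSize = 314159,   // illustrative size
    Payload = new Payload { Body = ByteString.CopyFrom(new byte[271828]) },
    ResponseCompressed = new BoolValue { Value = true },
    ExpectCompressed = new BoolValue { Value = false }
};

// On streaming calls, compression is now requested per response via ResponseParameters.
var streamingRequest = new StreamingOutputCallRequest
{
    ResponseParameters =
    {
        new ResponseParameters { Size = 31415, Compressed = new BoolValue { Value = true } }
    }
};
```
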
diff --git a/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs b/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs
index f95af5008f1c30c75729ce55bef7b4e6c31eb530..eb3bb8a28bbce51bb2ee390e7f986d6baf491110 100644
--- a/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs
+++ b/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs
@@ -40,7 +40,6 @@ using System.Threading.Tasks;
 using Grpc.Core;
 using Grpc.Core.Utils;
 using Grpc.Testing;
-using Moq;
 using NUnit.Framework;
 
 namespace Grpc.IntegrationTesting
@@ -52,19 +51,14 @@ namespace Grpc.IntegrationTesting
         Channel channel;
         TestService.TestServiceClient client;
         List<ChannelOption> options;
-        Mock<TestService.TestServiceBase> serviceMock;
         AsyncAuthInterceptor asyncAuthInterceptor;
 
         [SetUp]
         public void Init()
         {
-            serviceMock = new Mock<TestService.TestServiceBase>();
-            serviceMock.Setup(m => m.UnaryCall(It.IsAny<SimpleRequest>(), It.IsAny<ServerCallContext>()))
-                .Returns(new Func<SimpleRequest, ServerCallContext, Task<SimpleResponse>>(UnaryCallHandler));
-
             server = new Server
             {
-                Services = { TestService.BindService(serviceMock.Object) },
+                Services = { TestService.BindService(new FakeTestService()) },
                 Ports = { { Host, ServerPort.PickUnused, TestCredentials.CreateSslServerCredentials() } }
             };
             server.Start();
@@ -94,26 +88,29 @@ namespace Grpc.IntegrationTesting
             var channelCredentials = ChannelCredentials.Create(TestCredentials.CreateSslCredentials(),
                 CallCredentials.FromInterceptor(asyncAuthInterceptor));
             channel = new Channel(Host, server.Ports.Single().BoundPort, channelCredentials, options);
-            client = TestService.NewClient(channel);
+            client = new TestService.TestServiceClient(channel);
 
-            client.UnaryCall(new SimpleRequest {});
+            client.UnaryCall(new SimpleRequest { });
         }
 
         [Test]
         public void MetadataCredentials_PerCall()
         {
             channel = new Channel(Host, server.Ports.Single().BoundPort, TestCredentials.CreateSslCredentials(), options);
-            client = TestService.NewClient(channel);
+            client = new TestService.TestServiceClient(channel);
 
             var callCredentials = CallCredentials.FromInterceptor(asyncAuthInterceptor);
             client.UnaryCall(new SimpleRequest { }, new CallOptions(credentials: callCredentials));
         }
 
-        private Task<SimpleResponse> UnaryCallHandler(SimpleRequest request, ServerCallContext context)
+        private class FakeTestService : TestService.TestServiceBase
         {
-            var authToken = context.RequestHeaders.First((entry) => entry.Key == "authorization").Value;
-            Assert.AreEqual("SECRET_TOKEN", authToken);
-            return Task.FromResult(new SimpleResponse());
+            public override Task<SimpleResponse> UnaryCall(SimpleRequest request, ServerCallContext context)
+            {
+                var authToken = context.RequestHeaders.First((entry) => entry.Key == "authorization").Value;
+                Assert.AreEqual("SECRET_TOKEN", authToken);
+                return Task.FromResult(new SimpleResponse());
+            }
         }
     }
 }
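
The test above swaps the Moq-generated service mock for a hand-written subclass of the generated `TestService.TestServiceBase`, which is what lets the `Moq` reference stay confined to the `net45` target in the new `project.json` below. A minimal sketch of the same pattern with a hypothetical handler and helper class, assuming the usual Grpc.Core server setup:

```csharp
using System.Threading.Tasks;
using Google.Protobuf;
using Grpc.Core;
using Grpc.Testing;

// Hand-rolled test double: override only the RPCs the test actually exercises.
class EchoTestService : TestService.TestServiceBase   // hypothetical name
{
    public override Task<SimpleResponse> UnaryCall(SimpleRequest request, ServerCallContext context)
    {
        // Assumption: echo back a payload of the requested size.
        var body = ByteString.CopyFrom(new byte[request.ResponseSize]);
        return Task.FromResult(new SimpleResponse { Payload = new Payload { Body = body } });
    }
}

static class FakeServerSetup   // hypothetical helper
{
    public static Server StartLocalServer()
    {
        // Bound exactly like a real implementation (insecure credentials, local tests only).
        var server = new Server
        {
            Services = { TestService.BindService(new EchoTestService()) },
            Ports = { { "localhost", ServerPort.PickUnused, ServerCredentials.Insecure } }
        };
        server.Start();
        return server;
    }
}
```
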
diff --git a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
index 22bd27ec0aa5ba57c8a3e57702d92274dca18118..040798e3c2147351dc6cec52024cfe80bdac5a6f 100644
--- a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
@@ -97,17 +97,22 @@ namespace Grpc.Testing {
     /// <summary>Client for MetricsService</summary>
     public class MetricsServiceClient : ClientBase<MetricsServiceClient>
     {
+      /// <summary>Creates a new client for MetricsService</summary>
+      /// <param name="channel">The channel to use to make remote calls.</param>
       public MetricsServiceClient(Channel channel) : base(channel)
       {
       }
+      /// <summary>Creates a new client for MetricsService that uses a custom <c>CallInvoker</c>.</summary>
+      /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
       public MetricsServiceClient(CallInvoker callInvoker) : base(callInvoker)
       {
       }
-      ///<summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+      /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
       protected MetricsServiceClient() : base()
       {
       }
-      ///<summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <param name="configuration">The client configuration.</param>
       protected MetricsServiceClient(ClientBaseConfiguration configuration) : base(configuration)
       {
       }
@@ -162,12 +167,6 @@ namespace Grpc.Testing {
       }
     }
 
-    /// <summary>Creates a new client for MetricsService</summary>
-    public static MetricsServiceClient NewClient(Channel channel)
-    {
-      return new MetricsServiceClient(channel);
-    }
-
     /// <summary>Creates service definition that can be registered with a server</summary>
     public static ServerServiceDefinition BindService(MetricsServiceBase serviceImpl)
     {
diff --git a/src/csharp/Grpc.IntegrationTesting/NUnitMain.cs b/src/csharp/Grpc.IntegrationTesting/NUnitMain.cs
index d8902de08f53610bb8ed75b0fb6d44964dd8014b..100ff0b5de9c3d7529e7cfd0613afe56eb447b77 100644
--- a/src/csharp/Grpc.IntegrationTesting/NUnitMain.cs
+++ b/src/csharp/Grpc.IntegrationTesting/NUnitMain.cs
@@ -49,7 +49,7 @@ namespace Grpc.IntegrationTesting
         {
             // Make logger immune to NUnit capturing stdout and stderr to workaround https://github.com/nunit/nunit/issues/1406.
             GrpcEnvironment.SetLogger(new TextWriterLogger(Console.Error));
-#if DOTNET5_4
+#if NETSTANDARD1_5
             return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args, new ExtendedTextWrapper(Console.Out), Console.In);
 #else
             return new AutoRun().Execute(args);
diff --git a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
index 9c99296115ce4a7f40f8c5479f053e17e19fcd56..e205dea93ea1288ffe61dbc78606831102cf8839 100644
--- a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
@@ -93,17 +93,22 @@ namespace Grpc.Testing {
     /// <summary>Client for BenchmarkService</summary>
     public class BenchmarkServiceClient : ClientBase<BenchmarkServiceClient>
     {
+      /// <summary>Creates a new client for BenchmarkService</summary>
+      /// <param name="channel">The channel to use to make remote calls.</param>
       public BenchmarkServiceClient(Channel channel) : base(channel)
       {
       }
+      /// <summary>Creates a new client for BenchmarkService that uses a custom <c>CallInvoker</c>.</summary>
+      /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
       public BenchmarkServiceClient(CallInvoker callInvoker) : base(callInvoker)
       {
       }
-      ///<summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+      /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
       protected BenchmarkServiceClient() : base()
       {
       }
-      ///<summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <param name="configuration">The client configuration.</param>
       protected BenchmarkServiceClient(ClientBaseConfiguration configuration) : base(configuration)
       {
       }
@@ -162,12 +167,6 @@ namespace Grpc.Testing {
       }
     }
 
-    /// <summary>Creates a new client for BenchmarkService</summary>
-    public static BenchmarkServiceClient NewClient(Channel channel)
-    {
-      return new BenchmarkServiceClient(channel);
-    }
-
     /// <summary>Creates service definition that can be registered with a server</summary>
     public static ServerServiceDefinition BindService(BenchmarkServiceBase serviceImpl)
     {
@@ -273,17 +272,22 @@ namespace Grpc.Testing {
     /// <summary>Client for WorkerService</summary>
     public class WorkerServiceClient : ClientBase<WorkerServiceClient>
     {
+      /// <summary>Creates a new client for WorkerService</summary>
+      /// <param name="channel">The channel to use to make remote calls.</param>
       public WorkerServiceClient(Channel channel) : base(channel)
       {
       }
+      /// <summary>Creates a new client for WorkerService that uses a custom <c>CallInvoker</c>.</summary>
+      /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
       public WorkerServiceClient(CallInvoker callInvoker) : base(callInvoker)
       {
       }
-      ///<summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+      /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
       protected WorkerServiceClient() : base()
       {
       }
-      ///<summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <param name="configuration">The client configuration.</param>
       protected WorkerServiceClient(ClientBaseConfiguration configuration) : base(configuration)
       {
       }
@@ -398,12 +402,6 @@ namespace Grpc.Testing {
       }
     }
 
-    /// <summary>Creates a new client for WorkerService</summary>
-    public static WorkerServiceClient NewClient(Channel channel)
-    {
-      return new WorkerServiceClient(channel);
-    }
-
     /// <summary>Creates service definition that can be registered with a server</summary>
     public static ServerServiceDefinition BindService(WorkerServiceBase serviceImpl)
     {
diff --git a/src/csharp/Grpc.IntegrationTesting/SslCredentialsTest.cs b/src/csharp/Grpc.IntegrationTesting/SslCredentialsTest.cs
index 3df45b5f7084f5748a587e8f2b2ecd5b3adbdf86..f85e272711f96dd8e6b921b149bf0cfc7fbdc5c2 100644
--- a/src/csharp/Grpc.IntegrationTesting/SslCredentialsTest.cs
+++ b/src/csharp/Grpc.IntegrationTesting/SslCredentialsTest.cs
@@ -79,7 +79,7 @@ namespace Grpc.IntegrationTesting
             };
 
             channel = new Channel(Host, server.Ports.Single().BoundPort, clientCredentials, options);
-            client = TestService.NewClient(channel);
+            client = new TestService.TestServiceClient(channel);
         }
 
         [TestFixtureTearDown]
diff --git a/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs b/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs
index 4d6ca7ece57834a32ce62638b4e943402681eb83..74ee040ae49657e002160646f081e0bc1eddf432 100644
--- a/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs
+++ b/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs
@@ -148,7 +148,7 @@ namespace Grpc.IntegrationTesting
                     channels.Add(channel);
                     for (int j = 0; j < options.NumStubsPerChannel; j++)
                     {
-                        var client = TestService.NewClient(channel);
+                        var client = new TestService.TestServiceClient(channel);
                         var task = Task.Factory.StartNew(() => RunBodyAsync(client).GetAwaiter().GetResult(),
                             TaskCreationOptions.LongRunning);
                         tasks.Add(task);  
diff --git a/src/csharp/Grpc.IntegrationTesting/TestCredentials.cs b/src/csharp/Grpc.IntegrationTesting/TestCredentials.cs
index 774563d752fd3573f7ff6686e417eb93a586d32d..60b9cf4e0b7385ca802804e43c78fcdd3070746a 100644
--- a/src/csharp/Grpc.IntegrationTesting/TestCredentials.cs
+++ b/src/csharp/Grpc.IntegrationTesting/TestCredentials.cs
@@ -90,7 +90,7 @@ namespace Grpc.IntegrationTesting
 
         private static string GetPath(string relativePath)
         {
-            var assemblyDir = Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location);
+            var assemblyDir = Path.GetDirectoryName(typeof(TestCredentials).GetTypeInfo().Assembly.Location);
             return Path.Combine(assemblyDir, relativePath);
         }
     }
diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
index 6c252013f84cd15e86a4b6f72abe6e5421915956..3e149da3e01312832169ed5a808b96188c809faf 100644
--- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
@@ -168,17 +168,22 @@ namespace Grpc.Testing {
     /// <summary>Client for TestService</summary>
     public class TestServiceClient : ClientBase<TestServiceClient>
     {
+      /// <summary>Creates a new client for TestService</summary>
+      /// <param name="channel">The channel to use to make remote calls.</param>
       public TestServiceClient(Channel channel) : base(channel)
       {
       }
+      /// <summary>Creates a new client for TestService that uses a custom <c>CallInvoker</c>.</summary>
+      /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
       public TestServiceClient(CallInvoker callInvoker) : base(callInvoker)
       {
       }
-      ///<summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+      /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
       protected TestServiceClient() : base()
       {
       }
-      ///<summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <param name="configuration">The client configuration.</param>
       protected TestServiceClient(ClientBaseConfiguration configuration) : base(configuration)
       {
       }
@@ -315,12 +320,6 @@ namespace Grpc.Testing {
       }
     }
 
-    /// <summary>Creates a new client for TestService</summary>
-    public static TestServiceClient NewClient(Channel channel)
-    {
-      return new TestServiceClient(channel);
-    }
-
     /// <summary>Creates service definition that can be registered with a server</summary>
     public static ServerServiceDefinition BindService(TestServiceBase serviceImpl)
     {
@@ -373,17 +372,22 @@ namespace Grpc.Testing {
     /// <summary>Client for UnimplementedService</summary>
     public class UnimplementedServiceClient : ClientBase<UnimplementedServiceClient>
     {
+      /// <summary>Creates a new client for UnimplementedService</summary>
+      /// <param name="channel">The channel to use to make remote calls.</param>
       public UnimplementedServiceClient(Channel channel) : base(channel)
       {
       }
+      /// <summary>Creates a new client for UnimplementedService that uses a custom <c>CallInvoker</c>.</summary>
+      /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
       public UnimplementedServiceClient(CallInvoker callInvoker) : base(callInvoker)
       {
       }
-      ///<summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+      /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
       protected UnimplementedServiceClient() : base()
       {
       }
-      ///<summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <param name="configuration">The client configuration.</param>
       protected UnimplementedServiceClient(ClientBaseConfiguration configuration) : base(configuration)
       {
       }
@@ -422,12 +426,6 @@ namespace Grpc.Testing {
       }
     }
 
-    /// <summary>Creates a new client for UnimplementedService</summary>
-    public static UnimplementedServiceClient NewClient(Channel channel)
-    {
-      return new UnimplementedServiceClient(channel);
-    }
-
     /// <summary>Creates service definition that can be registered with a server</summary>
     public static ServerServiceDefinition BindService(UnimplementedServiceBase serviceImpl)
     {
@@ -485,17 +483,22 @@ namespace Grpc.Testing {
     /// <summary>Client for ReconnectService</summary>
     public class ReconnectServiceClient : ClientBase<ReconnectServiceClient>
     {
+      /// <summary>Creates a new client for ReconnectService</summary>
+      /// <param name="channel">The channel to use to make remote calls.</param>
       public ReconnectServiceClient(Channel channel) : base(channel)
       {
       }
+      /// <summary>Creates a new client for ReconnectService that uses a custom <c>CallInvoker</c>.</summary>
+      /// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
       public ReconnectServiceClient(CallInvoker callInvoker) : base(callInvoker)
       {
       }
-      ///<summary>Protected parameterless constructor to allow creation of test doubles.</summary>
+      /// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
       protected ReconnectServiceClient() : base()
       {
       }
-      ///<summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <summary>Protected constructor to allow creation of configured clients.</summary>
+      /// <param name="configuration">The client configuration.</param>
       protected ReconnectServiceClient(ClientBaseConfiguration configuration) : base(configuration)
       {
       }
@@ -538,12 +541,6 @@ namespace Grpc.Testing {
       }
     }
 
-    /// <summary>Creates a new client for ReconnectService</summary>
-    public static ReconnectServiceClient NewClient(Channel channel)
-    {
-      return new ReconnectServiceClient(channel);
-    }
-
     /// <summary>Creates service definition that can be registered with a server</summary>
     public static ServerServiceDefinition BindService(ReconnectServiceBase serviceImpl)
     {
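
Besides the expanded XML doc comments, the regenerated clients above drop the static `NewClient(channel)` factories, which is why the tests in this change construct clients directly. A minimal migration sketch; the helper class is hypothetical, and `DefaultCallInvoker` from Grpc.Core is only one way to obtain a `CallInvoker`:

```csharp
using Grpc.Core;
using Grpc.Testing;

static class Clients   // hypothetical helper, for illustration only
{
    // Before this change callers wrote: TestService.NewClient(channel)
    public static TestService.TestServiceClient FromChannel(Channel channel)
    {
        return new TestService.TestServiceClient(channel);
    }

    // The CallInvoker-based constructor is useful when calls need to be wrapped or faked.
    public static TestService.TestServiceClient FromInvoker(Channel channel)
    {
        CallInvoker invoker = new DefaultCallInvoker(channel);
        return new TestService.TestServiceClient(invoker);
    }
}
```
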
diff --git a/src/csharp/Grpc.IntegrationTesting/project.json b/src/csharp/Grpc.IntegrationTesting/project.json
new file mode 100644
index 0000000000000000000000000000000000000000..6297600ddc31e58995f532e89c09dad7b8e76615
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting/project.json
@@ -0,0 +1,86 @@
+{
+  "buildOptions": {
+    "emitEntryPoint": true
+  },
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "include": "data/*",
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          "include": "data/*",
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+
+  "dependencies": {
+    "Grpc.Auth": {
+      "target": "project"
+    },
+    "Grpc.Core": {
+      "target": "project"
+    },
+    "Google.Protobuf": "3.0.0-beta3",
+    "CommandLineParser": "1.9.71",
+    "NUnit": "3.2.0",
+    "NUnitLite": "3.2.0-*"
+  },
+  "frameworks": {
+    "net45": {
+      "dependencies": {
+        "Moq": "4.2.1510.2205"
+      },
+      "frameworkAssemblies": {
+        "System.Runtime": "",
+        "System.IO": ""
+      }
+    },
+    "netstandard1.5": {
+      "imports": [
+        "portable-net45",
+        "net45"
+      ],
+      "dependencies": {
+        "NETStandard.Library": "1.5.0-rc2-24027",
+        "System.Linq.Expressions": "4.0.11-rc2-24027"
+      }
+    }
+  }
+}
diff --git a/src/csharp/README.md b/src/csharp/README.md
index 201c5ab0b56528cf9153945dd066832cc96e5b64..18d5945a8a1ac53868c271021eeaab5da6cad1a2 100644
--- a/src/csharp/README.md
+++ b/src/csharp/README.md
@@ -19,33 +19,13 @@ PREREQUISITES
 HOW TO USE
 --------------
 
-**Windows**
-
-- Open Visual Studio and start a new project/solution.
-
-- Add NuGet package `Grpc` as a dependency (Project options -> Manage NuGet Packages).
-  That will also pull all the transitive dependencies (including the gRPC native library that
-  gRPC C# is using internally).
-
-**Linux (Debian)**
-
-- Open MonoDevelop and start a new project/solution.
+**Windows, Linux, Mac OS X**
 
-- Add NuGet package `Grpc` as a dependency (Project -> Add NuGet packages).
-  That will also pull all the transitive dependencies (including the gRPC native library that
-  gRPC C# is using internally).
+- Open Visual Studio / MonoDevelop / Xamarin Studio and start a new project/solution.
 
-- NOTE: gRPC C# doesn't have a good story yet for shipping precompiled Linux version of Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin. You can install them using [gRPC Linuxbrew instructions][].
+- Add the [Grpc](https://www.nuget.org/packages/Grpc/) NuGet package as a dependency (Project options -> Manage NuGet Packages).
 
-**Mac OS X**
-
-- Open Xamarin Studio and start a new project/solution.
-
-- Add NuGet package `Grpc` as a dependency (Project -> Add NuGet packages).
-  That will also pull all the transitive dependencies (including the gRPC native library that
-  gRPC C# is using internally).
-
-- NOTE: gRPC C# doesn't have a good story yet for shipping precompiled Mac OS X version of Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin. You can install them using [gRPC Homebrew instructions][].
+- To be able to generate code from Protocol Buffer (`.proto`) file definitions, add the [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/) NuGet package, which contains the Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin.
 
 BUILD FROM SOURCE
 -----------------
@@ -61,26 +41,15 @@ If you are a user of gRPC C#, go to Usage section above.
 - Open `src\csharp\Grpc.sln` (path is relative to gRPC repository root)
   using Visual Studio
 
-**Linux**
+**Linux and Mac OS X**
 
 - The grpc_csharp_ext native library needs to be built so you can build the gRPC C# solution:
-  ```sh
-  # from the gRPC repository root
-  $ make CONFIG=dbg grpc_csharp_ext
-  ```
-
-- Use MonoDevelop to open the solution Grpc.sln
-
-**Mac OS X**
-
-- The grpc_csharp_ext native library needs to be built so you can build the gRPC C# solution.
-
   ```sh
   # from the gRPC repository root
   $ tools/run_tests/run_tests.py -c dbg -l csharp --build_only
   ```
 
-- Use Xamarin Studio to open the solution Grpc.sln
+- Use MonoDevelop / Xamarin Studio to open the solution Grpc.sln
 
 RUNNING TESTS
 -------------
@@ -100,10 +69,17 @@ different languages.
 tools/run_tests/run_tests.py -l csharp
 ```
 
+ON .NET CORE SUPPORT
+------------------
+
+We are committed to providing full support for [.NET Core](https://dotnet.github.io/) in the near future,
+but currently, support for .NET Core is experimental and a work in progress.
+
 DOCUMENTATION
 -------------
-- the gRPC C# reference documentation is available online at [grpc.io][]
-- [Helloworld example][]
+- [API Reference][]
+- [Helloworld Example][]
+- [RouteGuide Tutorial][]
 
 CONTENTS
 --------
@@ -111,15 +87,15 @@ CONTENTS
 - ext:
   The extension library that wraps C API to be more digestible by C#.
 - Grpc.Auth:
-  gRPC OAuth2 support.
+  gRPC OAuth2/JWT support.
 - Grpc.Core:
   The main gRPC C# library.
 - Grpc.Examples:
   API examples for math.proto
 - Grpc.Examples.MathClient:
-  An example client that sends some requests to math server.
+  An example client that sends requests to the math server.
 - Grpc.Examples.MathServer:
-  An example client that sends some requests to math server.
+  An example server that implements a simple math service.
 - Grpc.IntegrationTesting:
   Cross-language gRPC implementation testing (interop testing).
 
@@ -130,10 +106,6 @@ Internally, gRPC C# uses a native library written in C (gRPC C core) and invokes
 
 Prior to version 0.13, installing `grpc_csharp_ext` was required to make gRPC work on Linux and MacOS. Starting with version 0.13, we have improved the packaging story significantly and precompiled versions of the native library for all supported platforms are now shipped with the NuGet package. Just installing the `Grpc` NuGet package should be the only step needed to use gRPC C#, regardless of your platform (Windows, Linux or Mac) and the bitness (32 or 64bit).
 
-[gRPC Linuxbrew instructions]:https://github.com/grpc/homebrew-grpc#quick-install-linux
-[gRPC Homebrew instructions]:https://github.com/grpc/homebrew-grpc#quick-install-linux
-[homebrew]:http://brew.sh
-[gRPC install script]:https://raw.githubusercontent.com/grpc/homebrew-grpc/master/scripts/install
-[grpc.io]: http://www.grpc.io/docs/installation/csharp.html
-[Debian jessie-backports]:http://backports.debian.org/Instructions/
-[Helloworld example]:../../examples/csharp/helloworld
+[API Reference]: http://www.grpc.io/grpc/csharp/
+[Helloworld Example]: ../../examples/csharp/helloworld
+[RouteGuide Tutorial]: http://www.grpc.io/docs/tutorials/basic/csharp.html
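
As a complement to the consolidated HOW TO USE section above, a minimal usage sketch, assuming the `Greeter` service generated from the linked Helloworld example (address and port are illustrative):

```csharp
using Grpc.Core;
using Helloworld;   // namespace generated from helloworld.proto in the linked example

class Program
{
    static void Main()
    {
        // Insecure credentials are fine for local experimentation; use SslCredentials in production.
        var channel = new Channel("127.0.0.1", 50051, ChannelCredentials.Insecure);
        var client = new Greeter.GreeterClient(channel);

        // One blocking unary call, then shut the channel down cleanly.
        var reply = client.SayHello(new HelloRequest { Name = "gRPC C#" });
        System.Console.WriteLine(reply.Message);

        channel.ShutdownAsync().Wait();
    }
}
```
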
diff --git a/src/csharp/build_packages.bat b/src/csharp/build_packages.bat
index 1cc63da9703b84435c52f492220d97f47dd591f4..f05c0241b65770ff61c18e48665eb2aa6f141683 100644
--- a/src/csharp/build_packages.bat
+++ b/src/csharp/build_packages.bat
@@ -30,7 +30,7 @@
 @rem Builds gRPC NuGet packages
 
 @rem Current package versions
-set VERSION=0.15.0-dev
+set VERSION=1.1.0-dev
 set PROTOBUF_VERSION=3.0.0-beta3
 
 @rem Packages that depend on prerelease packages (like Google.Protobuf) need to have prerelease suffix as well.
@@ -41,12 +41,12 @@ set NUGET=C:\nuget\nuget.exe
 
 @rem Collect the artifacts built by the previous build step if running on Jenkins
 @rem TODO(jtattermusch): is there a better way to do this?
-xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=windows\artifacts\* Grpc.Core\windows_x86\
-xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=windows\artifacts\* Grpc.Core\windows_x64\
-xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=linux\artifacts\* Grpc.Core\linux_x86\
-xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=linux\artifacts\* Grpc.Core\linux_x64\
-xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=macos\artifacts\* Grpc.Core\macosx_x86\
-xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=macos\artifacts\* Grpc.Core\macosx_x64\
+xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=windows\artifacts\* nativelibs\windows_x86\
+xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=windows\artifacts\* nativelibs\windows_x64\
+xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=linux\artifacts\* nativelibs\linux_x86\
+xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=linux\artifacts\* nativelibs\linux_x64\
+xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=macos\artifacts\* nativelibs\macosx_x86\
+xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=macos\artifacts\* nativelibs\macosx_x64\
 
 @rem Collect protoc artifacts built by the previous build step
 xcopy /Y /I ..\..\architecture=x86,language=protoc,platform=windows\artifacts\* protoc_plugins\windows_x86\
diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c
index 4782654250caddcde32cde6ff343cdd2b28e80de..c670ea65c79995877789e2f03691bfa64b1c4414 100644
--- a/src/csharp/ext/grpc_csharp_ext.c
+++ b/src/csharp/ext/grpc_csharp_ext.c
@@ -249,10 +249,13 @@ grpcsharp_batch_context_recv_initial_metadata(
 
 GPR_EXPORT intptr_t GPR_CALLTYPE grpcsharp_batch_context_recv_message_length(
     const grpcsharp_batch_context *ctx) {
+  grpc_byte_buffer_reader reader;
   if (!ctx->recv_message) {
     return -1;
   }
-  return (intptr_t)grpc_byte_buffer_length(ctx->recv_message);
+  /* TODO(issue:#7206): check return value of grpc_byte_buffer_reader_init. */
+  grpc_byte_buffer_reader_init(&reader, ctx->recv_message);
+  return (intptr_t)grpc_byte_buffer_length(reader.buffer_out);
 }
 
 /*
@@ -265,6 +268,7 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_batch_context_recv_message_to_buffer(
   gpr_slice slice;
   size_t offset = 0;
 
+  /* TODO(issue:#7206): check return value of grpc_byte_buffer_reader_init. */
   grpc_byte_buffer_reader_init(&reader, ctx->recv_message);
 
   while (grpc_byte_buffer_reader_next(&reader, &slice)) {
diff --git a/src/csharp/grpc.native.csharp/grpc.native.csharp.nuspec b/src/csharp/grpc.native.csharp/grpc.native.csharp.nuspec
deleted file mode 100644
index cc688e2bc718060b931a243a4c8296785294bb1b..0000000000000000000000000000000000000000
--- a/src/csharp/grpc.native.csharp/grpc.native.csharp.nuspec
+++ /dev/null
@@ -1,27 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<package>
-  <metadata>
-    <id>grpc.native.csharp</id>
-    <version>$version$</version>
-    <authors>Google Inc.</authors>
-    <owners>grpc-packages</owners>
-    <licenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</licenseUrl>
-    <projectUrl>http://github.com/grpc/grpc</projectUrl>
-    <requireLicenseAcceptance>false</requireLicenseAcceptance>
-    <description>Native extension needed by gRPC C# library. This is not the package you are looking for, it is only meant to be used as a dependency.</description>
-    <releaseNotes>Release of gRPC C core $version$ libraries.</releaseNotes>
-    <copyright>Copyright 2015</copyright>
-    <title>gRPC C# Native Extension</title>
-    <summary>Native library required by gRPC C#</summary>
-    <tags>gRPC native</tags>
-  </metadata>
-  <files>
-    <file src="grpc.native.csharp.targets" target="\build\portable-net45+netcore45+wpa81+wp8\grpc.native.csharp.targets" />
-    <file src="windows_x86/grpc_csharp_ext.dll" target="/build/native/bin/windows_x86/grpc_csharp_ext.dll" />
-    <file src="windows_x64/grpc_csharp_ext.dll" target="/build/native/bin/windows_x64/grpc_csharp_ext.dll" />
-    <file src="linux_x86/libgrpc_csharp_ext.so" target="/build/native/bin/linux_x86/libgrpc_csharp_ext.so" />
-    <file src="linux_x64/libgrpc_csharp_ext.so" target="/build/native/bin/linux_x64/libgrpc_csharp_ext.so" />
-    <file src="macosx_x86/libgrpc_csharp_ext.dylib" target="/build/native/bin/macosx_x86/libgrpc_csharp_ext.dylib" />
-    <file src="macosx_x64/libgrpc_csharp_ext.dylib" target="/build/native/bin/macosx_x64/libgrpc_csharp_ext.dylib" />
-  </files>
-</package>
diff --git a/src/node/ext/byte_buffer.cc b/src/node/ext/byte_buffer.cc
index 3479a67702976377e58373bd8618d2aa7ac720fe..a3f678f32c610e13d6cb151086e8a6b8b2bb7297 100644
--- a/src/node/ext/byte_buffer.cc
+++ b/src/node/ext/byte_buffer.cc
@@ -73,7 +73,10 @@ Local<Value> ByteBufferToBuffer(grpc_byte_buffer *buffer) {
     return scope.Escape(Nan::Null());
   }
   grpc_byte_buffer_reader reader;
-  grpc_byte_buffer_reader_init(&reader, buffer);
+  if (!grpc_byte_buffer_reader_init(&reader, buffer)) {
+    Nan::ThrowError("Error initializing byte buffer reader.");
+    return scope.Escape(Nan::Undefined());
+  }
   gpr_slice slice = grpc_byte_buffer_reader_readall(&reader);
   size_t length = GPR_SLICE_LENGTH(slice);
   char *result = new char[length];
diff --git a/src/node/ext/node_grpc.cc b/src/node/ext/node_grpc.cc
index f18ce01c6fd0e3743bf2a7da0f23e1ea17967484..745b5023d591a0565339c11d309ae7e01ef5075d 100644
--- a/src/node/ext/node_grpc.cc
+++ b/src/node/ext/node_grpc.cc
@@ -31,12 +31,16 @@
  *
  */
 
+#include <list>
+
 #include <node.h>
 #include <nan.h>
 #include <v8.h>
 #include "grpc/grpc.h"
 #include "grpc/grpc_security.h"
 #include "grpc/support/alloc.h"
+#include "grpc/support/log.h"
+#include "grpc/support/time.h"
 
 #include "call.h"
 #include "call_credentials.h"
@@ -45,14 +49,32 @@
 #include "server.h"
 #include "completion_queue_async_worker.h"
 #include "server_credentials.h"
+#include "timeval.h"
 
 using v8::FunctionTemplate;
 using v8::Local;
 using v8::Value;
+using v8::Number;
 using v8::Object;
 using v8::Uint32;
 using v8::String;
 
+typedef struct log_args {
+  gpr_log_func_args core_args;
+  gpr_timespec timestamp;
+} log_args;
+
+typedef struct logger_state {
+  Nan::Callback *callback;
+  std::list<log_args *> *pending_args;
+  uv_mutex_t mutex;
+  uv_async_t async;
+  // Indicates that a logger has been set
+  bool logger_set;
+} logger_state;
+
+logger_state grpc_logger_state;
+
 static char *pem_root_certs = NULL;
 
 void InitStatusConstants(Local<Object> exports) {
@@ -235,6 +257,18 @@ void InitWriteFlags(Local<Object> exports) {
   Nan::Set(write_flags, Nan::New("NO_COMPRESS").ToLocalChecked(), NO_COMPRESS);
 }
 
+void InitLogConstants(Local<Object> exports) {
+  Nan::HandleScope scope;
+  Local<Object> log_verbosity = Nan::New<Object>();
+  Nan::Set(exports, Nan::New("logVerbosity").ToLocalChecked(), log_verbosity);
+  Local<Value> DEBUG(Nan::New<Uint32, uint32_t>(GPR_LOG_SEVERITY_DEBUG));
+  Nan::Set(log_verbosity, Nan::New("DEBUG").ToLocalChecked(), DEBUG);
+  Local<Value> INFO(Nan::New<Uint32, uint32_t>(GPR_LOG_SEVERITY_INFO));
+  Nan::Set(log_verbosity, Nan::New("INFO").ToLocalChecked(), INFO);
+  Local<Value> LOG_ERROR(Nan::New<Uint32, uint32_t>(GPR_LOG_SEVERITY_ERROR));
+  Nan::Set(log_verbosity, Nan::New("ERROR").ToLocalChecked(), LOG_ERROR);
+}
+
 NAN_METHOD(MetadataKeyIsLegal) {
   if (!info[0]->IsString()) {
     return Nan::ThrowTypeError(
@@ -298,16 +332,101 @@ NAN_METHOD(SetDefaultRootsPem) {
   }
 }
 
+NAUV_WORK_CB(LogMessagesCallback) {
+  Nan::HandleScope scope;
+  std::list<log_args *> args;
+  uv_mutex_lock(&grpc_logger_state.mutex);
+  args.splice(args.begin(), *grpc_logger_state.pending_args);
+  uv_mutex_unlock(&grpc_logger_state.mutex);
+  /* Call the callback with each log message */
+  while (!args.empty()) {
+    log_args *arg = args.front();
+    args.pop_front();
+    Local<Value> file = Nan::New(arg->core_args.file).ToLocalChecked();
+    Local<Value> line = Nan::New<Uint32, uint32_t>(arg->core_args.line);
+    Local<Value> severity = Nan::New(
+        gpr_log_severity_string(arg->core_args.severity)).ToLocalChecked();
+    Local<Value> message = Nan::New(arg->core_args.message).ToLocalChecked();
+    Local<Value> timestamp = Nan::New<v8::Date>(
+        grpc::node::TimespecToMilliseconds(arg->timestamp)).ToLocalChecked();
+    const int argc = 5;
+    Local<Value> argv[argc] = {file, line, severity, message, timestamp};
+    grpc_logger_state.callback->Call(argc, argv);
+    delete[] arg->core_args.message;
+    delete arg;
+  }
+}
+
+void node_log_func(gpr_log_func_args *args) {
+  // TODO(mlumish): Use the core's log formatter when it becomes available
+  log_args *args_copy = new log_args;
+  size_t message_len = strlen(args->message) + 1;
+  char *message = new char[message_len];
+  memcpy(message, args->message, message_len);
+  memcpy(&args_copy->core_args, args, sizeof(gpr_log_func_args));
+  args_copy->core_args.message = message;
+  args_copy->timestamp = gpr_now(GPR_CLOCK_REALTIME);
+
+  uv_mutex_lock(&grpc_logger_state.mutex);
+  grpc_logger_state.pending_args->push_back(args_copy);
+  uv_mutex_unlock(&grpc_logger_state.mutex);
+
+  uv_async_send(&grpc_logger_state.async);
+}
+
+void init_logger() {
+  memset(&grpc_logger_state, 0, sizeof(logger_state));
+  grpc_logger_state.pending_args = new std::list<log_args *>();
+  uv_mutex_init(&grpc_logger_state.mutex);
+  uv_async_init(uv_default_loop(),
+                &grpc_logger_state.async,
+                LogMessagesCallback);
+  uv_unref((uv_handle_t*)&grpc_logger_state.async);
+  grpc_logger_state.logger_set = false;
+
+  gpr_log_verbosity_init();
+}
+
+/* This registers a JavaScript logger for messages from the gRPC core. Because
+   that handler has to run in the context of the JavaScript event loop, it
+   will be invoked asynchronously. To minimize the debugging problems this
+   could cause, the core keeps its default synchronous logging until a
+   JavaScript logger is set. */
+NAN_METHOD(SetDefaultLoggerCallback) {
+  if (!info[0]->IsFunction()) {
+    return Nan::ThrowTypeError(
+        "setDefaultLoggerCallback's argument must be a function");
+  }
+  if (!grpc_logger_state.logger_set) {
+    gpr_set_log_function(node_log_func);
+    grpc_logger_state.logger_set = true;
+  }
+  grpc_logger_state.callback = new Nan::Callback(info[0].As<v8::Function>());
+}
+
+NAN_METHOD(SetLogVerbosity) {
+  if (!info[0]->IsUint32()) {
+    return Nan::ThrowTypeError(
+        "setLogVerbosity's argument must be a number");
+  }
+  gpr_log_severity severity = static_cast<gpr_log_severity>(
+      Nan::To<uint32_t>(info[0]).FromJust());
+  gpr_set_log_verbosity(severity);
+}
+
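
For reference, the callback registered through setDefaultLoggerCallback above is invoked with five arguments in this order: file, line, severity (the string produced by gpr_log_severity_string), message, and timestamp (a JavaScript Date). A minimal JavaScript-side sketch of that binding surface follows; the require path for the compiled extension is an assumption, and applications are expected to go through the grpc.setLogger wrapper added to src/node/index.js later in this diff rather than call the extension directly.

// Sketch only: exercises the extension's new logging entry points directly.
var grpc = require('./src/grpc_extension');  // path relative to src/node (assumption)

grpc.setDefaultLoggerCallback(function(file, line, severity, message, timestamp) {
  // severity is the string form produced by gpr_log_severity_string;
  // timestamp is a Date built from the gpr timespec captured when the core
  // emitted the message.
  console.error(severity + ' ' + timestamp.toISOString() + ' ' +
                file + ':' + line + ' ' + message);
});

// Constants installed by InitLogConstants: DEBUG, INFO, ERROR.
grpc.setLogVerbosity(grpc.logVerbosity.ERROR);
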
 void init(Local<Object> exports) {
   Nan::HandleScope scope;
   grpc_init();
   grpc_set_ssl_roots_override_callback(get_ssl_roots_override);
+  init_logger();
+
   InitStatusConstants(exports);
   InitCallErrorConstants(exports);
   InitOpTypeConstants(exports);
   InitPropagateConstants(exports);
   InitConnectivityStateConstants(exports);
   InitWriteFlags(exports);
+  InitLogConstants(exports);
 
   grpc::node::Call::Init(exports);
   grpc::node::CallCredentials::Init(exports);
@@ -333,6 +452,14 @@ void init(Local<Object> exports) {
            Nan::GetFunction(
                Nan::New<FunctionTemplate>(SetDefaultRootsPem)
                             ).ToLocalChecked());
+  Nan::Set(exports, Nan::New("setDefaultLoggerCallback").ToLocalChecked(),
+           Nan::GetFunction(
+               Nan::New<FunctionTemplate>(SetDefaultLoggerCallback)
+                            ).ToLocalChecked());
+  Nan::Set(exports, Nan::New("setLogVerbosity").ToLocalChecked(),
+           Nan::GetFunction(
+               Nan::New<FunctionTemplate>(SetLogVerbosity)
+                            ).ToLocalChecked());
 }
 
 NODE_MODULE(grpc_node, init)
diff --git a/src/node/health_check/LICENSE b/src/node/health_check/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..0209b570e10da54c158654433c5d3212b548c352
--- /dev/null
+++ b/src/node/health_check/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2015, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/node/health_check/health.js b/src/node/health_check/health.js
index 523668308814fc9e37acb0d56569c3d6f4c60333..64ba9fb9601acdc29f3a33da4e0c5a9d801990dc 100644
--- a/src/node/health_check/health.js
+++ b/src/node/health_check/health.js
@@ -33,14 +33,12 @@
 
 'use strict';
 
-var grpc = require('../');
+var grpc = require('grpc');
 
 var _ = require('lodash');
 
-var health_proto = grpc.load(__dirname +
-    '/../../proto/grpc/health/v1/health.proto');
-
-var HealthClient = health_proto.grpc.health.v1.Health;
+var health_messages = require('./v1/health_pb');
+var health_service = require('./v1/health_grpc_pb');
 
 function HealthImplementation(statusMap) {
   this.statusMap = _.clone(statusMap);
@@ -51,17 +49,19 @@ HealthImplementation.prototype.setStatus = function(service, status) {
 };
 
 HealthImplementation.prototype.check = function(call, callback){
-  var service = call.request.service;
+  var service = call.request.getService();
   var status = _.get(this.statusMap, service, null);
   if (status === null) {
     callback({code:grpc.status.NOT_FOUND});
   } else {
-    callback(null, {status: status});
+    var response = new health_messages.HealthCheckResponse();
+    response.setStatus(status);
+    callback(null, response);
   }
 };
 
 module.exports = {
-  Client: HealthClient,
-  service: HealthClient.service,
+  Client: health_service.HealthClient,
+  service: health_service.HealthService,
   Implementation: HealthImplementation
 };
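
A minimal sketch of serving this health check implementation, assuming the grpc-health-check package defined later in this diff is installed (the port and status map are placeholders):

var grpc = require('grpc');
var health = require('grpc-health-check');                  // resolves to health.js
var healthMessages = require('grpc-health-check/v1/health_pb');

var ServingStatus = healthMessages.HealthCheckResponse.ServingStatus;

// Report the overall server (empty service name) as SERVING; individual
// services can be added or flipped later with setStatus().
var healthImpl = new health.Implementation({'': ServingStatus.SERVING});

var server = new grpc.Server();
server.addService(health.service, healthImpl);
server.bind('0.0.0.0:50051', grpc.ServerCredentials.createInsecure());
server.start();

// Later, e.g. while draining:
healthImpl.setStatus('', ServingStatus.NOT_SERVING);
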
diff --git a/test/build/zookeeper.c b/src/node/health_check/node_modules/grpc.js
similarity index 85%
rename from test/build/zookeeper.c
rename to src/node/health_check/node_modules/grpc.js
index 7cd3d0da9e95fca53e9e83d818f35e3f26c1dc9d..42161198ccd19de9adb9bfc9da2fccad7434726a 100644
--- a/test/build/zookeeper.c
+++ b/src/node/health_check/node_modules/grpc.js
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -31,13 +31,7 @@
  *
  */
 
-/* This is just a compilation test, to see if we have Zookeeper C client
-   library installed. */
+/* This exists solely to allow the generated code to import the grpc module
+ * without using a relative path */
 
-#include <stdlib.h>
-#include <zookeeper/zookeeper.h>
-
-int main() {
-  zookeeper_init(NULL, NULL, 0, 0, 0, 0);
-  return 0;
-}
+module.exports = require('../..');

diff --git a/src/node/health_check/package.json b/src/node/health_check/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..67f5301df7ec8eb62a37dfd16bf57ff57c3c0225
--- /dev/null
+++ b/src/node/health_check/package.json
@@ -0,0 +1,29 @@
+{
+  "name": "grpc-health-check",
+  "version": "1.1.0-dev",
+  "author": "Google Inc.",
+  "description": "Health check service for use with gRPC",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/grpc/grpc.git"
+  },
+  "bugs": "https://github.com/grpc/grpc/issues",
+  "contributors": [
+    {
+      "name": "Michael Lumish",
+      "email": "mlumish@google.com"
+    }
+  ],
+  "dependencies": {
+    "grpc": "^0.15.0",
+    "lodash": "^3.9.3",
+    "google-protobuf": "^3.0.0-alpha.5"
+  },
+  "files": {
+    "LICENSE",
+    "health.js",
+    "v1"
+  ],
+  "main": "src/node/index.js",
+  "license": "BSD-3-Clause"
+}
diff --git a/src/objective-c/tests/RemoteTestClient/empty.proto b/src/node/health_check/v1/health_grpc_pb.js
similarity index 51%
rename from src/objective-c/tests/RemoteTestClient/empty.proto
rename to src/node/health_check/v1/health_grpc_pb.js
index a678048289e2fa85ff5dfb084e027b801e70b028..89bc304e56664f2fd8c612779b19471b200c92be 100644
--- a/src/objective-c/tests/RemoteTestClient/empty.proto
+++ b/src/node/health_check/v1/health_grpc_pb.js
@@ -1,3 +1,6 @@
+// GENERATED CODE -- DO NOT EDIT!
+
+// Original file comments:
 // Copyright 2015, Google Inc.
 // All rights reserved.
 //
@@ -26,19 +29,46 @@
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+'use strict';
+var grpc = require('grpc');
+var v1_health_pb = require('../v1/health_pb.js');
 
-syntax = "proto3";
+function serialize_HealthCheckRequest(arg) {
+  if (!(arg instanceof v1_health_pb.HealthCheckRequest)) {
+    throw new Error('Expected argument of type HealthCheckRequest');
+  }
+  return new Buffer(arg.serializeBinary());
+}
 
-package grpc.testing;
+function deserialize_HealthCheckRequest(buffer_arg) {
+  return v1_health_pb.HealthCheckRequest.deserializeBinary(new Uint8Array(buffer_arg));
+}
 
-option objc_class_prefix = "RMT";
+function serialize_HealthCheckResponse(arg) {
+  if (!(arg instanceof v1_health_pb.HealthCheckResponse)) {
+    throw new Error('Expected argument of type HealthCheckResponse');
+  }
+  return new Buffer(arg.serializeBinary());
+}
 
-// An empty message that you can re-use to avoid defining duplicated empty
-// messages in your project. A typical example is to use it as argument or the
-// return value of a service API. For instance:
-//
-//   service Foo {
-//     rpc Bar (grpc.testing.Empty) returns (grpc.testing.Empty) { };
-//   };
-//
-message Empty {}
+function deserialize_HealthCheckResponse(buffer_arg) {
+  return v1_health_pb.HealthCheckResponse.deserializeBinary(new Uint8Array(buffer_arg));
+}
+
+
+var HealthService = exports.HealthService = {
+  check: {
+    path: '/grpc.health.v1.Health/Check',
+    requestStream: false,
+    responseStream: false,
+    requestType: v1_health_pb.HealthCheckRequest,
+    responseType: v1_health_pb.HealthCheckResponse,
+    requestSerialize: serialize_HealthCheckRequest,
+    requestDeserialize: deserialize_HealthCheckRequest,
+    responseSerialize: serialize_HealthCheckResponse,
+    responseDeserialize: deserialize_HealthCheckResponse,
+  },
+};
+
+exports.HealthClient = grpc.makeGenericClientConstructor(HealthService);
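
On the client side, the constructor returned by makeGenericClientConstructor takes a server address and channel credentials, and the generated check method accepts a HealthCheckRequest message; a sketch under those assumptions (the address and require paths are placeholders):

var grpc = require('grpc');
var healthService = require('./health_grpc_pb');   // this file
var healthMessages = require('./health_pb');

var client = new healthService.HealthClient('localhost:50051',
                                            grpc.credentials.createInsecure());

var request = new healthMessages.HealthCheckRequest();
request.setService('');  // the empty string asks about the server as a whole

client.check(request, function(err, response) {
  if (err) {
    // err.code is grpc.status.NOT_FOUND when the service isn't registered.
    console.error('health check failed:', err);
    return;
  }
  console.log('serving status:', response.getStatus());
});
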
diff --git a/src/node/health_check/v1/health_pb.js b/src/node/health_check/v1/health_pb.js
new file mode 100644
index 0000000000000000000000000000000000000000..b36d47cdbb94aeff1e65dd19429551f2be216a55
--- /dev/null
+++ b/src/node/health_check/v1/health_pb.js
@@ -0,0 +1,342 @@
+/**
+ * @fileoverview
+ * @enhanceable
+ * @public
+ */
+// GENERATED CODE -- DO NOT EDIT!
+
+var jspb = require('google-protobuf');
+var goog = jspb;
+var global = Function('return this')();
+
+goog.exportSymbol('proto.grpc.health.v1.HealthCheckRequest', null, global);
+goog.exportSymbol('proto.grpc.health.v1.HealthCheckResponse', null, global);
+goog.exportSymbol('proto.grpc.health.v1.HealthCheckResponse.ServingStatus', null, global);
+
+/**
+ * Generated by JsPbCodeGenerator.
+ * @param {Array=} opt_data Optional initial data array, typically from a
+ * server response, or constructed directly in Javascript. The array is used
+ * in place and becomes part of the constructed object. It is not cloned.
+ * If no data is provided, the constructed object will be empty, but still
+ * valid.
+ * @extends {jspb.Message}
+ * @constructor
+ */
+proto.grpc.health.v1.HealthCheckRequest = function(opt_data) {
+  jspb.Message.initialize(this, opt_data, 0, -1, null, null);
+};
+goog.inherits(proto.grpc.health.v1.HealthCheckRequest, jspb.Message);
+if (goog.DEBUG && !COMPILED) {
+  proto.grpc.health.v1.HealthCheckRequest.displayName = 'proto.grpc.health.v1.HealthCheckRequest';
+}
+
+
+if (jspb.Message.GENERATE_TO_OBJECT) {
+/**
+ * Creates an object representation of this proto suitable for use in Soy templates.
+ * Field names that are reserved in JavaScript and will be renamed to pb_name.
+ * To access a reserved field use, foo.pb_<name>, eg, foo.pb_default.
+ * For the list of reserved names please see:
+ *     com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS.
+ * @param {boolean=} opt_includeInstance Whether to include the JSPB instance
+ *     for transitional soy proto support: http://goto/soy-param-migration
+ * @return {!Object}
+ */
+proto.grpc.health.v1.HealthCheckRequest.prototype.toObject = function(opt_includeInstance) {
+  return proto.grpc.health.v1.HealthCheckRequest.toObject(opt_includeInstance, this);
+};
+
+
+/**
+ * Static version of the {@see toObject} method.
+ * @param {boolean|undefined} includeInstance Whether to include the JSPB
+ *     instance for transitional soy proto support:
+ *     http://goto/soy-param-migration
+ * @param {!proto.grpc.health.v1.HealthCheckRequest} msg The msg instance to transform.
+ * @return {!Object}
+ */
+proto.grpc.health.v1.HealthCheckRequest.toObject = function(includeInstance, msg) {
+  var f, obj = {
+    service: msg.getService()
+  };
+
+  if (includeInstance) {
+    obj.$jspbMessageInstance = msg;
+  }
+  return obj;
+};
+}
+
+
+/**
+ * Deserializes binary data (in protobuf wire format).
+ * @param {jspb.ByteSource} bytes The bytes to deserialize.
+ * @return {!proto.grpc.health.v1.HealthCheckRequest}
+ */
+proto.grpc.health.v1.HealthCheckRequest.deserializeBinary = function(bytes) {
+  var reader = new jspb.BinaryReader(bytes);
+  var msg = new proto.grpc.health.v1.HealthCheckRequest;
+  return proto.grpc.health.v1.HealthCheckRequest.deserializeBinaryFromReader(msg, reader);
+};
+
+
+/**
+ * Deserializes binary data (in protobuf wire format) from the
+ * given reader into the given message object.
+ * @param {!proto.grpc.health.v1.HealthCheckRequest} msg The message object to deserialize into.
+ * @param {!jspb.BinaryReader} reader The BinaryReader to use.
+ * @return {!proto.grpc.health.v1.HealthCheckRequest}
+ */
+proto.grpc.health.v1.HealthCheckRequest.deserializeBinaryFromReader = function(msg, reader) {
+  while (reader.nextField()) {
+    if (reader.isEndGroup()) {
+      break;
+    }
+    var field = reader.getFieldNumber();
+    switch (field) {
+    case 1:
+      var value = /** @type {string} */ (reader.readString());
+      msg.setService(value);
+      break;
+    default:
+      reader.skipField();
+      break;
+    }
+  }
+  return msg;
+};
+
+
+/**
+ * Class method variant: serializes the given message to binary data
+ * (in protobuf wire format), writing to the given BinaryWriter.
+ * @param {!proto.grpc.health.v1.HealthCheckRequest} message
+ * @param {!jspb.BinaryWriter} writer
+ */
+proto.grpc.health.v1.HealthCheckRequest.serializeBinaryToWriter = function(message, writer) {
+  message.serializeBinaryToWriter(writer);
+};
+
+
+/**
+ * Serializes the message to binary data (in protobuf wire format).
+ * @return {!Uint8Array}
+ */
+proto.grpc.health.v1.HealthCheckRequest.prototype.serializeBinary = function() {
+  var writer = new jspb.BinaryWriter();
+  this.serializeBinaryToWriter(writer);
+  return writer.getResultBuffer();
+};
+
+
+/**
+ * Serializes the message to binary data (in protobuf wire format),
+ * writing to the given BinaryWriter.
+ * @param {!jspb.BinaryWriter} writer
+ */
+proto.grpc.health.v1.HealthCheckRequest.prototype.serializeBinaryToWriter = function (writer) {
+  var f = undefined;
+  f = this.getService();
+  if (f.length > 0) {
+    writer.writeString(
+      1,
+      f
+    );
+  }
+};
+
+
+/**
+ * Creates a deep clone of this proto. No data is shared with the original.
+ * @return {!proto.grpc.health.v1.HealthCheckRequest} The clone.
+ */
+proto.grpc.health.v1.HealthCheckRequest.prototype.cloneMessage = function() {
+  return /** @type {!proto.grpc.health.v1.HealthCheckRequest} */ (jspb.Message.cloneMessage(this));
+};
+
+
+/**
+ * optional string service = 1;
+ * @return {string}
+ */
+proto.grpc.health.v1.HealthCheckRequest.prototype.getService = function() {
+  return /** @type {string} */ (jspb.Message.getFieldProto3(this, 1, ""));
+};
+
+
+/** @param {string} value  */
+proto.grpc.health.v1.HealthCheckRequest.prototype.setService = function(value) {
+  jspb.Message.setField(this, 1, value);
+};
+
+
+
+/**
+ * Generated by JsPbCodeGenerator.
+ * @param {Array=} opt_data Optional initial data array, typically from a
+ * server response, or constructed directly in Javascript. The array is used
+ * in place and becomes part of the constructed object. It is not cloned.
+ * If no data is provided, the constructed object will be empty, but still
+ * valid.
+ * @extends {jspb.Message}
+ * @constructor
+ */
+proto.grpc.health.v1.HealthCheckResponse = function(opt_data) {
+  jspb.Message.initialize(this, opt_data, 0, -1, null, null);
+};
+goog.inherits(proto.grpc.health.v1.HealthCheckResponse, jspb.Message);
+if (goog.DEBUG && !COMPILED) {
+  proto.grpc.health.v1.HealthCheckResponse.displayName = 'proto.grpc.health.v1.HealthCheckResponse';
+}
+
+
+if (jspb.Message.GENERATE_TO_OBJECT) {
+/**
+ * Creates an object representation of this proto suitable for use in Soy templates.
+ * Field names that are reserved in JavaScript and will be renamed to pb_name.
+ * To access a reserved field use, foo.pb_<name>, eg, foo.pb_default.
+ * For the list of reserved names please see:
+ *     com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS.
+ * @param {boolean=} opt_includeInstance Whether to include the JSPB instance
+ *     for transitional soy proto support: http://goto/soy-param-migration
+ * @return {!Object}
+ */
+proto.grpc.health.v1.HealthCheckResponse.prototype.toObject = function(opt_includeInstance) {
+  return proto.grpc.health.v1.HealthCheckResponse.toObject(opt_includeInstance, this);
+};
+
+
+/**
+ * Static version of the {@see toObject} method.
+ * @param {boolean|undefined} includeInstance Whether to include the JSPB
+ *     instance for transitional soy proto support:
+ *     http://goto/soy-param-migration
+ * @param {!proto.grpc.health.v1.HealthCheckResponse} msg The msg instance to transform.
+ * @return {!Object}
+ */
+proto.grpc.health.v1.HealthCheckResponse.toObject = function(includeInstance, msg) {
+  var f, obj = {
+    status: msg.getStatus()
+  };
+
+  if (includeInstance) {
+    obj.$jspbMessageInstance = msg;
+  }
+  return obj;
+};
+}
+
+
+/**
+ * Deserializes binary data (in protobuf wire format).
+ * @param {jspb.ByteSource} bytes The bytes to deserialize.
+ * @return {!proto.grpc.health.v1.HealthCheckResponse}
+ */
+proto.grpc.health.v1.HealthCheckResponse.deserializeBinary = function(bytes) {
+  var reader = new jspb.BinaryReader(bytes);
+  var msg = new proto.grpc.health.v1.HealthCheckResponse;
+  return proto.grpc.health.v1.HealthCheckResponse.deserializeBinaryFromReader(msg, reader);
+};
+
+
+/**
+ * Deserializes binary data (in protobuf wire format) from the
+ * given reader into the given message object.
+ * @param {!proto.grpc.health.v1.HealthCheckResponse} msg The message object to deserialize into.
+ * @param {!jspb.BinaryReader} reader The BinaryReader to use.
+ * @return {!proto.grpc.health.v1.HealthCheckResponse}
+ */
+proto.grpc.health.v1.HealthCheckResponse.deserializeBinaryFromReader = function(msg, reader) {
+  while (reader.nextField()) {
+    if (reader.isEndGroup()) {
+      break;
+    }
+    var field = reader.getFieldNumber();
+    switch (field) {
+    case 1:
+      var value = /** @type {!proto.grpc.health.v1.HealthCheckResponse.ServingStatus} */ (reader.readEnum());
+      msg.setStatus(value);
+      break;
+    default:
+      reader.skipField();
+      break;
+    }
+  }
+  return msg;
+};
+
+
+/**
+ * Class method variant: serializes the given message to binary data
+ * (in protobuf wire format), writing to the given BinaryWriter.
+ * @param {!proto.grpc.health.v1.HealthCheckResponse} message
+ * @param {!jspb.BinaryWriter} writer
+ */
+proto.grpc.health.v1.HealthCheckResponse.serializeBinaryToWriter = function(message, writer) {
+  message.serializeBinaryToWriter(writer);
+};
+
+
+/**
+ * Serializes the message to binary data (in protobuf wire format).
+ * @return {!Uint8Array}
+ */
+proto.grpc.health.v1.HealthCheckResponse.prototype.serializeBinary = function() {
+  var writer = new jspb.BinaryWriter();
+  this.serializeBinaryToWriter(writer);
+  return writer.getResultBuffer();
+};
+
+
+/**
+ * Serializes the message to binary data (in protobuf wire format),
+ * writing to the given BinaryWriter.
+ * @param {!jspb.BinaryWriter} writer
+ */
+proto.grpc.health.v1.HealthCheckResponse.prototype.serializeBinaryToWriter = function (writer) {
+  var f = undefined;
+  f = this.getStatus();
+  if (f !== 0.0) {
+    writer.writeEnum(
+      1,
+      f
+    );
+  }
+};
+
+
+/**
+ * Creates a deep clone of this proto. No data is shared with the original.
+ * @return {!proto.grpc.health.v1.HealthCheckResponse} The clone.
+ */
+proto.grpc.health.v1.HealthCheckResponse.prototype.cloneMessage = function() {
+  return /** @type {!proto.grpc.health.v1.HealthCheckResponse} */ (jspb.Message.cloneMessage(this));
+};
+
+
+/**
+ * optional ServingStatus status = 1;
+ * @return {!proto.grpc.health.v1.HealthCheckResponse.ServingStatus}
+ */
+proto.grpc.health.v1.HealthCheckResponse.prototype.getStatus = function() {
+  return /** @type {!proto.grpc.health.v1.HealthCheckResponse.ServingStatus} */ (jspb.Message.getFieldProto3(this, 1, 0));
+};
+
+
+/** @param {!proto.grpc.health.v1.HealthCheckResponse.ServingStatus} value  */
+proto.grpc.health.v1.HealthCheckResponse.prototype.setStatus = function(value) {
+  jspb.Message.setField(this, 1, value);
+};
+
+
+/**
+ * @enum {number}
+ */
+proto.grpc.health.v1.HealthCheckResponse.ServingStatus = {
+  UNKNOWN: 0,
+  SERVING: 1,
+  NOT_SERVING: 2
+};
+
+goog.object.extend(exports, proto.grpc.health.v1);
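
A short sketch of the message API this generated file exposes, round-tripping a request through the protobuf wire format (the service name is a placeholder):

var healthMessages = require('./health_pb');

var request = new healthMessages.HealthCheckRequest();
request.setService('grpc.test.TestService');

// serializeBinary() yields a Uint8Array in protobuf wire format...
var bytes = request.serializeBinary();

// ...and the static deserializeBinary() reconstructs an equivalent message.
var decoded = healthMessages.HealthCheckRequest.deserializeBinary(bytes);
console.log(decoded.getService());  // 'grpc.test.TestService'

// Response statuses use the generated ServingStatus enum.
var response = new healthMessages.HealthCheckResponse();
response.setStatus(healthMessages.HealthCheckResponse.ServingStatus.SERVING);
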
diff --git a/src/node/index.js b/src/node/index.js
index 66664d94b5a225b3fb90c802c07a35257d8ff71c..9fb6faa5d7ce77b1f0df37b9d2c5336ac5acf88f 100644
--- a/src/node/index.js
+++ b/src/node/index.js
@@ -46,6 +46,8 @@ var client = require('./src/client.js');
 
 var server = require('./src/server.js');
 
+var common = require('./src/common.js');
+
 var Metadata = require('./src/metadata.js');
 
 var grpc = require('./src/grpc_extension');
@@ -122,6 +124,42 @@ exports.load = function load(filename, format, options) {
   return loadObject(builder.ns, options);
 };
 
+var log_template = _.template(
+    '{severity} {timestamp}\t{file}:{line}]\t{message}',
+    {interpolate: /{([\s\S]+?)}/g});
+
+/**
+ * Sets the logger function for the gRPC module. For debugging purposes, the C
+ * core will log synchronously directly to stdout unless this function is
+ * called. Note: the output format here is intended to be informational, and
+ * is not guaranteed to stay the same in the future.
+ * Logs will be directed to logger.error.
+ * @param {Console} logger A Console-like object.
+ */
+exports.setLogger = function setLogger(logger) {
+  common.logger = logger;
+  grpc.setDefaultLoggerCallback(function(file, line, severity,
+                                         message, timestamp) {
+    logger.error(log_template({
+      file: path.basename(file),
+      line: line,
+      severity: severity,
+      message: message,
+      timestamp: timestamp.toISOString()
+    }));
+  });
+};
+
+/**
+ * Sets the logger verbosity for gRPC module logging. The options are members
+ * of the grpc.logVerbosity map.
+ * @param {Number} verbosity The minimum severity to log
+ */
+exports.setLogVerbosity = function setLogVerbosity(verbosity) {
+  common.logVerbosity = verbosity;
+  grpc.setLogVerbosity(verbosity);
+};
+
 /**
  * @see module:src/server.Server
  */
@@ -152,6 +190,11 @@ exports.callError = grpc.callError;
  */
 exports.writeFlags = grpc.writeFlags;
 
+/**
+ * Log verbosity setting name to code number mapping
+ */
+exports.logVerbosity = grpc.logVerbosity;
+
 /**
  * Credentials factories
  */
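
Usage of the new public logging API is straightforward; a sketch (the choice of console as the logger and ERROR as the threshold are just examples):

var grpc = require('grpc');

// Route core log messages through console.error instead of letting the C
// core write synchronously to stdout.
grpc.setLogger(console);

// Only pass messages of severity ERROR or above to the logger.
grpc.setLogVerbosity(grpc.logVerbosity.ERROR);
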
diff --git a/src/node/interop/interop_server.js b/src/node/interop/interop_server.js
index 72807623054cff8df0bda0f1fce0f26106dde3ea..05f52a1083de9d6a2e54d876405af7427518f775 100644
--- a/src/node/interop/interop_server.js
+++ b/src/node/interop/interop_server.js
@@ -45,9 +45,6 @@ var testProto = grpc.load({
 var ECHO_INITIAL_KEY = 'x-grpc-test-echo-initial';
 var ECHO_TRAILING_KEY = 'x-grpc-test-echo-trailing-bin';
 
-var incompressible_data = fs.readFileSync(
-    __dirname + '/../../../test/cpp/interop/rnd.dat');
-
 /**
  * Create a buffer filled with size zeroes
  * @param {number} size The length of the buffer
@@ -88,15 +85,7 @@ function getEchoTrailer(call) {
 }
 
 function getPayload(payload_type, size) {
-  if (payload_type === 'RANDOM') {
-    payload_type = ['COMPRESSABLE',
-                    'UNCOMPRESSABLE'][Math.random() < 0.5 ? 0 : 1];
-  }
-  var body;
-  switch (payload_type) {
-    case 'COMPRESSABLE': body = zeroBuffer(size); break;
-    case 'UNCOMPRESSABLE': incompressible_data.slice(size); break;
-  }
+  var body = zeroBuffer(size);
   return {type: payload_type, body: body};
 }
 
diff --git a/src/node/src/common.js b/src/node/src/common.js
index 8cf43b7a84cc85be1509512d669728d1649e7cdc..22159dd39f7079f829f8776683a680882df4fdd7 100644
--- a/src/node/src/common.js
+++ b/src/node/src/common.js
@@ -157,3 +157,24 @@ exports.getProtobufServiceAttrs = function getProtobufServiceAttrs(service,
     }];
   }));
 };
+
+/**
+ * The logger object for the gRPC module. Defaults to console.
+ */
+exports.logger = console;
+
+/**
+ * The current logging verbosity. 0 corresponds to logging everything
+ */
+exports.logVerbosity = 0;
+
+/**
+ * Log a message if the severity is at least as high as the current verbosity
+ * @param {Number} severity A value of the grpc.logVerbosity map
+ * @param {String} message The message to log
+ */
+exports.log = function log(severity, message) {
+  if (severity >= exports.logVerbosity) {
+    exports.logger.error(message);
+  }
+};
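
The helper gates messages on the module-wide verbosity and writes them with the configured logger's error method; a small sketch of the intended internal usage (require paths are relative to src/node/src and the log text is a placeholder):

var common = require('./common.js');
var grpc = require('./grpc_extension');  // source of the logVerbosity constants

// Emitted because INFO >= the default verbosity of 0 (log everything);
// written with common.logger.error, which defaults to console.error.
common.log(grpc.logVerbosity.INFO, 'resolving target name');

// Raising the threshold filters out lower-severity messages.
common.logVerbosity = grpc.logVerbosity.ERROR;
common.log(grpc.logVerbosity.INFO, 'this message is suppressed');
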
diff --git a/src/node/src/credentials.js b/src/node/src/credentials.js
index a12eade4e1f518f10b72cf6506ce52c3110f8c5b..b746d0625dfaa1d56035c574f167d5629ad20b29 100644
--- a/src/node/src/credentials.js
+++ b/src/node/src/credentials.js
@@ -69,6 +69,8 @@ var ChannelCredentials = grpc.ChannelCredentials;
 
 var Metadata = require('./metadata.js');
 
+var common = require('./common.js');
+
 /**
  * Create an SSL Credentials object. If using a client-side certificate, both
  * the second and third arguments must be passed.
@@ -120,7 +122,7 @@ exports.createFromGoogleCredential = function(google_credential) {
     var service_url = auth_context.service_url;
     google_credential.getRequestMetadata(service_url, function(err, header) {
       if (err) {
-        console.log('Auth error:', err);
+        common.log(grpc.logVerbosity.INFO, 'Auth error: ' + err);
         callback(err);
         return;
       }
diff --git a/src/node/src/server.js b/src/node/src/server.js
index fd5153f1244c1a8092a841983de0c2597e2db61a..b3b414969ab84941ff950e4a33d0cbc9a86ae9c3 100644
--- a/src/node/src/server.js
+++ b/src/node/src/server.js
@@ -735,8 +735,8 @@ Server.prototype.addService = function(service, implementation) {
     }
     var impl;
     if (implementation[name] === undefined) {
-      console.warn('Method handler for %s expected but not provided',
-                   attrs.path);
+      common.log(grpc.logVerbosity.ERROR, 'Method handler for ' +
+          attrs.path + ' expected but not provided');
       impl = defaultHandler[method_type];
     } else {
       impl = _.bind(implementation[name], implementation);
diff --git a/src/node/test/credentials_test.js b/src/node/test/credentials_test.js
index 794215b246cd5d540d360391352bafbece71a7b8..0a21572582e2e62c76d21d18bed3b16f84e0345d 100644
--- a/src/node/test/credentials_test.js
+++ b/src/node/test/credentials_test.js
@@ -318,7 +318,7 @@ describe('client credentials', function() {
       done();
     });
   });
-  it.skip('should propagate errors that the updater emits', function(done) {
+  it('should propagate errors that the updater emits', function(done) {
     var metadataUpdater = function(service_url, callback) {
       var error = new Error('Authentication error');
       error.code = grpc.status.UNAUTHENTICATED;
@@ -370,7 +370,7 @@ describe('client credentials', function() {
       done();
     });
   });
-  it.skip('should get an error from a Google credential', function(done) {
+  it('should get an error from a Google credential', function(done) {
     var creds = grpc.credentials.createFromGoogleCredential(
         fakeFailingGoogleCredentials);
     var combined_creds = grpc.credentials.combineChannelCredentials(
diff --git a/src/node/test/health_test.js b/src/node/test/health_test.js
index c93b528d42f2a6d259ac95ef408a47a2258cef5e..efbca46c2dc732544415af1caa93743daa70f820 100644
--- a/src/node/test/health_test.js
+++ b/src/node/test/health_test.js
@@ -35,15 +35,19 @@
 
 var assert = require('assert');
 
-var health = require('../health_check/health.js');
+var health = require('../health_check/health');
+
+var health_messages = require('../health_check/v1/health_pb');
+
+var ServingStatus = health_messages.HealthCheckResponse.ServingStatus;
 
 var grpc = require('../');
 
 describe('Health Checking', function() {
   var statusMap = {
-    '': 'SERVING',
-    'grpc.test.TestServiceNotServing': 'NOT_SERVING',
-    'grpc.test.TestServiceServing': 'SERVING'
+    '': ServingStatus.SERVING,
+    'grpc.test.TestServiceNotServing': ServingStatus.NOT_SERVING,
+    'grpc.test.TestServiceServing': ServingStatus.SERVING
   };
   var healthServer;
   var healthImpl;
@@ -51,7 +55,7 @@ describe('Health Checking', function() {
   before(function() {
     healthServer = new grpc.Server();
     healthImpl = new health.Implementation(statusMap);
-    healthServer.addProtoService(health.service, healthImpl);
+    healthServer.addService(health.service, healthImpl);
     var port_num = healthServer.bind('0.0.0.0:0',
                                      grpc.ServerCredentials.createInsecure());
     healthServer.start();
@@ -62,43 +66,51 @@ describe('Health Checking', function() {
     healthServer.forceShutdown();
   });
   it('should say an enabled service is SERVING', function(done) {
-    healthClient.check({service: ''}, function(err, response) {
+    var request = new health_messages.HealthCheckRequest();
+    request.setService('');
+    healthClient.check(request, function(err, response) {
       assert.ifError(err);
-      assert.strictEqual(response.status, 'SERVING');
+      assert.strictEqual(response.getStatus(), ServingStatus.SERVING);
       done();
     });
   });
   it('should say that a disabled service is NOT_SERVING', function(done) {
-    healthClient.check({service: 'grpc.test.TestServiceNotServing'},
-                       function(err, response) {
-                         assert.ifError(err);
-                         assert.strictEqual(response.status, 'NOT_SERVING');
-                         done();
-                       });
+    var request = new health_messages.HealthCheckRequest();
+    request.setService('grpc.test.TestServiceNotServing');
+    healthClient.check(request, function(err, response) {
+      assert.ifError(err);
+      assert.strictEqual(response.getStatus(), ServingStatus.NOT_SERVING);
+      done();
+    });
   });
   it('should say that an enabled service is SERVING', function(done) {
-    healthClient.check({service: 'grpc.test.TestServiceServing'},
-                       function(err, response) {
-                         assert.ifError(err);
-                         assert.strictEqual(response.status, 'SERVING');
-                         done();
-                       });
+    var request = new health_messages.HealthCheckRequest();
+    request.setService('grpc.test.TestServiceServing');
+    healthClient.check(request, function(err, response) {
+      assert.ifError(err);
+      assert.strictEqual(response.getStatus(), ServingStatus.SERVING);
+      done();
+    });
   });
   it('should get NOT_FOUND if the service is not registered', function(done) {
-    healthClient.check({service: 'not_registered'}, function(err, response) {
+    var request = new health_messages.HealthCheckRequest();
+    request.setService('not_registered');
+    healthClient.check(request, function(err, response) {
       assert(err);
       assert.strictEqual(err.code, grpc.status.NOT_FOUND);
       done();
     });
   });
   it('should get a different response if the status changes', function(done) {
-    healthClient.check({service: 'transient'}, function(err, response) {
+    var request = new health_messages.HealthCheckRequest();
+    request.setService('transient');
+    healthClient.check(request, function(err, response) {
       assert(err);
       assert.strictEqual(err.code, grpc.status.NOT_FOUND);
-      healthImpl.setStatus('transient', 'SERVING');
-      healthClient.check({service: 'transient'}, function(err, response) {
+      healthImpl.setStatus('transient', ServingStatus.SERVING);
+      healthClient.check(request, function(err, response) {
         assert.ifError(err);
-        assert.strictEqual(response.status, 'SERVING');
+        assert.strictEqual(response.getStatus(), ServingStatus.SERVING);
         done();
       });
     });
diff --git a/src/node/tools/package.json b/src/node/tools/package.json
index d4849c2e38842bbb482f3ab19ecfde3f58b57b92..e5513d78791cad746f2e4b1099dab68397149eed 100644
--- a/src/node/tools/package.json
+++ b/src/node/tools/package.json
@@ -1,6 +1,6 @@
 {
   "name": "grpc-tools",
-  "version": "0.15.0-dev",
+  "version": "1.1.0-dev",
   "author": "Google Inc.",
   "description": "Tools for developing with gRPC on Node.js",
   "homepage": "http://www.grpc.io/",
@@ -34,6 +34,7 @@
     "index.js",
     "bin/protoc.js",
     "bin/protoc_plugin.js",
+    "bin/google/protobuf",
     "LICENSE"
   ],
   "main": "index.js"
diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
new file mode 100644
index 0000000000000000000000000000000000000000..72cadb9319943fd87af724a3e2dc15e4f68b0622
--- /dev/null
+++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
@@ -0,0 +1,122 @@
+# CocoaPods podspec for the gRPC Proto Compiler Plugin
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Pod::Spec.new do |s|
+  # This pod is only a utility that will be used by other pods _at install time_ (not at compile
+  # time). Other pods can access it in their `prepare_command` script, under <pods_root>/<pod name>.
+  # Because CocoaPods installs pods in alphabetical order, beginning this pod's name with an
+  # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
+  # before them.
+  s.name     = '!ProtoCompiler-gRPCPlugin'
+  v = '0.14.0'
+  s.version  = v
+  s.summary  = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.'
+  s.description = <<-DESC
+    This podspec only downloads the gRPC protoc plugin so that local pods generating protos can use
+    it in their invocation of protoc, as part of their prepare_command.
+    The generated code will have a dependency on the gRPC Objective-C Proto runtime of the same
+    version. The runtime can be obtained as the "gRPC-ProtoRPC" pod.
+  DESC
+  s.homepage = 'http://www.grpc.io'
+  s.license  = {
+    :type => 'New BSD',
+    :text => <<-LICENSE
+      Copyright 2015, Google Inc.
+      All rights reserved.
+
+      Redistribution and use in source and binary forms, with or without
+      modification, are permitted provided that the following conditions are
+      met:
+
+          * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+          * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following disclaimer
+      in the documentation and/or other materials provided with the
+      distribution.
+          * Neither the name of Google Inc. nor the names of its
+      contributors may be used to endorse or promote products derived from
+      this software without specific prior written permission.
+
+      THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+      "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+      LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+      A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+      OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+      SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+      LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+      DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+      THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+      (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+      OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+    LICENSE
+  }
+  s.authors  = { 'The gRPC contributors' => 'grpc-packages@google.com' }
+
+  repo = 'grpc/grpc'
+  release = 'release-0_14_1'
+  file = "grpc_objective_c_plugin-#{v}-macos-x86_64.zip"
+  s.source = {
+    :http => "https://github.com/#{repo}/releases/download/#{release}/#{file}",
+    # TODO(jcanizales): Add sha1 or sha256
+    # :sha1 => '??',
+  }
+
+  repo_root = '../..'
+  plugin = 'grpc_objective_c_plugin'
+
+  s.preserve_paths = plugin
+
+  # Restrict the protoc version to the one supported by this plugin.
+  s.dependency '!ProtoCompiler', '3.0.0-beta-3.1'
+  # For the Protobuf dependency not to complain:
+  s.ios.deployment_target = '7.1'
+  s.osx.deployment_target = '10.9'
+  # Restrict the gRPC runtime version to the one supported by this plugin.
+  s.dependency 'gRPC-ProtoRPC', v
+
+  # This is only for local development of the plugin: If the Podfile brings this pod from a local
+  # directory using `:path`, CocoaPods won't download the zip file and so the plugin won't be
+  # present in this pod's directory. We use that knowledge to check for the existence of the file
+  # and, if absent, compile the plugin from the local sources.
+  s.prepare_command = <<-CMD
+    if [ ! -f #{plugin} ]; then
+      cd #{repo_root}
+      # This will build the plugin and put it in #{repo_root}/bins/opt.
+      #
+      # TODO(jcanizales): I reckon make will try to use locally-installed libprotoc (headers and
+      # library binary) if found, which _we do not want_. Find a way for this to always use the
+      # sources in the repo.
+      make #{plugin}
+      cd -
+    fi
+  CMD
+end
diff --git a/src/objective-c/!ProtoCompiler.podspec b/src/objective-c/!ProtoCompiler.podspec
new file mode 100644
index 0000000000000000000000000000000000000000..5e59b25aee284a6d2d03ed74912ceb8273365369
--- /dev/null
+++ b/src/objective-c/!ProtoCompiler.podspec
@@ -0,0 +1,136 @@
+# Proto Compiler CocoaPods podspec
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Pod::Spec.new do |s|
+  # This pod is only a utility that will be used by other pods _at install time_ (not at compile
+  # time). Other pods can access it in their `prepare_command` script, under <pods_root>/<pod name>.
+  # Because CocoaPods installs pods in alphabetical order, beginning this pod's name with an
+  # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
+  # before them.
+  s.name     = '!ProtoCompiler'
+  v = '3.0.0-beta-3.1'
+  s.version  = v
+  s.summary  = 'The Protobuf Compiler (protoc) generates Objective-C files from .proto files'
+  s.description = <<-DESC
+    This podspec only downloads protoc so that local pods generating protos can execute it as part
+    of their prepare_command.
+    The generated code will have a dependency on the Protobuf Objective-C runtime of the same
+    version. The runtime can be obtained as the "Protobuf" pod.
+  DESC
+  s.homepage = 'https://github.com/google/protobuf'
+  s.license  = {
+    :type => 'New BSD',
+    :text => <<-LICENSE
+      This license applies to all parts of Protocol Buffers except the following:
+
+      - Atomicops support for generic gcc, located in
+        src/google/protobuf/stubs/atomicops_internals_generic_gcc.h.
+        This file is copyrighted by Red Hat Inc.
+
+      - Atomicops support for AIX/POWER, located in
+        src/google/protobuf/stubs/atomicops_internals_power.h.
+        This file is copyrighted by Bloomberg Finance LP.
+
+      Copyright 2014, Google Inc.  All rights reserved.
+
+      Redistribution and use in source and binary forms, with or without
+      modification, are permitted provided that the following conditions are
+      met:
+
+          * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+          * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following disclaimer
+      in the documentation and/or other materials provided with the
+      distribution.
+          * Neither the name of Google Inc. nor the names of its
+      contributors may be used to endorse or promote products derived from
+      this software without specific prior written permission.
+
+      THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+      "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+      LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+      A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+      OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+      SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+      LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+      DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+      THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+      (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+      OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+      Code generated by the Protocol Buffer compiler is owned by the owner
+      of the input file used when generating it.  This code is not
+      standalone and requires a support library to be linked with it.  This
+      support library is itself covered by the above license.
+    LICENSE
+  }
+  # "The name and email addresses of the library maintainers, not the Podspec maintainer."
+  s.authors  = { 'The Protocol Buffers contributors' => 'protobuf@googlegroups.com' }
+
+  repo = 'google/protobuf'
+  release = "v#{v}"
+  file = "protoc-#{v}-osx-x86_64.zip"
+  s.source = {
+    :http => "https://github.com/#{repo}/releases/download/#{release}/#{file}",
+    # TODO(jcanizales): Add sha1 or sha256
+    # :sha1 => '??',
+  }
+
+  s.preserve_paths = 'protoc',
+                     'google/**/*.proto' # Well-known protobuf types
+
+  # Restrict the protobuf runtime version to the one supported by this version of protoc.
+  s.dependency 'Protobuf', v
+  # For the Protobuf dependency not to complain:
+  s.ios.deployment_target = '7.1'
+  s.osx.deployment_target = '10.9'
+
+  # This is only for local development of protoc: If the Podfile brings this pod from a local
+  # directory using `:path`, CocoaPods won't download the zip file and so the compiler won't be
+  # present in this pod's directory. We use that knowledge to check for the existence of the file
+  # and, if absent, build it from the local sources.
+  repo_root = '../..'
+  plugin = 'grpc_objective_c_plugin'
+  s.prepare_command = <<-CMD
+    if [ ! -f protoc ]; then
+      cd #{repo_root}
+      # This will build protoc from the Protobuf submodule of gRPC, and put it in
+      # #{repo_root}/bins/opt/protobuf.
+      #
+      # TODO(jcanizales): Make won't build protoc from sources if one's locally installed, which
+      # _we do not want_. Find a way for this to always build from source.
+      make #{plugin}
+      cd -
+    fi
+  CMD
+
+end
diff --git a/src/objective-c/BoringSSL.podspec b/src/objective-c/BoringSSL.podspec
index 7d1de8071698936d6b11b8f175d84ae23ea2e13e..42b4434d0d3268ee623340e7ebb77c29d0e9fca4 100644
--- a/src/objective-c/BoringSSL.podspec
+++ b/src/objective-c/BoringSSL.podspec
@@ -31,7 +31,8 @@
 
 Pod::Spec.new do |s|
   s.name     = 'BoringSSL'
-  s.version  = '3.0'
+  version = '4.0'
+  s.version  = version
   s.summary  = 'BoringSSL is a fork of OpenSSL that is designed to meet Google’s needs.'
   # Adapted from the homepage:
   s.description = <<-DESC
@@ -67,31 +68,139 @@ Pod::Spec.new do |s|
   s.authors  = 'Adam Langley', 'David Benjamin', 'Matt Braithwaite'
 
   s.source = { :git => 'https://boringssl.googlesource.com/boringssl',
-               :tag => 'version_for_cocoapods_3.0' }
+               :tag => "version_for_cocoapods_#{version}" }
 
-  s.source_files = 'ssl/*.{h,c}',
-                   'ssl/**/*.{h,c}',
-                   '*.{h,c}',
-                   'crypto/*.{h,c}',
-                   'crypto/**/*.{h,c}',
-                   'include/openssl/*.h'
+  name = 'openssl'
 
-  s.public_header_files = 'include/openssl/*.h'
-  s.header_mappings_dir = 'include'
+  # When creating a dynamic framework, name it openssl.framework instead of BoringSSL.framework.
+  # This lets users write their includes like `#include <openssl/ssl.h>` as opposed to `#include
+  # <BoringSSL/ssl.h>`.
+  s.module_name = name
 
-  s.exclude_files = "**/*_test.*"
+  # When creating a dynamic framework, copy the headers under `include/openssl/` into the root of
+  # the `Headers/` directory of the framework (i.e., not under `Headers/include/openssl`).
+  #
+  # TODO(jcanizales): Debug why this doesn't work on macOS.
+  s.header_mappings_dir = 'include/openssl'
+
+  # The above has an undesired effect when creating a static library: It forces users to write
+  # includes like `#include <BoringSSL/ssl.h>`. `s.header_dir` adds a path prefix to that, and
+  # because Cocoapods lets users omit the pod name when including headers of static libraries, the
+  # following lets users write `#include <openssl/ssl.h>`.
+  s.header_dir = name
+
+  # The module map and umbrella header created automatically by Cocoapods don't work for C libraries
+  # like this one. The following file, and a correct umbrella header, are created on the fly by the
+  # `prepare_command` of this pod.
+  s.module_map = 'include/openssl/module.modulemap'
 
   # We don't need to inhibit all warnings; only -Wno-shorten-64-to-32. But Cocoapods' linter doesn't
   # want that for some reason.
   s.compiler_flags = '-DOPENSSL_NO_ASM', '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w'
   s.requires_arc = false
 
+  # Like many other C libraries, BoringSSL has its public headers under `include/<libname>/` and its
+  # sources and private headers in other directories outside `include/`. Cocoapods' linter doesn't
+  # allow any header to be listed outside the `header_mappings_dir` (even though doing so works in
+  # practice). Because we need our `header_mappings_dir` to be `include/openssl/` for the reason
+  # mentioned above, we work around the linter limitation by dividing the pod into two subspecs, one
+  # for public headers and the other for implementation. Each gets its own `header_mappings_dir`,
+  # making the linter happy.
+  s.subspec 'Interface' do |ss|
+    ss.header_mappings_dir = 'include/openssl'
+    ss.source_files = 'include/openssl/*.h'
+  end
+  s.subspec 'Implementation' do |ss|
+    ss.header_mappings_dir = '.'
+    ss.source_files = 'ssl/*.{h,c}',
+                      'ssl/**/*.{h,c}',
+                      '*.{h,c}',
+                      'crypto/*.{h,c}',
+                      'crypto/**/*.{h,c}'
+    ss.private_header_files = 'ssl/*.h',
+                              'ssl/**/*.h',
+                              '*.h',
+                              'crypto/*.h',
+                              'crypto/**/*.h'
+    ss.exclude_files = '**/*_test.*',
+                       '**/test_*.*',
+                       '**/test/*.*'
+
+    ss.dependency "#{s.name}/Interface", version
+  end
+
   s.prepare_command = <<-END_OF_COMMAND
     # Replace "const BIGNUM *I" in rsa.h with a lowercase i, as the former fails when including
     # OpenSSL in a Swift bridging header (complex.h defines "I", and it's as if the compiler
     # included it in every bridged header).
     sed -E -i '.back' 's/\\*I,/*i,/g' include/openssl/rsa.h
 
+    # Replace `#include "../crypto/internal.h"` in e_tls.c with `#include "../internal.h"`. The
+    # former assumes crypto/ is in the headers search path, which is hard to enforce when using
+    # dynamic frameworks. The latter always works, being relative to the current file.
+    sed -E -i '.back' 's/crypto\\///g' crypto/cipher/e_tls.c
+
+    # Add a module map and an umbrella header
+    cat > include/openssl/umbrella.h <<EOF
+      #include "ssl.h"
+      #include "crypto.h"
+      #include "aes.h"
+      /* The following macros are defined by base.h. The latter is the first file included by the
+         other headers. */
+      #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
+      #  include "arm_arch.h"
+      #endif
+      #include "asn1.h"
+      #include "asn1_mac.h"
+      #include "asn1t.h"
+      #include "blowfish.h"
+      #include "cast.h"
+      #include "chacha.h"
+      #include "cmac.h"
+      #include "conf.h"
+      #include "cpu.h"
+      #include "curve25519.h"
+      #include "des.h"
+      #include "dtls1.h"
+      #include "hkdf.h"
+      #include "md4.h"
+      #include "md5.h"
+      #include "newhope.h"
+      #include "obj_mac.h"
+      #include "objects.h"
+      #include "opensslv.h"
+      #include "ossl_typ.h"
+      #include "pkcs12.h"
+      #include "pkcs7.h"
+      #include "pkcs8.h"
+      #include "poly1305.h"
+      #include "rand.h"
+      #include "rc4.h"
+      #include "ripemd.h"
+      #include "safestack.h"
+      #include "srtp.h"
+      #include "time_support.h"
+      #include "x509.h"
+      #include "x509v3.h"
+    EOF
+    cat > include/openssl/module.modulemap <<EOF
+      framework module openssl {
+        umbrella header "umbrella.h"
+        export *
+        module * { export * }
+      }
+    EOF
+
+    # #include <inttypes.h> fails to compile when building a dynamic framework. libgit2 in
+    # https://github.com/libgit2/libgit2/commit/1ddada422caf8e72ba97dca2568d2bf879fed5f2 and libvpx
+    # in https://chromium.googlesource.com/webm/libvpx/+/1bec0c5a7e885ec792f6bb658eb3f34ad8f37b15
+    # work around it by removing the include. We need four of its macros, so we expand them here.
+    sed -E -i '.back' '/<inttypes.h>/d' include/openssl/bn.h
+    sed -E -i '.back' 's/PRIu32/"u"/g' include/openssl/bn.h
+    sed -E -i '.back' 's/PRIx32/"x"/g' include/openssl/bn.h
+    sed -E -i '.back' 's/PRIu64/"llu"/g' include/openssl/bn.h
+    sed -E -i '.back' 's/PRIx64/"llx"/g' include/openssl/bn.h
+
     # This is a bit ridiculous, but requiring people to install Go in order to build is slightly
     # more ridiculous IMO. To save you from scrolling, this is the last part of the podspec.
     # TODO(jcanizales): Translate err_data_generate.go into a Bash or Ruby script.
@@ -110,7 +219,7 @@ Pod::Spec.new do |s|
        * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
        * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
 
-       /* This file was generated by err_data_generate.go. */
+      /* This file was generated by err_data_generate.go. */
 
       #include <openssl/base.h>
       #include <openssl/err.h>
@@ -152,178 +261,166 @@ Pod::Spec.new do |s|
       OPENSSL_COMPILE_ASSERT(ERR_NUM_LIBS == 33, library_values_changed_num);
 
       const uint32_t kOpenSSLReasonValues[] = {
-          0xc3207ba,
-          0xc3287d4,
-          0xc3307e3,
-          0xc3387f3,
-          0xc340802,
-          0xc34881b,
-          0xc350827,
-          0xc358844,
-          0xc360856,
-          0xc368864,
-          0xc370874,
-          0xc378881,
-          0xc380891,
-          0xc38889c,
-          0xc3908b2,
-          0xc3988c1,
-          0xc3a08d5,
-          0xc3a87c7,
-          0xc3b00b0,
-          0x10321478,
-          0x10329484,
-          0x1033149d,
-          0x103394b0,
-          0x10340de1,
-          0x103494cf,
-          0x103514e4,
-          0x10359516,
-          0x1036152f,
-          0x10369544,
-          0x10371562,
-          0x10379571,
-          0x1038158d,
-          0x103895a8,
-          0x103915b7,
-          0x103995d3,
-          0x103a15ee,
-          0x103a9605,
-          0x103b1616,
-          0x103b962a,
-          0x103c1649,
-          0x103c9658,
-          0x103d166f,
-          0x103d9682,
-          0x103e0b6c,
-          0x103e96b3,
-          0x103f16c6,
-          0x103f96e0,
-          0x104016f0,
-          0x10409704,
-          0x1041171a,
-          0x10419732,
-          0x10421747,
-          0x1042975b,
-          0x1043176d,
-          0x104385d0,
-          0x104408c1,
-          0x10449782,
-          0x10451799,
-          0x104597ae,
-          0x104617bc,
-          0x10469695,
-          0x104714f7,
-          0x104787c7,
-          0x104800b0,
-          0x104894c3,
-          0x14320b4f,
-          0x14328b5d,
-          0x14330b6c,
-          0x14338b7e,
+          0xc320838,
+          0xc328852,
+          0xc330861,
+          0xc338871,
+          0xc340880,
+          0xc348899,
+          0xc3508a5,
+          0xc3588c2,
+          0xc3608d4,
+          0xc3688e2,
+          0xc3708f2,
+          0xc3788ff,
+          0xc38090f,
+          0xc38891a,
+          0xc390930,
+          0xc39893f,
+          0xc3a0953,
+          0xc3a8845,
+          0xc3b00ea,
+          0x10320845,
+          0x103293ab,
+          0x103313b7,
+          0x103393d0,
+          0x103413e3,
+          0x10348e8b,
+          0x10350c19,
+          0x103593f6,
+          0x1036140b,
+          0x1036941e,
+          0x1037143d,
+          0x10379456,
+          0x1038146b,
+          0x10389489,
+          0x10391498,
+          0x103994b4,
+          0x103a14cf,
+          0x103a94de,
+          0x103b14fa,
+          0x103b9515,
+          0x103c152c,
+          0x103c80ea,
+          0x103d153d,
+          0x103d9551,
+          0x103e1570,
+          0x103e957f,
+          0x103f1596,
+          0x103f95a9,
+          0x10400bea,
+          0x104095bc,
+          0x104115da,
+          0x104195ed,
+          0x10421607,
+          0x10429617,
+          0x1043162b,
+          0x10439641,
+          0x10441659,
+          0x1044966e,
+          0x10451682,
+          0x10459694,
+          0x104605fb,
+          0x1046893f,
+          0x104716a9,
+          0x104796c0,
+          0x104816d5,
+          0x104896e3,
+          0x14320bcd,
+          0x14328bdb,
+          0x14330bea,
+          0x14338bfc,
+          0x143400ac,
+          0x143480ea,
           0x18320083,
-          0x18328e47,
-          0x18340e75,
-          0x18348e89,
-          0x18358ec0,
-          0x18368eed,
-          0x18370f00,
-          0x18378f14,
-          0x18380f38,
-          0x18388f46,
-          0x18390f5c,
-          0x18398f70,
-          0x183a0f80,
-          0x183b0f90,
-          0x183b8fa5,
-          0x183c8fd0,
-          0x183d0fe4,
-          0x183d8ff4,
-          0x183e0b9b,
-          0x183e9001,
-          0x183f1013,
-          0x183f901e,
-          0x1840102e,
-          0x1840903f,
-          0x18411050,
-          0x18419062,
-          0x1842108b,
-          0x184290bd,
-          0x184310cc,
-          0x18451135,
-          0x1845914b,
-          0x18461166,
-          0x18468ed8,
-          0x184709d9,
-          0x18478094,
-          0x18480fbc,
-          0x18489101,
-          0x18490e5d,
-          0x18498e9e,
-          0x184a119c,
-          0x184a9119,
-          0x184b10e0,
-          0x184b8e37,
-          0x184c10a4,
-          0x184c866b,
-          0x184d1181,
-          0x203211c3,
-          0x243211cf,
-          0x24328907,
-          0x243311e1,
-          0x243391ee,
-          0x243411fb,
-          0x2434920d,
-          0x2435121c,
-          0x24359239,
-          0x24361246,
-          0x24369254,
-          0x24371262,
-          0x24379270,
-          0x24381279,
-          0x24389286,
-          0x24391299,
-          0x28320b8f,
-          0x28328b9b,
-          0x28330b6c,
-          0x28338bae,
-          0x2c322c0b,
-          0x2c32ac19,
-          0x2c332c2b,
-          0x2c33ac3d,
-          0x2c342c51,
-          0x2c34ac63,
-          0x2c352c7e,
-          0x2c35ac90,
-          0x2c362ca3,
-          0x2c3682f3,
-          0x2c372cb0,
-          0x2c37acc2,
-          0x2c382cd5,
-          0x2c38ace3,
-          0x2c392cf3,
-          0x2c39ad05,
-          0x2c3a2d19,
-          0x2c3aad2a,
-          0x2c3b1359,
-          0x2c3bad3b,
-          0x2c3c2d4f,
-          0x2c3cad65,
-          0x2c3d2d7e,
-          0x2c3dadac,
-          0x2c3e2dba,
-          0x2c3eadd2,
-          0x2c3f2dea,
-          0x2c3fadf7,
-          0x2c402e1a,
-          0x2c40ae39,
-          0x2c4111c3,
-          0x2c41ae4a,
-          0x2c422e5d,
-          0x2c429135,
-          0x2c432e6e,
-          0x2c4386a2,
-          0x2c442d9b,
+          0x18328ee1,
+          0x183300ac,
+          0x18338ef7,
+          0x18340f0b,
+          0x183480ea,
+          0x18350f20,
+          0x18358f38,
+          0x18360f4d,
+          0x18368f61,
+          0x18370f85,
+          0x18378f9b,
+          0x18380faf,
+          0x18388fbf,
+          0x18390a57,
+          0x18398fcf,
+          0x183a0fe4,
+          0x183a8ff8,
+          0x183b0c25,
+          0x183b9005,
+          0x183c1017,
+          0x183c9022,
+          0x183d1032,
+          0x183d9043,
+          0x183e1054,
+          0x183e9066,
+          0x183f108f,
+          0x183f90a8,
+          0x184010c0,
+          0x184086d3,
+          0x203210e7,
+          0x243210f3,
+          0x24328985,
+          0x24331105,
+          0x24339112,
+          0x2434111f,
+          0x24349131,
+          0x24351140,
+          0x2435915d,
+          0x2436116a,
+          0x24369178,
+          0x24371186,
+          0x24379194,
+          0x2438119d,
+          0x243891aa,
+          0x243911bd,
+          0x28320c0d,
+          0x28328c25,
+          0x28330bea,
+          0x28338c38,
+          0x28340c19,
+          0x283480ac,
+          0x283500ea,
+          0x2c322775,
+          0x2c32a783,
+          0x2c332795,
+          0x2c33a7a7,
+          0x2c3427bb,
+          0x2c34a7cd,
+          0x2c3527e8,
+          0x2c35a7fa,
+          0x2c36280d,
+          0x2c36832d,
+          0x2c37281a,
+          0x2c37a82c,
+          0x2c38283f,
+          0x2c38a856,
+          0x2c392864,
+          0x2c39a874,
+          0x2c3a2886,
+          0x2c3aa89a,
+          0x2c3b28ab,
+          0x2c3ba8ca,
+          0x2c3c28de,
+          0x2c3ca8f4,
+          0x2c3d290d,
+          0x2c3da92a,
+          0x2c3e293b,
+          0x2c3ea949,
+          0x2c3f2961,
+          0x2c3fa979,
+          0x2c402986,
+          0x2c4090e7,
+          0x2c412997,
+          0x2c41a9aa,
+          0x2c4210c0,
+          0x2c42a9bb,
+          0x2c430720,
+          0x2c43a8bc,
           0x30320000,
           0x30328015,
           0x3033001f,
@@ -333,479 +430,451 @@ Pod::Spec.new do |s|
           0x3035006b,
           0x30358083,
           0x30360094,
-          0x303680a1,
-          0x303700b0,
-          0x303780bd,
-          0x303800d0,
-          0x303880eb,
-          0x30390100,
-          0x30398114,
-          0x303a0128,
-          0x303a8139,
-          0x303b0152,
-          0x303b816f,
-          0x303c017d,
-          0x303c8191,
-          0x303d01a1,
-          0x303d81ba,
-          0x303e01ca,
-          0x303e81dd,
-          0x303f01ec,
-          0x303f81f8,
-          0x3040020d,
-          0x3040821d,
-          0x30410234,
-          0x30418241,
-          0x30420254,
-          0x30428263,
-          0x30430278,
-          0x30438299,
-          0x304402ac,
-          0x304482bf,
-          0x304502d8,
-          0x304582f3,
-          0x30460310,
-          0x30468329,
-          0x30470337,
-          0x30478348,
-          0x30480357,
-          0x3048836f,
-          0x30490381,
-          0x30498395,
-          0x304a03b4,
-          0x304a83c7,
-          0x304b03d2,
-          0x304b83e1,
-          0x304c03f2,
-          0x304c83fe,
-          0x304d0414,
-          0x304d8422,
-          0x304e0438,
-          0x304e844a,
-          0x304f045c,
-          0x304f846f,
-          0x30500482,
-          0x30508493,
-          0x305104a3,
-          0x305184bb,
-          0x305204d0,
-          0x305284e8,
-          0x305304fc,
-          0x30538514,
-          0x3054052d,
-          0x30548546,
-          0x30550563,
-          0x3055856e,
-          0x30560586,
-          0x30568596,
-          0x305705a7,
-          0x305785ba,
-          0x305805d0,
-          0x305885d9,
-          0x305905ee,
-          0x30598601,
-          0x305a0610,
-          0x305a8630,
-          0x305b063f,
-          0x305b864b,
-          0x305c066b,
-          0x305c8687,
-          0x305d0698,
-          0x305d86a2,
-          0x34320ac9,
-          0x34328add,
-          0x34330afa,
-          0x34338b0d,
-          0x34340b1c,
-          0x34348b39,
+          0x303680ac,
+          0x303700b9,
+          0x303780c8,
+          0x303800ea,
+          0x303880f7,
+          0x3039010a,
+          0x30398125,
+          0x303a013a,
+          0x303a814e,
+          0x303b0162,
+          0x303b8173,
+          0x303c018c,
+          0x303c81a9,
+          0x303d01b7,
+          0x303d81cb,
+          0x303e01db,
+          0x303e81f4,
+          0x303f0204,
+          0x303f8217,
+          0x30400226,
+          0x30408232,
+          0x30410247,
+          0x30418257,
+          0x3042026e,
+          0x3042827b,
+          0x3043028e,
+          0x3043829d,
+          0x304402b2,
+          0x304482d3,
+          0x304502e6,
+          0x304582f9,
+          0x30460312,
+          0x3046832d,
+          0x3047034a,
+          0x30478363,
+          0x30480371,
+          0x30488382,
+          0x30490391,
+          0x304983a9,
+          0x304a03bb,
+          0x304a83cf,
+          0x304b03ee,
+          0x304b8401,
+          0x304c040c,
+          0x304c841d,
+          0x304d0429,
+          0x304d843f,
+          0x304e044d,
+          0x304e8463,
+          0x304f0475,
+          0x304f8487,
+          0x3050049a,
+          0x305084ad,
+          0x305104be,
+          0x305184ce,
+          0x305204e6,
+          0x305284fb,
+          0x30530513,
+          0x30538527,
+          0x3054053f,
+          0x30548558,
+          0x30550571,
+          0x3055858e,
+          0x30560599,
+          0x305685b1,
+          0x305705c1,
+          0x305785d2,
+          0x305805e5,
+          0x305885fb,
+          0x30590604,
+          0x30598619,
+          0x305a062c,
+          0x305a863b,
+          0x305b065b,
+          0x305b866a,
+          0x305c068b,
+          0x305c86a7,
+          0x305d06b3,
+          0x305d86d3,
+          0x305e06ef,
+          0x305e8700,
+          0x305f0716,
+          0x305f8720,
+          0x34320b47,
+          0x34328b5b,
+          0x34330b78,
+          0x34338b8b,
+          0x34340b9a,
+          0x34348bb7,
           0x3c320083,
-          0x3c328bd8,
-          0x3c330bf1,
-          0x3c338c0c,
-          0x3c340c29,
-          0x3c348c44,
-          0x3c350c5f,
-          0x3c358c74,
-          0x3c360c8d,
-          0x3c368ca5,
-          0x3c370cb6,
-          0x3c378cc4,
-          0x3c380cd1,
-          0x3c388ce5,
-          0x3c390b9b,
-          0x3c398cf9,
-          0x3c3a0d0d,
-          0x3c3a8881,
-          0x3c3b0d1d,
-          0x3c3b8d38,
-          0x3c3c0d4a,
-          0x3c3c8d60,
-          0x3c3d0d6a,
-          0x3c3d8d7e,
-          0x3c3e0d8c,
-          0x3c3e8db1,
-          0x3c3f0bc4,
-          0x3c3f8d9a,
-          0x403217d3,
-          0x403297e9,
-          0x40331817,
-          0x40339821,
-          0x40341838,
-          0x40349856,
-          0x40351866,
-          0x40359878,
-          0x40361885,
-          0x40369891,
-          0x403718a6,
-          0x403798bb,
-          0x403818cd,
-          0x403898d8,
-          0x403918ea,
-          0x40398de1,
-          0x403a18fa,
-          0x403a990d,
-          0x403b192e,
-          0x403b993f,
-          0x403c194f,
-          0x403c8064,
-          0x403d195b,
-          0x403d9977,
-          0x403e198d,
-          0x403e999c,
-          0x403f19af,
-          0x403f99c9,
-          0x404019d7,
-          0x404099ec,
-          0x40411a00,
-          0x40419a1d,
-          0x40421a36,
-          0x40429a51,
-          0x40431a6a,
-          0x40439a7d,
-          0x40441a91,
-          0x40449aa9,
-          0x40451af4,
-          0x40459b02,
-          0x40461b20,
-          0x40468094,
-          0x40471b35,
-          0x40479b47,
-          0x40481b6b,
-          0x40489b99,
-          0x40491bad,
-          0x40499bc2,
-          0x404a1bdb,
-          0x404a9c15,
-          0x404b1c46,
-          0x404b9c7c,
-          0x404c1c97,
-          0x404c9cb1,
-          0x404d1cc8,
-          0x404d9cf0,
-          0x404e1d07,
-          0x404e9d23,
-          0x404f1d3f,
-          0x404f9d60,
-          0x40501d82,
-          0x40509d9e,
-          0x40511db2,
-          0x40519dbf,
-          0x40521dd6,
-          0x40529de6,
-          0x40531df6,
-          0x40539e0a,
-          0x40541e25,
-          0x40549e35,
-          0x40551e4c,
-          0x40559e5b,
-          0x40561e88,
-          0x40569ea0,
-          0x40571ebc,
-          0x40579ed5,
-          0x40581ee8,
-          0x40589efd,
-          0x40591f20,
-          0x40599f4b,
-          0x405a1f58,
-          0x405a9f71,
-          0x405b1f89,
-          0x405b9f9c,
-          0x405c1fb1,
-          0x405c9fc3,
-          0x405d1fd8,
-          0x405d9fe8,
-          0x405e2001,
-          0x405ea015,
-          0x405f2025,
-          0x405fa03d,
-          0x4060204e,
-          0x4060a061,
-          0x40612072,
-          0x4061a090,
-          0x406220a1,
-          0x4062a0ae,
-          0x406320c5,
-          0x4063a106,
-          0x4064211d,
-          0x4064a12a,
-          0x40652138,
-          0x4065a15a,
-          0x40662182,
-          0x4066a197,
-          0x406721ae,
-          0x4067a1bf,
-          0x406821d0,
-          0x4068a1e1,
-          0x406921f6,
-          0x4069a20d,
-          0x406a221e,
-          0x406aa237,
-          0x406b2252,
-          0x406ba269,
-          0x406c22d6,
-          0x406ca2f7,
-          0x406d230a,
-          0x406da32b,
-          0x406e2346,
-          0x406ea38f,
-          0x406f23b0,
-          0x406fa3d6,
-          0x407023f6,
-          0x4070a412,
-          0x4071259f,
-          0x4071a5c2,
-          0x407225d8,
-          0x4072a5f7,
-          0x4073260f,
-          0x4073a62f,
-          0x40742859,
-          0x4074a87e,
-          0x40752899,
-          0x4075a8b8,
-          0x407628e7,
-          0x4076a90f,
-          0x40772940,
-          0x4077a95f,
-          0x40782999,
-          0x4078a9b0,
-          0x407929c3,
-          0x4079a9e0,
-          0x407a0782,
-          0x407aa9f2,
-          0x407b2a05,
-          0x407baa1e,
-          0x407c2a36,
-          0x407c90bd,
-          0x407d2a4a,
-          0x407daa64,
-          0x407e2a75,
-          0x407eaa89,
-          0x407f2a97,
-          0x407faab2,
-          0x40801286,
-          0x4080aad7,
-          0x40812af9,
-          0x4081ab14,
-          0x40822b29,
-          0x4082ab41,
-          0x40832b59,
-          0x4083ab70,
-          0x40842b86,
-          0x4084ab92,
-          0x40852ba5,
-          0x4085abba,
-          0x40862bcc,
-          0x4086abe1,
-          0x40872bea,
-          0x40879cde,
-          0x40880083,
-          0x4088a0e5,
-          0x40890a17,
-          0x4089a281,
-          0x408a1bfe,
-          0x408aa2ab,
-          0x408b2928,
-          0x408ba984,
-          0x408c2361,
-          0x408c9c2f,
-          0x408d1c64,
-          0x408d9e76,
-          0x408e1ab9,
-          0x408e9add,
-          0x408f1f2e,
-          0x408f9b8b,
-          0x41f424ca,
-          0x41f9255c,
-          0x41fe244f,
-          0x41fea680,
-          0x41ff2771,
-          0x420324e3,
-          0x42082505,
-          0x4208a541,
-          0x42092433,
-          0x4209a57b,
-          0x420a248a,
-          0x420aa46a,
-          0x420b24aa,
-          0x420ba523,
-          0x420c278d,
-          0x420ca64d,
-          0x420d2667,
-          0x420da69e,
-          0x421226b8,
-          0x42172754,
-          0x4217a6fa,
-          0x421c271c,
-          0x421f26d7,
-          0x422127a4,
-          0x42262737,
-          0x422b283d,
-          0x422ba806,
-          0x422c2825,
-          0x422ca7e0,
-          0x422d27bf,
-          0x443206ad,
-          0x443286bc,
-          0x443306c8,
-          0x443386d6,
-          0x443406e9,
-          0x443486fa,
-          0x44350701,
-          0x4435870b,
-          0x4436071e,
-          0x44368734,
-          0x44370746,
-          0x44378753,
-          0x44380762,
-          0x4438876a,
-          0x44390782,
-          0x44398790,
-          0x443a07a3,
-          0x4c3212b0,
-          0x4c3292c0,
-          0x4c3312d3,
-          0x4c3392f3,
-          0x4c340094,
-          0x4c3480b0,
-          0x4c3512ff,
-          0x4c35930d,
-          0x4c361329,
-          0x4c36933c,
-          0x4c37134b,
-          0x4c379359,
-          0x4c38136e,
-          0x4c38937a,
-          0x4c39139a,
-          0x4c3993c4,
-          0x4c3a13dd,
-          0x4c3a93f6,
-          0x4c3b05d0,
-          0x4c3b940f,
-          0x4c3c1421,
-          0x4c3c9430,
-          0x4c3d10bd,
-          0x4c3d9449,
-          0x4c3e1456,
-          0x50322e80,
-          0x5032ae8f,
-          0x50332e9a,
-          0x5033aeaa,
-          0x50342ec3,
-          0x5034aedd,
-          0x50352eeb,
-          0x5035af01,
-          0x50362f13,
-          0x5036af29,
-          0x50372f42,
-          0x5037af55,
-          0x50382f6d,
-          0x5038af7e,
-          0x50392f93,
-          0x5039afa7,
-          0x503a2fc7,
-          0x503aafdd,
-          0x503b2ff5,
-          0x503bb007,
-          0x503c3023,
-          0x503cb03a,
-          0x503d3053,
-          0x503db069,
-          0x503e3076,
-          0x503eb08c,
-          0x503f309e,
-          0x503f8348,
-          0x504030b1,
-          0x5040b0c1,
-          0x504130db,
-          0x5041b0ea,
-          0x50423104,
-          0x5042b121,
-          0x50433131,
-          0x5043b141,
-          0x50443150,
-          0x50448414,
-          0x50453164,
-          0x5045b182,
-          0x50463195,
-          0x5046b1ab,
-          0x504731bd,
-          0x5047b1d2,
-          0x504831f8,
-          0x5048b206,
-          0x50493219,
-          0x5049b22e,
-          0x504a3244,
-          0x504ab254,
-          0x504b3274,
-          0x504bb287,
-          0x504c32aa,
-          0x504cb2d8,
-          0x504d32ea,
-          0x504db307,
-          0x504e3322,
-          0x504eb33e,
-          0x504f3350,
-          0x504fb367,
-          0x50503376,
-          0x50508687,
-          0x50513389,
-          0x58320e1f,
-          0x68320de1,
-          0x68328b9b,
-          0x68330bae,
-          0x68338def,
-          0x68340dff,
-          0x683480b0,
-          0x6c320dbd,
-          0x6c328b7e,
-          0x6c330dc8,
-          0x7432098d,
-          0x783208f2,
-          0x78328907,
-          0x78330913,
+          0x3c328c62,
+          0x3c330c7b,
+          0x3c338c96,
+          0x3c340cb3,
+          0x3c348cdd,
+          0x3c350cf8,
+          0x3c358d1e,
+          0x3c360d37,
+          0x3c368d4f,
+          0x3c370d60,
+          0x3c378d6e,
+          0x3c380d7b,
+          0x3c388d8f,
+          0x3c390c25,
+          0x3c398da3,
+          0x3c3a0db7,
+          0x3c3a88ff,
+          0x3c3b0dc7,
+          0x3c3b8de2,
+          0x3c3c0df4,
+          0x3c3c8e0a,
+          0x3c3d0e14,
+          0x3c3d8e28,
+          0x3c3e0e36,
+          0x3c3e8e5b,
+          0x3c3f0c4e,
+          0x3c3f8e44,
+          0x3c4000ac,
+          0x3c4080ea,
+          0x3c410cce,
+          0x3c418d0d,
+          0x403216fa,
+          0x40329710,
+          0x4033173e,
+          0x40339748,
+          0x4034175f,
+          0x4034977d,
+          0x4035178d,
+          0x4035979f,
+          0x403617ac,
+          0x403697b8,
+          0x403717cd,
+          0x403797df,
+          0x403817ea,
+          0x403897fc,
+          0x40390e8b,
+          0x4039980c,
+          0x403a181f,
+          0x403a9840,
+          0x403b1851,
+          0x403b9861,
+          0x403c0064,
+          0x403c8083,
+          0x403d186d,
+          0x403d9883,
+          0x403e1892,
+          0x403e98a5,
+          0x403f18bf,
+          0x403f98cd,
+          0x404018e2,
+          0x404098f6,
+          0x40411913,
+          0x4041992e,
+          0x40421947,
+          0x4042995a,
+          0x4043196e,
+          0x40439986,
+          0x4044199d,
+          0x404480ac,
+          0x404519b2,
+          0x404599c4,
+          0x404619e8,
+          0x40469a08,
+          0x40471a16,
+          0x40479a2a,
+          0x40481a3f,
+          0x40489a58,
+          0x40491a6f,
+          0x40499a89,
+          0x404a1aa0,
+          0x404a9abe,
+          0x404b1ad6,
+          0x404b9aed,
+          0x404c1b03,
+          0x404c9b15,
+          0x404d1b36,
+          0x404d9b58,
+          0x404e1b6c,
+          0x404e9b79,
+          0x404f1b90,
+          0x404f9ba0,
+          0x40501bca,
+          0x40509bde,
+          0x40511bf9,
+          0x40519c09,
+          0x40521c20,
+          0x40529c32,
+          0x40531c4a,
+          0x40539c5d,
+          0x40541c72,
+          0x40549c95,
+          0x40551ca3,
+          0x40559cc0,
+          0x40561ccd,
+          0x40569ce6,
+          0x40571cfe,
+          0x40579d11,
+          0x40581d26,
+          0x40589d38,
+          0x40591d48,
+          0x40599d61,
+          0x405a1d75,
+          0x405a9d85,
+          0x405b1d9d,
+          0x405b9dae,
+          0x405c1dc1,
+          0x405c9dd2,
+          0x405d1ddf,
+          0x405d9df6,
+          0x405e1e16,
+          0x405e8a95,
+          0x405f1e37,
+          0x405f9e44,
+          0x40601e52,
+          0x40609e74,
+          0x40611e9c,
+          0x40619eb1,
+          0x40621ec8,
+          0x40629ed9,
+          0x40631eea,
+          0x40639eff,
+          0x40641f16,
+          0x40649f27,
+          0x40651f42,
+          0x40659f59,
+          0x40661f71,
+          0x40669f9b,
+          0x40671fc6,
+          0x40679fe7,
+          0x40681ffa,
+          0x4068a01b,
+          0x4069204d,
+          0x4069a07b,
+          0x406a209c,
+          0x406aa0bc,
+          0x406b2244,
+          0x406ba267,
+          0x406c227d,
+          0x406ca4a9,
+          0x406d24d8,
+          0x406da500,
+          0x406e2519,
+          0x406ea531,
+          0x406f2550,
+          0x406fa565,
+          0x40702578,
+          0x4070a595,
+          0x40710800,
+          0x4071a5a7,
+          0x407225ba,
+          0x4072a5d3,
+          0x407325eb,
+          0x4073936d,
+          0x407425ff,
+          0x4074a619,
+          0x4075262a,
+          0x4075a63e,
+          0x4076264c,
+          0x407691aa,
+          0x40772671,
+          0x4077a693,
+          0x407826ae,
+          0x4078a6c3,
+          0x407926da,
+          0x4079a6f0,
+          0x407a26fc,
+          0x407aa70f,
+          0x407b2724,
+          0x407ba736,
+          0x407c274b,
+          0x407ca754,
+          0x407d2036,
+          0x407d9bb0,
+          0x41f4216f,
+          0x41f92201,
+          0x41fe20f4,
+          0x41fea2d0,
+          0x41ff23c1,
+          0x42032188,
+          0x420821aa,
+          0x4208a1e6,
+          0x420920d8,
+          0x4209a220,
+          0x420a212f,
+          0x420aa10f,
+          0x420b214f,
+          0x420ba1c8,
+          0x420c23dd,
+          0x420ca29d,
+          0x420d22b7,
+          0x420da2ee,
+          0x42122308,
+          0x421723a4,
+          0x4217a34a,
+          0x421c236c,
+          0x421f2327,
+          0x422123f4,
+          0x42262387,
+          0x422b248d,
+          0x422ba456,
+          0x422c2475,
+          0x422ca430,
+          0x422d240f,
+          0x4432072b,
+          0x4432873a,
+          0x44330746,
+          0x44338754,
+          0x44340767,
+          0x44348778,
+          0x4435077f,
+          0x44358789,
+          0x4436079c,
+          0x443687b2,
+          0x443707c4,
+          0x443787d1,
+          0x443807e0,
+          0x443887e8,
+          0x44390800,
+          0x4439880e,
+          0x443a0821,
+          0x4c3211d4,
+          0x4c3291e4,
+          0x4c3311f7,
+          0x4c339217,
+          0x4c3400ac,
+          0x4c3480ea,
+          0x4c351223,
+          0x4c359231,
+          0x4c36124d,
+          0x4c369260,
+          0x4c37126f,
+          0x4c37927d,
+          0x4c381292,
+          0x4c38929e,
+          0x4c3912be,
+          0x4c3992e8,
+          0x4c3a1301,
+          0x4c3a931a,
+          0x4c3b05fb,
+          0x4c3b9333,
+          0x4c3c1345,
+          0x4c3c9354,
+          0x4c3d136d,
+          0x4c3d937c,
+          0x4c3e1389,
+          0x503229cd,
+          0x5032a9dc,
+          0x503329e7,
+          0x5033a9f7,
+          0x50342a10,
+          0x5034aa2a,
+          0x50352a38,
+          0x5035aa4e,
+          0x50362a60,
+          0x5036aa76,
+          0x50372a8f,
+          0x5037aaa2,
+          0x50382aba,
+          0x5038aacb,
+          0x50392ae0,
+          0x5039aaf4,
+          0x503a2b14,
+          0x503aab2a,
+          0x503b2b42,
+          0x503bab54,
+          0x503c2b70,
+          0x503cab87,
+          0x503d2ba0,
+          0x503dabb6,
+          0x503e2bc3,
+          0x503eabd9,
+          0x503f2beb,
+          0x503f8382,
+          0x50402bfe,
+          0x5040ac0e,
+          0x50412c28,
+          0x5041ac37,
+          0x50422c51,
+          0x5042ac6e,
+          0x50432c7e,
+          0x5043ac8e,
+          0x50442c9d,
+          0x5044843f,
+          0x50452cb1,
+          0x5045accf,
+          0x50462ce2,
+          0x5046acf8,
+          0x50472d0a,
+          0x5047ad1f,
+          0x50482d45,
+          0x5048ad53,
+          0x50492d66,
+          0x5049ad7b,
+          0x504a2d91,
+          0x504aada1,
+          0x504b2dc1,
+          0x504badd4,
+          0x504c2df7,
+          0x504cae25,
+          0x504d2e37,
+          0x504dae54,
+          0x504e2e6f,
+          0x504eae8b,
+          0x504f2e9d,
+          0x504faeb4,
+          0x50502ec3,
+          0x505086ef,
+          0x50512ed6,
+          0x58320ec9,
+          0x68320e8b,
+          0x68328c25,
+          0x68330c38,
+          0x68338e99,
+          0x68340ea9,
+          0x683480ea,
+          0x6c320e67,
+          0x6c328bfc,
+          0x6c330e72,
+          0x74320a0b,
+          0x78320970,
+          0x78328985,
+          0x78330991,
           0x78338083,
-          0x78340922,
-          0x78348937,
-          0x78350956,
-          0x78358978,
-          0x7836098d,
-          0x783689a3,
-          0x783709b3,
-          0x783789c6,
-          0x783809d9,
-          0x783889eb,
-          0x783909f8,
-          0x78398a17,
-          0x783a0a2c,
-          0x783a8a3a,
-          0x783b0a44,
-          0x783b8a58,
-          0x783c0a6f,
-          0x783c8a84,
-          0x783d0a9b,
-          0x783d8ab0,
-          0x783e0a06,
-          0x7c3211b2,
+          0x783409a0,
+          0x783489b5,
+          0x783509d4,
+          0x783589f6,
+          0x78360a0b,
+          0x78368a21,
+          0x78370a31,
+          0x78378a44,
+          0x78380a57,
+          0x78388a69,
+          0x78390a76,
+          0x78398a95,
+          0x783a0aaa,
+          0x783a8ab8,
+          0x783b0ac2,
+          0x783b8ad6,
+          0x783c0aed,
+          0x783c8b02,
+          0x783d0b19,
+          0x783d8b2e,
+          0x783e0a84,
+          0x7c3210d6,
       };
 
       const size_t kOpenSSLReasonValuesLen = sizeof(kOpenSSLReasonValues) / sizeof(kOpenSSLReasonValues[0]);
@@ -819,8 +888,10 @@ Pod::Spec.new do |s|
           "BN_LIB\\0"
           "BOOLEAN_IS_WRONG_LENGTH\\0"
           "BUFFER_TOO_SMALL\\0"
+          "CONTEXT_NOT_INITIALISED\\0"
           "DECODE_ERROR\\0"
           "DEPTH_EXCEEDED\\0"
+          "DIGEST_AND_KEY_TYPE_NOT_SUPPORTED\\0"
           "ENCODE_ERROR\\0"
           "ERROR_GETTING_TIME\\0"
           "EXPECTING_AN_ASN1_SEQUENCE\\0"
@@ -861,7 +932,6 @@ Pod::Spec.new do |s|
           "INVALID_UNIVERSALSTRING_LENGTH\\0"
           "INVALID_UTF8STRING\\0"
           "LIST_ERROR\\0"
-          "MALLOC_FAILURE\\0"
           "MISSING_ASN1_EOS\\0"
           "MISSING_EOC\\0"
           "MISSING_SECOND_NUMBER\\0"
@@ -893,10 +963,13 @@ Pod::Spec.new do |s|
           "UNEXPECTED_EOC\\0"
           "UNIVERSALSTRING_IS_WRONG_LENGTH\\0"
           "UNKNOWN_FORMAT\\0"
+          "UNKNOWN_MESSAGE_DIGEST_ALGORITHM\\0"
+          "UNKNOWN_SIGNATURE_ALGORITHM\\0"
           "UNKNOWN_TAG\\0"
           "UNSUPPORTED_ANY_DEFINED_BY_TYPE\\0"
           "UNSUPPORTED_PUBLIC_KEY_TYPE\\0"
           "UNSUPPORTED_TYPE\\0"
+          "WRONG_PUBLIC_KEY_TYPE\\0"
           "WRONG_TAG\\0"
           "WRONG_TYPE\\0"
           "BAD_FOPEN_MODE\\0"
@@ -969,6 +1042,7 @@ Pod::Spec.new do |s|
           "MODULUS_TOO_LARGE\\0"
           "NO_PRIVATE_VALUE\\0"
           "BAD_Q_VALUE\\0"
+          "BAD_VERSION\\0"
           "MISSING_PARAMETERS\\0"
           "NEED_NEW_SETUP_VALUES\\0"
           "BIGNUM_OUT_OF_RANGE\\0"
@@ -976,8 +1050,10 @@ Pod::Spec.new do |s|
           "D2I_ECPKPARAMETERS_FAILURE\\0"
           "EC_GROUP_NEW_BY_NAME_FAILURE\\0"
           "GROUP2PKPARAMETERS_FAILURE\\0"
+          "GROUP_MISMATCH\\0"
           "I2D_ECPKPARAMETERS_FAILURE\\0"
           "INCOMPATIBLE_OBJECTS\\0"
+          "INVALID_COFACTOR\\0"
           "INVALID_COMPRESSED_POINT\\0"
           "INVALID_COMPRESSION_BIT\\0"
           "INVALID_ENCODING\\0"
@@ -1002,27 +1078,19 @@ Pod::Spec.new do |s|
           "NOT_IMPLEMENTED\\0"
           "RANDOM_NUMBER_GENERATION_FAILED\\0"
           "OPERATION_NOT_SUPPORTED\\0"
-          "BN_DECODE_ERROR\\0"
           "COMMAND_NOT_SUPPORTED\\0"
-          "CONTEXT_NOT_INITIALISED\\0"
           "DIFFERENT_KEY_TYPES\\0"
           "DIFFERENT_PARAMETERS\\0"
-          "DIGEST_AND_KEY_TYPE_NOT_SUPPORTED\\0"
           "EXPECTING_AN_EC_KEY_KEY\\0"
           "EXPECTING_AN_RSA_KEY\\0"
-          "EXPECTING_A_DH_KEY\\0"
           "EXPECTING_A_DSA_KEY\\0"
           "ILLEGAL_OR_UNSUPPORTED_PADDING_MODE\\0"
-          "INVALID_CURVE\\0"
           "INVALID_DIGEST_LENGTH\\0"
           "INVALID_DIGEST_TYPE\\0"
           "INVALID_KEYBITS\\0"
           "INVALID_MGF1_MD\\0"
           "INVALID_PADDING_MODE\\0"
-          "INVALID_PSS_PARAMETERS\\0"
           "INVALID_PSS_SALTLEN\\0"
-          "INVALID_SALT_LENGTH\\0"
-          "INVALID_TRAILER\\0"
           "KEYS_NOT_SET\\0"
           "NO_DEFAULT_DIGEST\\0"
           "NO_KEY_SET\\0"
@@ -1032,17 +1100,8 @@ Pod::Spec.new do |s|
           "NO_PARAMETERS_SET\\0"
           "OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE\\0"
           "OPERATON_NOT_INITIALIZED\\0"
-          "PARAMETER_ENCODING_ERROR\\0"
-          "UNKNOWN_DIGEST\\0"
-          "UNKNOWN_MASK_DIGEST\\0"
-          "UNKNOWN_MESSAGE_DIGEST_ALGORITHM\\0"
           "UNKNOWN_PUBLIC_KEY_TYPE\\0"
-          "UNKNOWN_SIGNATURE_ALGORITHM\\0"
           "UNSUPPORTED_ALGORITHM\\0"
-          "UNSUPPORTED_MASK_ALGORITHM\\0"
-          "UNSUPPORTED_MASK_PARAMETER\\0"
-          "UNSUPPORTED_SIGNATURE_TYPE\\0"
-          "WRONG_PUBLIC_KEY_TYPE\\0"
           "OUTPUT_TOO_LARGE\\0"
           "UNKNOWN_NID\\0"
           "BAD_BASE64_DECODE\\0"
@@ -1078,13 +1137,13 @@ Pod::Spec.new do |s|
           "UNKNOWN_ALGORITHM\\0"
           "UNKNOWN_CIPHER\\0"
           "UNKNOWN_CIPHER_ALGORITHM\\0"
+          "UNKNOWN_DIGEST\\0"
           "UNKNOWN_HASH\\0"
           "UNSUPPORTED_PRIVATE_KEY_ALGORITHM\\0"
           "BAD_E_VALUE\\0"
           "BAD_FIXED_HEADER_DECRYPT\\0"
           "BAD_PAD_BYTE_COUNT\\0"
           "BAD_RSA_PARAMETERS\\0"
-          "BAD_VERSION\\0"
           "BLOCK_TYPE_IS_NOT_01\\0"
           "BN_NOT_INITIALIZED\\0"
           "CANNOT_RECOVER_MULTI_PRIME_KEY\\0"
@@ -1129,7 +1188,6 @@ Pod::Spec.new do |s|
           "BAD_DIGEST_LENGTH\\0"
           "BAD_ECC_CERT\\0"
           "BAD_ECPOINT\\0"
-          "BAD_HANDSHAKE_LENGTH\\0"
           "BAD_HANDSHAKE_RECORD\\0"
           "BAD_HELLO_REQUEST\\0"
           "BAD_LENGTH\\0"
@@ -1140,7 +1198,6 @@ Pod::Spec.new do |s|
           "BAD_SSL_FILETYPE\\0"
           "BAD_WRITE_RETRY\\0"
           "BIO_NOT_SET\\0"
-          "CANNOT_SERIALIZE_PUBLIC_KEY\\0"
           "CA_DN_LENGTH_MISMATCH\\0"
           "CA_DN_TOO_LONG\\0"
           "CCS_RECEIVED_EARLY\\0"
@@ -1149,17 +1206,12 @@ Pod::Spec.new do |s|
           "CERT_LENGTH_MISMATCH\\0"
           "CHANNEL_ID_NOT_P256\\0"
           "CHANNEL_ID_SIGNATURE_INVALID\\0"
-          "CIPHER_CODE_WRONG_LENGTH\\0"
           "CIPHER_OR_HASH_UNAVAILABLE\\0"
           "CLIENTHELLO_PARSE_FAILED\\0"
           "CLIENTHELLO_TLSEXT\\0"
           "CONNECTION_REJECTED\\0"
           "CONNECTION_TYPE_NOT_SET\\0"
-          "COOKIE_MISMATCH\\0"
-          "CUSTOM_EXTENSION_CONTENTS_TOO_LARGE\\0"
           "CUSTOM_EXTENSION_ERROR\\0"
-          "D2I_ECDSA_SIG\\0"
-          "DATA_BETWEEN_CCS_AND_FINISHED\\0"
           "DATA_LENGTH_TOO_LONG\\0"
           "DECRYPTION_FAILED\\0"
           "DECRYPTION_FAILED_OR_BAD_RECORD_MAC\\0"
@@ -1168,38 +1220,28 @@ Pod::Spec.new do |s|
           "DIGEST_CHECK_FAILED\\0"
           "DTLS_MESSAGE_TOO_BIG\\0"
           "ECC_CERT_NOT_FOR_SIGNING\\0"
-          "EMPTY_SRTP_PROTECTION_PROFILE_LIST\\0"
           "EMS_STATE_INCONSISTENT\\0"
           "ENCRYPTED_LENGTH_TOO_LONG\\0"
           "ERROR_ADDING_EXTENSION\\0"
           "ERROR_IN_RECEIVED_CIPHER_LIST\\0"
           "ERROR_PARSING_EXTENSION\\0"
-          "EVP_DIGESTSIGNFINAL_FAILED\\0"
-          "EVP_DIGESTSIGNINIT_FAILED\\0"
           "EXCESSIVE_MESSAGE_SIZE\\0"
           "EXTRA_DATA_IN_MESSAGE\\0"
           "FRAGMENT_MISMATCH\\0"
-          "GOT_A_FIN_BEFORE_A_CCS\\0"
-          "GOT_CHANNEL_ID_BEFORE_A_CCS\\0"
-          "GOT_NEXT_PROTO_BEFORE_A_CCS\\0"
           "GOT_NEXT_PROTO_WITHOUT_EXTENSION\\0"
           "HANDSHAKE_FAILURE_ON_CLIENT_HELLO\\0"
-          "HANDSHAKE_RECORD_BEFORE_CCS\\0"
           "HTTPS_PROXY_REQUEST\\0"
           "HTTP_REQUEST\\0"
           "INAPPROPRIATE_FALLBACK\\0"
           "INVALID_COMMAND\\0"
           "INVALID_MESSAGE\\0"
+          "INVALID_OUTER_RECORD_TYPE\\0"
           "INVALID_SSL_SESSION\\0"
           "INVALID_TICKET_KEYS_LENGTH\\0"
           "LENGTH_MISMATCH\\0"
           "LIBRARY_HAS_NO_CIPHERS\\0"
-          "MISSING_DH_KEY\\0"
-          "MISSING_ECDSA_SIGNING_CERT\\0"
           "MISSING_EXTENSION\\0"
           "MISSING_RSA_CERTIFICATE\\0"
-          "MISSING_RSA_ENCRYPTING_CERT\\0"
-          "MISSING_RSA_SIGNING_CERT\\0"
           "MISSING_TMP_DH_KEY\\0"
           "MISSING_TMP_ECDH_KEY\\0"
           "MIXED_SPECIAL_OPERATOR_WITH_GROUPS\\0"
@@ -1211,7 +1253,6 @@ Pod::Spec.new do |s|
           "NO_CERTIFICATE_SET\\0"
           "NO_CIPHERS_AVAILABLE\\0"
           "NO_CIPHERS_PASSED\\0"
-          "NO_CIPHERS_SPECIFIED\\0"
           "NO_CIPHER_MATCH\\0"
           "NO_COMPRESSION_SPECIFIED\\0"
           "NO_METHOD_SPECIFIED\\0"
@@ -1220,13 +1261,10 @@ Pod::Spec.new do |s|
           "NO_RENEGOTIATION\\0"
           "NO_REQUIRED_DIGEST\\0"
           "NO_SHARED_CIPHER\\0"
-          "NO_SHARED_SIGATURE_ALGORITHMS\\0"
-          "NO_SRTP_PROFILES\\0"
           "NULL_SSL_CTX\\0"
           "NULL_SSL_METHOD_PASSED\\0"
           "OLD_SESSION_CIPHER_NOT_RETURNED\\0"
           "OLD_SESSION_VERSION_NOT_RETURNED\\0"
-          "PACKET_LENGTH_TOO_LONG\\0"
           "PARSE_TLSEXT\\0"
           "PATH_TOO_LONG\\0"
           "PEER_DID_NOT_RETURN_A_CERTIFICATE\\0"
@@ -1235,11 +1273,9 @@ Pod::Spec.new do |s|
           "PSK_IDENTITY_NOT_FOUND\\0"
           "PSK_NO_CLIENT_CB\\0"
           "PSK_NO_SERVER_CB\\0"
-          "READ_BIO_NOT_SET\\0"
           "READ_TIMEOUT_EXPIRED\\0"
           "RECORD_LENGTH_MISMATCH\\0"
           "RECORD_TOO_LARGE\\0"
-          "RENEGOTIATE_EXT_TOO_LONG\\0"
           "RENEGOTIATION_ENCODING_ERR\\0"
           "RENEGOTIATION_MISMATCH\\0"
           "REQUIRED_CIPHER_MISSING\\0"
@@ -1249,13 +1285,11 @@ Pod::Spec.new do |s|
           "SERVERHELLO_TLSEXT\\0"
           "SESSION_ID_CONTEXT_UNINITIALIZED\\0"
           "SESSION_MAY_NOT_BE_CREATED\\0"
-          "SIGNATURE_ALGORITHMS_ERROR\\0"
+          "SHUTDOWN_WHILE_IN_INIT\\0"
           "SIGNATURE_ALGORITHMS_EXTENSION_SENT_BY_SERVER\\0"
           "SRTP_COULD_NOT_ALLOCATE_PROFILES\\0"
-          "SRTP_PROTECTION_PROFILE_LIST_TOO_LONG\\0"
           "SRTP_UNKNOWN_PROTECTION_PROFILE\\0"
           "SSL3_EXT_INVALID_SERVERNAME\\0"
-          "SSL3_EXT_INVALID_SERVERNAME_TYPE\\0"
           "SSLV3_ALERT_BAD_CERTIFICATE\\0"
           "SSLV3_ALERT_BAD_RECORD_MAC\\0"
           "SSLV3_ALERT_CERTIFICATE_EXPIRED\\0"
@@ -1270,10 +1304,7 @@ Pod::Spec.new do |s|
           "SSLV3_ALERT_UNSUPPORTED_CERTIFICATE\\0"
           "SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION\\0"
           "SSL_HANDSHAKE_FAILURE\\0"
-          "SSL_SESSION_ID_CALLBACK_FAILED\\0"
-          "SSL_SESSION_ID_CONFLICT\\0"
           "SSL_SESSION_ID_CONTEXT_TOO_LONG\\0"
-          "SSL_SESSION_ID_HAS_BAD_LENGTH\\0"
           "TLSV1_ALERT_ACCESS_DENIED\\0"
           "TLSV1_ALERT_DECODE_ERROR\\0"
           "TLSV1_ALERT_DECRYPTION_FAILED\\0"
@@ -1292,17 +1323,12 @@ Pod::Spec.new do |s|
           "TLSV1_CERTIFICATE_UNOBTAINABLE\\0"
           "TLSV1_UNRECOGNIZED_NAME\\0"
           "TLSV1_UNSUPPORTED_EXTENSION\\0"
-          "TLS_CLIENT_CERT_REQ_WITH_ANON_CIPHER\\0"
-          "TLS_ILLEGAL_EXPORTER_LABEL\\0"
-          "TLS_INVALID_ECPOINTFORMAT_LIST\\0"
           "TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST\\0"
           "TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG\\0"
           "TOO_MANY_EMPTY_FRAGMENTS\\0"
           "TOO_MANY_WARNING_ALERTS\\0"
           "UNABLE_TO_FIND_ECDH_PARAMETERS\\0"
-          "UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS\\0"
           "UNEXPECTED_EXTENSION\\0"
-          "UNEXPECTED_GROUP_CLOSE\\0"
           "UNEXPECTED_MESSAGE\\0"
           "UNEXPECTED_OPERATOR_IN_GROUP\\0"
           "UNEXPECTED_RECORD\\0"
@@ -1314,13 +1340,10 @@ Pod::Spec.new do |s|
           "UNKNOWN_PROTOCOL\\0"
           "UNKNOWN_SSL_VERSION\\0"
           "UNKNOWN_STATE\\0"
-          "UNPROCESSED_HANDSHAKE_DATA\\0"
           "UNSAFE_LEGACY_RENEGOTIATION_DISABLED\\0"
           "UNSUPPORTED_COMPRESSION_ALGORITHM\\0"
           "UNSUPPORTED_ELLIPTIC_CURVE\\0"
           "UNSUPPORTED_PROTOCOL\\0"
-          "UNSUPPORTED_SSL_VERSION\\0"
-          "USE_SRTP_NOT_NEGOTIATED\\0"
           "WRONG_CERTIFICATE_TYPE\\0"
           "WRONG_CIPHER_RETURNED\\0"
           "WRONG_CURVE\\0"
@@ -1341,12 +1364,14 @@ Pod::Spec.new do |s|
           "IDP_MISMATCH\\0"
           "INVALID_DIRECTORY\\0"
           "INVALID_FIELD_NAME\\0"
+          "INVALID_PSS_PARAMETERS\\0"
           "INVALID_TRUST\\0"
           "ISSUER_MISMATCH\\0"
           "KEY_TYPE_MISMATCH\\0"
           "KEY_VALUES_MISMATCH\\0"
           "LOADING_CERT_DIR\\0"
           "LOADING_DEFAULTS\\0"
+          "NAME_TOO_LONG\\0"
           "NEWER_CRL_NOT_NEWER\\0"
           "NOT_PKCS7_SIGNED_DATA\\0"
           "NO_CERTIFICATES_INCLUDED\\0"
@@ -1356,8 +1381,6 @@ Pod::Spec.new do |s|
           "PUBLIC_KEY_DECODE_ERROR\\0"
           "PUBLIC_KEY_ENCODE_ERROR\\0"
           "SHOULD_RETRY\\0"
-          "UNABLE_TO_FIND_PARAMETERS_IN_CHAIN\\0"
-          "UNABLE_TO_GET_CERTS_PUBLIC_KEY\\0"
           "UNKNOWN_KEY_TYPE\\0"
           "UNKNOWN_PURPOSE_ID\\0"
           "UNKNOWN_TRUST_ID\\0"
diff --git a/src/objective-c/CronetFramework.podspec b/src/objective-c/CronetFramework.podspec
index 20af7647f70c1d583fe9e7a51182a6cf37c57923..3ebcacf05541fb6dcb84b6c8f10d8da10b78ab01 100644
--- a/src/objective-c/CronetFramework.podspec
+++ b/src/objective-c/CronetFramework.podspec
@@ -36,7 +36,7 @@ Pod::Spec.new do |s|
   s.license      = { :type => 'BSD' }
   s.vendored_framework = "Cronet.framework"
   s.author             = "The Chromium Authors"
-  s.ios.deployment_target = "8.0"
+  s.ios.deployment_target = "7.1"
   s.source       = { :http => 'https://storage.googleapis.com/grpc-precompiled-binaries/cronet/Cronet.framework.zip' }
   s.preserve_paths = "Cronet.framework"
   s.public_header_files = "Cronet.framework/Headers/**/*{.h}"
diff --git a/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h b/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h
index bd6b064f166b07d7399312602deff3c513952c9d..646bf43b5474a65265bea567b68f2f899fabfac6 100644
--- a/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h
+++ b/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h
@@ -32,6 +32,8 @@
  */
 #import "GRPCCall.h"
 
+#include <AvailabilityMacros.h>
+
 /**
  * Methods to configure GRPC channel options.
  */
@@ -43,4 +45,7 @@
  */
 + (void)setUserAgentPrefix:(NSString *)userAgentPrefix forHost:(NSString *)host;
 
++ (void)closeOpenConnections DEPRECATED_MSG_ATTRIBUTE("The API for this feature is experimental, "
+                                                      "and might be removed or modified at any "
+                                                      "time.");
 @end
diff --git a/src/objective-c/GRPCClient/GRPCCall+ChannelArg.m b/src/objective-c/GRPCClient/GRPCCall+ChannelArg.m
index 5f9932d86dc867806a9883e324e67b5fc74bbef8..bcc3b9150754a1beb779c31cdf0bd8e0bd7ad675 100644
--- a/src/objective-c/GRPCClient/GRPCCall+ChannelArg.m
+++ b/src/objective-c/GRPCClient/GRPCCall+ChannelArg.m
@@ -46,4 +46,8 @@
   hostConfig.userAgentPrefix = userAgentPrefix;
 }
 
++ (void)closeOpenConnections {
+  [GRPCHost flushChannelCache];
+}
+
 @end
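
The two hunks above (header and implementation) add an experimental way to drop every cached channel. Below is a minimal usage sketch, assuming the pod's usual `<GRPCClient/...>` header layout; the call site and the reason for calling it are invented for illustration:

```objectivec
#import <GRPCClient/GRPCCall+ChannelArg.h>

// Hypothetical call site: flush all cached channels, for example after the
// app's credentials change, so that later RPCs open fresh connections.
// The method is tagged as experimental and may be removed or modified.
static void ResetGRPCConnections(void) {
  [GRPCCall closeOpenConnections];
}
```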
diff --git a/src/objective-c/GRPCClient/GRPCCall+ChannelCredentials.h b/src/objective-c/GRPCClient/GRPCCall+ChannelCredentials.h
index 343dd48a14e1cc7a7e2d2056a1e33f653c2faaaa..ac2a37d75f2121bbe46383e055041a0c0c4cb9e3 100644
--- a/src/objective-c/GRPCClient/GRPCCall+ChannelCredentials.h
+++ b/src/objective-c/GRPCClient/GRPCCall+ChannelCredentials.h
@@ -41,7 +41,7 @@
  */
 + (BOOL)setTLSPEMRootCerts:(nullable NSString *)pemRootCert
                    forHost:(nonnull NSString *)host
-                     error:(NSError **)errorPtr;
+                     error:(NSError * _Nullable * _Nullable)errorPtr;
 /**
  * Configures @c host with TLS/SSL Client Credentials and optionally trusted root Certificate
  * Authorities. If @c pemRootCerts is nil, the default CA Certificates bundled with gRPC will be
@@ -51,6 +51,6 @@
             withPrivateKey:(nullable NSString *)pemPrivateKey
              withCertChain:(nullable NSString *)pemCertChain
                    forHost:(nonnull NSString *)host
-                     error:(NSError **)errorPtr;
+                     error:(NSError * _Nullable * _Nullable)errorPtr;
 
 @end
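
With the nullability annotations above, the error out-parameter is explicitly optional. A hedged sketch of a call site follows; the PEM file name and host are placeholders, not part of this change:

```objectivec
#import <GRPCClient/GRPCCall+ChannelCredentials.h>

// Sketch: install custom root certificates for one host before starting RPCs.
// "roots.pem" and "example.com:443" are placeholder values.
static BOOL ConfigureTLSRoots(void) {
  NSError *error = nil;
  NSString *pem = [NSString stringWithContentsOfFile:@"roots.pem"
                                            encoding:NSUTF8StringEncoding
                                               error:&error];
  if (!pem) {
    NSLog(@"Could not read PEM roots: %@", error);
    return NO;
  }
  if (![GRPCCall setTLSPEMRootCerts:pem forHost:@"example.com:443" error:&error]) {
    NSLog(@"Failed to set root certificates: %@", error);
    return NO;
  }
  return YES;
}
```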
diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m
index e9678f38a9c46604779b7927f95dbfaeddcff962..da9473f9a2734b64ed23d0211a97a05bedca5354 100644
--- a/src/objective-c/GRPCClient/GRPCCall.m
+++ b/src/objective-c/GRPCClient/GRPCCall.m
@@ -377,6 +377,7 @@ NSString * const kGRPCTrailersKey = @"io.grpc.TrailersKey";
       [strongSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
                                                       code:GRPCErrorCodeUnavailable
                                                   userInfo:@{NSLocalizedDescriptionKey: @"Connectivity lost."}]];
+      [[GRPCHost hostWithAddress:strongSelf->_host] disconnect];
     }
   }];
 }
diff --git a/src/objective-c/GRPCClient/private/GRPCChannel.m b/src/objective-c/GRPCClient/private/GRPCChannel.m
index d3192c983d1d0da09780acda07511135654281a7..7b7b79e1c6250a837de60f92d3a60e2d8f3c7d6a 100644
--- a/src/objective-c/GRPCClient/private/GRPCChannel.m
+++ b/src/objective-c/GRPCClient/private/GRPCChannel.m
@@ -199,9 +199,7 @@ grpc_channel_args * buildChannelArgs(NSDictionary *dictionary) {
                                   NULL, GRPC_PROPAGATE_DEFAULTS,
                                   queue.unmanagedQueue,
                                   path.UTF8String,
-                                  // Get "host" from "host:port"
-                                  // TODO(jcanizales): Use NSURLs throughout, to clarify these.
-                                  [_host componentsSeparatedByString:@":"][0].UTF8String,
+                                  NULL, // Passing NULL for host
                                   gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
 }
 
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.h b/src/objective-c/GRPCClient/private/GRPCHost.h
index 9220e2a33dbb386cf6969bc1d5b776c327d072e1..350c69bf8e858050dd411888727f5af1dfbba544 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.h
+++ b/src/objective-c/GRPCClient/private/GRPCHost.h
@@ -41,6 +41,8 @@ struct grpc_channel_credentials;
 
 @interface GRPCHost : NSObject
 
++ (void)flushChannelCache;
+
 @property(nonatomic, readonly) NSString *address;
 @property(nonatomic, copy, nullable) NSString *userAgentPrefix;
 @property(nonatomic, nullable) struct grpc_channel_credentials *channelCreds;
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m
index fef6385cea4d2e8c8ea23869ecd6024ba385747d..08c699f99e25430e8fd3ab40d711a453a899ffa3 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.m
+++ b/src/objective-c/GRPCClient/private/GRPCHost.m
@@ -51,6 +51,8 @@ NS_ASSUME_NONNULL_BEGIN
 // templates/src/core/surface/version.c.template .
 #define GRPC_OBJC_VERSION_STRING @"0.13.0"
 
+static NSMutableDictionary *kHostCache;
+
 @implementation GRPCHost {
   // TODO(mlumish): Investigate whether caching channels with strong links is a good idea.
   GRPCChannel *_channel;
@@ -82,13 +84,12 @@ NS_ASSUME_NONNULL_BEGIN
   }
 
   // Look up the GRPCHost in the cache.
-  static NSMutableDictionary *hostCache;
   static dispatch_once_t cacheInitialization;
   dispatch_once(&cacheInitialization, ^{
-    hostCache = [NSMutableDictionary dictionary];
+    kHostCache = [NSMutableDictionary dictionary];
   });
-  @synchronized(hostCache) {
-    GRPCHost *cachedHost = hostCache[address];
+  @synchronized(kHostCache) {
+    GRPCHost *cachedHost = kHostCache[address];
     if (cachedHost) {
       return cachedHost;
     }
@@ -96,12 +97,22 @@ NS_ASSUME_NONNULL_BEGIN
     if ((self = [super init])) {
       _address = address;
       _secure = YES;
-      hostCache[address] = self;
+      kHostCache[address] = self;
     }
   }
   return self;
 }
 
++ (void)flushChannelCache {
+  @synchronized(kHostCache) {
+    [kHostCache enumerateKeysAndObjectsUsingBlock:^(id  _Nonnull key,
+                                                    GRPCHost * _Nonnull host,
+                                                    BOOL * _Nonnull stop) {
+      [host disconnect];
+    }];
+  }
+}
+
 - (nullable grpc_call *)unmanagedCallWithPath:(NSString *)path
                               completionQueue:(GRPCCompletionQueue *)queue {
   GRPCChannel *channel;
diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
index a3fa5938cdd79fd54b7c64d4652e1785f7751ef7..97f6b893407a42420d1524ab0029ce1386bfebfc 100644
--- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
+++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
@@ -252,7 +252,7 @@
 
     // Each completion queue consumes one thread. There's a trade to be made between creating and
     // consuming too many threads and having contention of multiple calls in a single completion
-    // queue. Currently we favor latency and use one per call.
+    // queue. Currently we use a singleton queue.
     _queue = [GRPCCompletionQueue completionQueue];
 
     _call = [[GRPCHost hostWithAddress:host] unmanagedCallWithPath:path completionQueue:_queue];
diff --git a/src/objective-c/GRPCClient/private/NSData+GRPC.m b/src/objective-c/GRPCClient/private/NSData+GRPC.m
index e6a6c3c605a7e5e018c1482c2140c07544c02c73..98337799e919997b98b1818c657e1f88396f8dfe 100644
--- a/src/objective-c/GRPCClient/private/NSData+GRPC.m
+++ b/src/objective-c/GRPCClient/private/NSData+GRPC.m
@@ -39,17 +39,29 @@
 
 // TODO(jcanizales): Move these two incantations to the C library.
 
-static void CopyByteBufferToCharArray(grpc_byte_buffer *buffer, char *array) {
-  size_t offset = 0;
+static void MallocAndCopyByteBufferToCharArray(grpc_byte_buffer *buffer,
+                                               size_t *length, char **array) {
   grpc_byte_buffer_reader reader;
-  grpc_byte_buffer_reader_init(&reader, buffer);
-  gpr_slice next;
-  while (grpc_byte_buffer_reader_next(&reader, &next) != 0){
-    memcpy(array + offset, GPR_SLICE_START_PTR(next),
-           (size_t)GPR_SLICE_LENGTH(next));
-    offset += GPR_SLICE_LENGTH(next);
-    gpr_slice_unref(next);
+  if (!grpc_byte_buffer_reader_init(&reader, buffer)) {
+    // grpc_byte_buffer_reader_init can fail if the data sent by the server
+    // could not be decompressed for any reason. This is an issue with the data
+    // coming from the server and thus we want the RPC to fail with error code
+    // INTERNAL.
+    *array = NULL;
+    *length = 0;
+    return;
   }
+  // The slice contains uncompressed data even if compressed data was received
+  // because the reader takes care of automatically decompressing it
+  gpr_slice slice = grpc_byte_buffer_reader_readall(&reader);
+  size_t uncompressed_length = GPR_SLICE_LENGTH(slice);
+  char *result = malloc(uncompressed_length);
+  if (result) {
+    memcpy(result, GPR_SLICE_START_PTR(slice), uncompressed_length);
+  }
+  gpr_slice_unref(slice);
+  *array = result;
+  *length = uncompressed_length;
 }
 
 static grpc_byte_buffer *CopyCharArrayToNewByteBuffer(const char *array,
@@ -65,8 +77,9 @@ static grpc_byte_buffer *CopyCharArrayToNewByteBuffer(const char *array,
   if (buffer == NULL) {
     return nil;
   }
-  NSUInteger length = grpc_byte_buffer_length(buffer);
-  char *array = malloc(length * sizeof(*array));
+  char *array;
+  size_t length;
+  MallocAndCopyByteBufferToCharArray(buffer, &length, &array);
   if (!array) {
     // TODO(jcanizales): grpc_byte_buffer is reference-counted, so we can
     // prevent this memory problem by implementing a subclass of NSData
@@ -74,8 +87,9 @@ static grpc_byte_buffer *CopyCharArrayToNewByteBuffer(const char *array,
     // can be implemented using a grpc_byte_buffer_reader.
     return nil;
   }
-  CopyByteBufferToCharArray(buffer, array);
-  return [self dataWithBytesNoCopy:array length:length freeWhenDone:YES];
+  // Cap the length at UINT_MAX so we don't rely on NSUInteger being as large as size_t.
+  NSUInteger length_max = MIN(length, UINT_MAX);
+  return [self dataWithBytesNoCopy:array length:length_max freeWhenDone:YES];
 }
 
 - (grpc_byte_buffer *)grpc_byteBuffer {
@@ -85,8 +99,10 @@ static grpc_byte_buffer *CopyCharArrayToNewByteBuffer(const char *array,
   // The following implementation is thus not optimal, sometimes requiring two
   // copies (one by self.bytes and another by gpr_slice_from_copied_buffer).
   // If it turns out to be an issue, we can use enumerateByteRangesUsingblock:
-  // to create an array of gpr_slice objects to pass to grpc_raw_byte_buffer_create.
+  // to create an array of gpr_slice objects to pass to
+  // grpc_raw_byte_buffer_create.
   // That would make it do exactly one copy, always.
-  return CopyCharArrayToNewByteBuffer((const char *)self.bytes, (size_t)self.length);
+  return CopyCharArrayToNewByteBuffer((const char *)self.bytes,
+                                      (size_t)self.length);
 }
 @end
diff --git a/src/objective-c/ProtoRPC/ProtoMethod.h b/src/objective-c/ProtoRPC/ProtoMethod.h
index f9fdbb35ffdee25042362900f3d1ac5f5305a762..ae3a2723fc4c1860a35c12a0b3d7d9ed86567ab9 100644
--- a/src/objective-c/ProtoRPC/ProtoMethod.h
+++ b/src/objective-c/ProtoRPC/ProtoMethod.h
@@ -54,6 +54,9 @@ __attribute__((deprecated("Please use GRPCProtoMethod.")))
  * This subclass is empty now. Eventually we'll remove ProtoMethod class
  * to avoid potential naming conflict
  */
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
 @interface GRPCProtoMethod : ProtoMethod
+#pragma clang diagnostic pop
 
 @end
diff --git a/src/objective-c/ProtoRPC/ProtoRPC.h b/src/objective-c/ProtoRPC/ProtoRPC.h
index 5f91f6bce1e1496e71ad35aad6d2fc01dfac57f2..04fc1e45f168f7722234f76793da5a650aff0cf1 100644
--- a/src/objective-c/ProtoRPC/ProtoRPC.h
+++ b/src/objective-c/ProtoRPC/ProtoRPC.h
@@ -56,6 +56,9 @@ __attribute__((deprecated("Please use GRPCProtoCall.")))
  * This subclass is empty now. Eventually we'll remove ProtoRPC class
  * to avoid potential naming conflict
  */
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
 @interface GRPCProtoCall : ProtoRPC
+#pragma clang diagnostic pop
 
 @end
diff --git a/src/objective-c/ProtoRPC/ProtoRPC.m b/src/objective-c/ProtoRPC/ProtoRPC.m
index fb0b566f199eb9e91a1cf758f76a69bc81ee9830..e7232f268322c658bea1d86e9c15380b0dd73734 100644
--- a/src/objective-c/ProtoRPC/ProtoRPC.m
+++ b/src/objective-c/ProtoRPC/ProtoRPC.m
@@ -33,7 +33,7 @@
 
 #import "ProtoRPC.h"
 
-#import <GPBProtocolBuffers.h>
+#import <Protobuf/GPBProtocolBuffers.h>
 #import <RxLibrary/GRXWriteable.h>
 #import <RxLibrary/GRXWriter+Transformations.h>
 
diff --git a/src/objective-c/ProtoRPC/ProtoService.h b/src/objective-c/ProtoRPC/ProtoService.h
index 87d06e1ae595619a625539061efa93dbc6c86fe1..7faae1b49c9cd2694cf67a6b58a7e55c96033b81 100644
--- a/src/objective-c/ProtoRPC/ProtoService.h
+++ b/src/objective-c/ProtoRPC/ProtoService.h
@@ -55,6 +55,9 @@ __attribute__((deprecated("Please use GRPCProtoService.")))
  * This subclass is empty now. Eventually we'll remove ProtoService class
  * to avoid potential naming conflict
  */
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
 @interface GRPCProtoService : ProtoService
+#pragma clang diagnostic pop
 
 @end
diff --git a/src/objective-c/ProtoRPC/ProtoService.m b/src/objective-c/ProtoRPC/ProtoService.m
index 597c3cf0fed226d0073efc9c90951354d6c0564b..3487fac59d3bfbc14cb9c0fffa58c525edd4fc12 100644
--- a/src/objective-c/ProtoRPC/ProtoService.m
+++ b/src/objective-c/ProtoRPC/ProtoService.m
@@ -65,18 +65,18 @@
   return self;
 }
 
-- (ProtoRPC *)RPCToMethod:(NSString *)method
-           requestsWriter:(GRXWriter *)requestsWriter
-            responseClass:(Class)responseClass
-       responsesWriteable:(id<GRXWriteable>)responsesWriteable {
-  ProtoMethod *methodName = [[ProtoMethod alloc] initWithPackage:_packageName
-                                                         service:_serviceName
-                                                          method:method];
-  return [[ProtoRPC alloc] initWithHost:_host
-                                 method:methodName
-                         requestsWriter:requestsWriter
-                          responseClass:responseClass
-                     responsesWriteable:responsesWriteable];
+- (GRPCProtoCall *)RPCToMethod:(NSString *)method
+                requestsWriter:(GRXWriter *)requestsWriter
+                 responseClass:(Class)responseClass
+            responsesWriteable:(id<GRXWriteable>)responsesWriteable {
+  GRPCProtoMethod *methodName = [[GRPCProtoMethod alloc] initWithPackage:_packageName
+                                                                 service:_serviceName
+                                                                  method:method];
+  return [[GRPCProtoCall alloc] initWithHost:_host
+                                      method:methodName
+                              requestsWriter:requestsWriter
+                               responseClass:responseClass
+                          responsesWriteable:responsesWriteable];
 }
 @end
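
Since `RPCToMethod:requestsWriter:responseClass:responsesWriteable:` now returns a `GRPCProtoCall`, generated service categories pick up the new type automatically. The following is only an illustration of that generated pattern, written from memory of the plugin output; `HLWHelloRequest`/`HLWHelloReply` are placeholder message classes and the RxLibrary helpers are assumed to exist with these names:

```objectivec
// Illustrative sketch of a generated unary-call wrapper after this rename.
- (GRPCProtoCall *)RPCToSayHelloWithRequest:(HLWHelloRequest *)request
                                    handler:(void (^)(HLWHelloReply *response,
                                                      NSError *error))handler {
  return [self RPCToMethod:@"SayHello"
            requestsWriter:[GRXWriter writerWithValue:request]
             responseClass:[HLWHelloReply class]
        responsesWriteable:[GRXWriteable writeableWithSingleHandler:handler]];
}
```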
 
diff --git a/src/objective-c/README.md b/src/objective-c/README.md
index 30d9aad64cdf419dea86a95b3ea358b08abd5623..a0ca5f448a0ea02b8026dce705786494e79cea1b 100644
--- a/src/objective-c/README.md
+++ b/src/objective-c/README.md
@@ -1,12 +1,12 @@
 [![Cocoapods](https://img.shields.io/cocoapods/v/gRPC.svg)](https://cocoapods.org/pods/gRPC)
 # gRPC for Objective-C
 
-- [Install protoc with the gRPC plugin](#install)
 - [Write your API declaration in proto format](#write-protos)
 - [Integrate a proto library in your project](#cocoapods)
 - [Use the generated library in your code](#use)
 - [Use gRPC without Protobuf](#no-proto)
-- [Alternative installation methods](#alternatives)
+- [Alternatives to the steps above](#alternatives)
+    - [Install protoc with the gRPC plugin](#install)
     - [Install protoc and the gRPC plugin without using Homebrew](#no-homebrew)
     - [Integrate the generated gRPC library without using Cocoapods](#no-cocoapods)
 
@@ -15,18 +15,6 @@ usage and adds some interoperability guarantees. Here we use [Protocol Buffers][
 plugin for the Protobuf Compiler (_protoc_) to generate client libraries to communicate with gRPC
 services.
 
-<a name="install"></a>
-## Install protoc with the gRPC plugin
-
-On Mac OS X, install [homebrew][].
-
-Run the following command to install _protoc_ and the gRPC _protoc_ plugin:
-```sh
-$ curl -fsSL https://goo.gl/getgrpc | bash -
-```
-This will download and run the [gRPC install script][]. After the command completes, you're ready to
-proceed.
-
 <a name="write-protos"></a>
 ## Write your API declaration in proto format
 
@@ -40,36 +28,74 @@ Install [Cocoapods](https://cocoapods.org/#install).
 
 You need to create a Podspec file for your proto library. You may simply copy the following example
 to the directory where your `.proto` files are located, updating the name, version and license as
-necessary:
+necessary. You also need to set the `pods_root` variable to the correct value, depending on where
+you place this podspec relative to your Podfile.
 
 ```ruby
 Pod::Spec.new do |s|
   s.name     = '<Podspec file name>'
   s.version  = '0.0.1'
   s.license  = '...'
+  s.authors  = { '<your name>' => '<your email>' }
+  s.homepage = '...'
+  s.summary = '...'
+  s.source = { :git => 'https://github.com/...' }
 
   s.ios.deployment_target = '7.1'
   s.osx.deployment_target = '10.9'
 
+  # Base directory where the .proto files are.
+  src = '.'
+
+  # We'll use protoc with the gRPC plugin.
+  s.dependency '!ProtoCompiler-gRPCPlugin', '~> 0.14'
+
+  # Pods directory corresponding to this app's Podfile, relative to the location of this podspec.
+  pods_root = '<path to your Podfile>/Pods'
+
+  # Path where Cocoapods downloads protoc and the gRPC plugin.
+  protoc_dir = "#{pods_root}/!ProtoCompiler"
+  protoc = "#{protoc_dir}/protoc"
+  plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin"
+
+  # Directory where you want the generated files to be placed. This is an example.
+  dir = "#{pods_root}/#{s.name}"
+
   # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
   # You can run this command manually if you later change your protos and need to regenerate.
-  s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto"
+  # Alternatively, you can advance the version of this podspec and run `pod update`.
+  s.prepare_command = <<-CMD
+    mkdir -p #{dir}
+    #{protoc} \
+        --plugin=protoc-gen-grpc=#{plugin} \
+        --objc_out=#{dir} \
+        --grpc_out=#{dir} \
+        -I #{src} \
+        -I #{protoc_dir} \
+        #{src}/*.proto
+  CMD
 
   # The --objc_out plugin generates a pair of .pbobjc.h/.pbobjc.m files for each .proto file.
-  s.subspec "Messages" do |ms|
-    ms.source_files = "*.pbobjc.{h,m}"
-    ms.header_mappings_dir = "."
+  s.subspec 'Messages' do |ms|
+    ms.source_files = "#{dir}/*.pbobjc.{h,m}"
+    ms.header_mappings_dir = dir
     ms.requires_arc = false
-    ms.dependency "Protobuf", "~> 3.0.0-alpha-4"
+    # The generated files depend on the protobuf runtime.
+    ms.dependency 'Protobuf'
+    # This is needed by all pods that depend on Protobuf:
+    ms.pod_target_xcconfig = {
+      'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+    }
   end
 
   # The --objcgrpc_out plugin generates a pair of .pbrpc.h/.pbrpc.m files for each .proto file with
   # a service defined.
-  s.subspec "Services" do |ss|
-    ss.source_files = "*.pbrpc.{h,m}"
-    ss.header_mappings_dir = "."
+  s.subspec 'Services' do |ss|
+    ss.source_files = "#{dir}/*.pbrpc.{h,m}"
+    ss.header_mappings_dir = dir
     ss.requires_arc = true
-    ss.dependency "gRPC", "~> 0.12"
+    # The generated files depend on the gRPC runtime, and on the files generated by `--objc_out`.
+    ss.dependency 'gRPC-ProtoRPC'
     ss.dependency "#{s.name}/Messages"
   end
 end
@@ -81,11 +107,14 @@ Note: If your proto files are in a directory hierarchy, you might want to adjust
 the sample Podspec above. For example, you could use:
 
 ```ruby
-  s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto"
+  s.prepare_command = <<-CMD
+    ...
+        #{src}/*.proto #{src}/**/*.proto
+  CMD
   ...
-    ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}"
+    ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}"
   ...
-    ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}"
+    ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}"
 ```
 
 Once your library has a Podspec, Cocoapods can install it into any XCode project. For that, go into
@@ -113,19 +142,33 @@ pod install
 <a name="use"></a>
 ## Use the generated library in your code
 
-Please check this [sample app][] for examples of how to use a generated gRPC library.
+Please check the [example apps][] for examples of how to use a generated gRPC library.
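+
+As a quick orientation, calling a generated service looks roughly like the sketch below. This is
+only a sketch: `HLWGreeter`, `HLWHelloRequest` and `HLWHelloReply` are the classes generated for the
+HelloWorld example's `Greeter` service, so substitute the names generated from your own protos.
+
+```objc
+#import <HelloWorld/Helloworld.pbrpc.h>  // generated header; the name depends on your podspec
+
+// Instantiate a client stub for the service, pointing it at your server.
+// Note: talking to a local server without TLS requires extra configuration not shown here.
+HLWGreeter *client = [[HLWGreeter alloc] initWithHost:@"localhost:50051"];
+
+// Build a request message and invoke the RPC; the handler runs asynchronously when it finishes.
+HLWHelloRequest *request = [HLWHelloRequest message];
+request.name = @"Objective-C";
+[client sayHelloWithRequest:request handler:^(HLWHelloReply *response, NSError *error) {
+  NSLog(@"Reply: %@ (error: %@)", response.message, error);
+}];
+```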
 
 <a name="no-proto"></a>
 ## Use gRPC without Protobuf
 
-The [sample app][] has an example of how to use the generic gRPC Objective-C client without
-generated files.
+This [tests file](https://github.com/grpc/grpc/tree/master/src/objective-c/tests/GRPCClientTests.m)
+shows how to use the generic gRPC Objective-C client without generated protobuf files.
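+
+In essence, you build the RPC path from the proto package, service and method names, and write and
+read raw `NSData` messages. A minimal sketch, using placeholder service and host names (consult the
+tests file above for the real thing):
+
+```objc
+#import <GRPCClient/GRPCCall.h>
+#import <ProtoRPC/ProtoMethod.h>
+#import <RxLibrary/GRXWriteable.h>
+#import <RxLibrary/GRXWriter+Immediate.h>
+
+// Path of the RPC, e.g. /grpc.testing.TestService/UnaryCall.
+GRPCProtoMethod *method = [[GRPCProtoMethod alloc] initWithPackage:@"grpc.testing"
+                                                            service:@"TestService"
+                                                             method:@"UnaryCall"];
+
+// A writer that emits the (already serialized) request message; empty here for simplicity.
+GRXWriter *requestsWriter = [GRXWriter writerWithValue:[NSData data]];
+
+GRPCCall *call = [[GRPCCall alloc] initWithHost:@"example.com"
+                                           path:method.HTTPPath
+                                 requestsWriter:requestsWriter];
+call.requestHeaders[@"My-Header"] = @"My value";
+
+// Responses arrive as NSData; deserializing them is up to you.
+[call startWithWriteable:[[GRXWriteable alloc] initWithValueHandler:^(NSData *response) {
+  NSLog(@"Received response: %@", response);
+} completionHandler:^(NSError *errorOrNil) {
+  NSLog(@"Finished with error: %@", errorOrNil);
+}]];
+```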
 
 <a name="alternatives"></a>
-## Alternative installation methods
+## Alternatives to the steps above
+
+<a name="install"></a>
+### Install _protoc_ with the gRPC plugin
+
+Although it's not recommended (because it can lead to hard-to-solve version conflicts), it is
+sometimes more convenient to install _protoc_ and the gRPC plugin on your development machine,
+instead of letting Cocoapods download the appropriate versions for you. To do so on Mac OS X,
+install [homebrew][].
+
+Then run the following command to install _protoc_ and the gRPC _protoc_ plugin:
+```sh
+$ curl -fsSL https://goo.gl/getgrpc | bash -
+```
+This will download and run the [gRPC install script][].
 
 <a name="no-homebrew"></a>
-### Install protoc and the gRPC plugin without using Homebrew
+### Install _protoc_ and the gRPC plugin without using Homebrew
 
 First install v3 of the Protocol Buffers compiler (_protoc_), by cloning
 [its Git repository](https://github.com/google/protobuf) and following these
@@ -137,15 +180,15 @@ cloned.
 
 Compile the gRPC plugins for _protoc_:
 ```sh
-make plugins
+make grpc_objective_c_plugin
 ```
 
 Create a symbolic link to the compiled plugin binary somewhere in your `$PATH`:
 ```sh
 ln -s `pwd`/bins/opt/grpc_objective_c_plugin /usr/local/bin/protoc-gen-objcgrpc
 ```
-(Notice that the name of the created link must begin with "protoc-gen-" for _protoc_ to recognize it
-as a plugin).
+(Notice that the name of the created link must begin with "`protoc-gen-`" for _protoc_ to recognize
+it as a plugin).
 
 If you don't want to create the symbolic link, you can alternatively copy the binary (with the
 appropriate name). Or you might prefer instead to specify the plugin's path as a flag when invoking
@@ -170,5 +213,5 @@ Objective-C Protobuf runtime library.
 [Protocol Buffers]:https://developers.google.com/protocol-buffers/
 [homebrew]:http://brew.sh
 [gRPC install script]:https://raw.githubusercontent.com/grpc/homebrew-grpc/master/scripts/install
-[example Podfile]:https://github.com/grpc/grpc/blob/master/src/objective-c/examples/Sample/Podfile
-[sample app]: https://github.com/grpc/grpc/tree/master/src/objective-c/examples/Sample
+[example Podfile]:https://github.com/grpc/grpc/blob/master/examples/objective-c/helloworld/Podfile
+[example apps]: https://github.com/grpc/grpc/tree/master/examples/objective-c
diff --git a/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec b/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec
index 5addf26fc46c255a8a25f9727bd7175e32094853..6e783fb5adbf84655d7b80cdb5517293d23dec09 100644
--- a/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec
+++ b/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec
@@ -1,28 +1,51 @@
 Pod::Spec.new do |s|
-  s.name     = "RemoteTest"
-  s.version  = "0.0.1"
-  s.license  = "New BSD"
+  s.name     = 'RemoteTest'
+  s.version  = '0.0.1'
+  s.license  = 'New BSD'
+  s.authors  = { 'gRPC contributors' => 'grpc-io@googlegroups.com' }
+  s.homepage = 'http://www.grpc.io/'
+  s.summary = 'RemoteTest example'
+  s.source = { :git => 'https://github.com/grpc/grpc.git' }
 
   s.ios.deployment_target = '7.1'
   s.osx.deployment_target = '10.9'
 
   # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
+  s.dependency "!ProtoCompiler-gRPCPlugin", "~> 0.14"
+
+  repo_root = '../../../..'
+  bin_dir = "#{repo_root}/bins/$CONFIG"
+
+  protoc = "#{bin_dir}/protobuf/protoc"
+  well_known_types_dir = "#{repo_root}/third_party/protobuf/src"
+  plugin = "#{bin_dir}/grpc_objective_c_plugin"
+
   s.prepare_command = <<-CMD
-    protoc --objc_out=. --objcgrpc_out=. *.proto
+    #{protoc} \
+        --plugin=protoc-gen-grpc=#{plugin} \
+        --objc_out=. \
+        --grpc_out=. \
+        -I . \
+        -I #{well_known_types_dir} \
+        *.proto
   CMD
 
-  s.subspec "Messages" do |ms|
-    ms.source_files = "*.pbobjc.{h,m}"
-    ms.header_mappings_dir = "."
+  s.subspec 'Messages' do |ms|
+    ms.source_files = '*.pbobjc.{h,m}'
+    ms.header_mappings_dir = '.'
     ms.requires_arc = false
-    ms.dependency "Protobuf", "~> 3.0.0-alpha-4"
+    ms.dependency 'Protobuf'
+    # This is needed by all pods that depend on Protobuf:
+    ms.pod_target_xcconfig = {
+      'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+    }
   end
 
-  s.subspec "Services" do |ss|
-    ss.source_files = "*.pbrpc.{h,m}"
-    ss.header_mappings_dir = "."
+  s.subspec 'Services' do |ss|
+    ss.source_files = '*.pbrpc.{h,m}'
+    ss.header_mappings_dir = '.'
     ss.requires_arc = true
-    ss.dependency "gRPC", "~> 0.12"
+    ss.dependency 'gRPC-ProtoRPC'
     ss.dependency "#{s.name}/Messages"
   end
 end
diff --git a/src/objective-c/examples/Sample/Podfile b/src/objective-c/examples/Sample/Podfile
index 93859fb73403e624b5c78874a1d5c8154d579fab..8740b2f96382e5483684ad23a28c665604d1ff21 100644
--- a/src/objective-c/examples/Sample/Podfile
+++ b/src/objective-c/examples/Sample/Podfile
@@ -1,10 +1,46 @@
 source 'https://github.com/CocoaPods/Specs.git'
 platform :ios, '8.0'
 
-pod 'Protobuf', :path => "../../../../third_party/protobuf"
-pod 'BoringSSL', :podspec => "../.."
-pod 'gRPC', :path => "../../../.."
-pod 'RemoteTest', :path => "../RemoteTestClient"
+install! 'cocoapods', :deterministic_uuids => false
+
+# Location of gRPC's repo root relative to this file.
+GRPC_LOCAL_SRC = '../../../..'
 
 target 'Sample' do
+  # Depend on the generated RemoteTestClient library
+  pod 'RemoteTest', :path => "../RemoteTestClient"
+
+  # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following
+  # lines in your application.
+  pod '!ProtoCompiler',  :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+  pod '!ProtoCompiler-gRPCPlugin', :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+
+  pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf"
+
+  pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
+
+  pod 'gRPC', :path => GRPC_LOCAL_SRC
+  pod 'gRPC-Core', :path => GRPC_LOCAL_SRC
+  pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC
+  pod 'gRPC-ProtoRPC',  :path => GRPC_LOCAL_SRC
+end
+
+# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in
+# your application.
+pre_install do |installer|
+  # This is the gRPC-Core podspec object, as initialized by its podspec file.
+  grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec
+
+  # Copied from gRPC-Core.podspec, except for the adjusted src_root:
+  src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}"
+  grpc_core_spec.pod_target_xcconfig = {
+    'GRPC_SRC_ROOT' => src_root,
+    'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"',
+    'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"',
+    # If we don't set these two settings, `include/grpc/support/time.h` and
+    # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the
+    # build.
+    'USE_HEADERMAP' => 'NO',
+    'ALWAYS_SEARCH_USER_PATHS' => 'NO',
+  }
 end
diff --git a/src/objective-c/examples/Sample/Sample.xcodeproj/project.pbxproj b/src/objective-c/examples/Sample/Sample.xcodeproj/project.pbxproj
index 611eb6032d5d39ac742bc477473e9813a1f7afd6..5c2a6d14f96ee1d24925ce998bebfa96491f24ce 100644
--- a/src/objective-c/examples/Sample/Sample.xcodeproj/project.pbxproj
+++ b/src/objective-c/examples/Sample/Sample.xcodeproj/project.pbxproj
@@ -7,16 +7,16 @@
 	objects = {
 
 /* Begin PBXBuildFile section */
+		426A5020E0E158A101BCA1D9 /* libPods-Sample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = C20055928615A6F8434E26B4 /* libPods-Sample.a */; };
 		6369A2701A9322E20015FC5C /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 6369A26F1A9322E20015FC5C /* main.m */; };
 		6369A2731A9322E20015FC5C /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 6369A2721A9322E20015FC5C /* AppDelegate.m */; };
 		6369A2761A9322E20015FC5C /* ViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = 6369A2751A9322E20015FC5C /* ViewController.m */; };
 		6369A2791A9322E20015FC5C /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 6369A2771A9322E20015FC5C /* Main.storyboard */; };
 		6369A27B1A9322E20015FC5C /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 6369A27A1A9322E20015FC5C /* Images.xcassets */; };
-		FC81FE63CA655031F3524EC0 /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 2DC7B7C4C0410F43B9621631 /* libPods.a */; };
 /* End PBXBuildFile section */
 
 /* Begin PBXFileReference section */
-		2DC7B7C4C0410F43B9621631 /* libPods.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libPods.a; sourceTree = BUILT_PRODUCTS_DIR; };
+		5A8C9F4B28733B249DE4AB6D /* Pods-Sample.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Sample.release.xcconfig"; path = "Pods/Target Support Files/Pods-Sample/Pods-Sample.release.xcconfig"; sourceTree = "<group>"; };
 		6369A26A1A9322E20015FC5C /* Sample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = Sample.app; sourceTree = BUILT_PRODUCTS_DIR; };
 		6369A26E1A9322E20015FC5C /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
 		6369A26F1A9322E20015FC5C /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
@@ -26,8 +26,8 @@
 		6369A2751A9322E20015FC5C /* ViewController.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ViewController.m; sourceTree = "<group>"; };
 		6369A2781A9322E20015FC5C /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; };
 		6369A27A1A9322E20015FC5C /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = "<group>"; };
-		AC29DD6FCDF962F519FEBB0D /* Pods.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.debug.xcconfig; path = "Pods/Target Support Files/Pods/Pods.debug.xcconfig"; sourceTree = "<group>"; };
-		C68330F8D451CC6ACEABA09F /* Pods.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.release.xcconfig; path = "Pods/Target Support Files/Pods/Pods.release.xcconfig"; sourceTree = "<group>"; };
+		C20055928615A6F8434E26B4 /* libPods-Sample.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-Sample.a"; sourceTree = BUILT_PRODUCTS_DIR; };
+		E3C01DF315C4E7433BCEC6E6 /* Pods-Sample.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Sample.debug.xcconfig"; path = "Pods/Target Support Files/Pods-Sample/Pods-Sample.debug.xcconfig"; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
@@ -35,7 +35,7 @@
 			isa = PBXFrameworksBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
-				FC81FE63CA655031F3524EC0 /* libPods.a in Frameworks */,
+				426A5020E0E158A101BCA1D9 /* libPods-Sample.a in Frameworks */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -86,8 +86,8 @@
 		AB3331C9AE6488E61B2B094E /* Pods */ = {
 			isa = PBXGroup;
 			children = (
-				AC29DD6FCDF962F519FEBB0D /* Pods.debug.xcconfig */,
-				C68330F8D451CC6ACEABA09F /* Pods.release.xcconfig */,
+				E3C01DF315C4E7433BCEC6E6 /* Pods-Sample.debug.xcconfig */,
+				5A8C9F4B28733B249DE4AB6D /* Pods-Sample.release.xcconfig */,
 			);
 			name = Pods;
 			sourceTree = "<group>";
@@ -95,7 +95,7 @@
 		C4C2C5219053E079C9EFB930 /* Frameworks */ = {
 			isa = PBXGroup;
 			children = (
-				2DC7B7C4C0410F43B9621631 /* libPods.a */,
+				C20055928615A6F8434E26B4 /* libPods-Sample.a */,
 			);
 			name = Frameworks;
 			sourceTree = "<group>";
@@ -107,11 +107,12 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 6369A28D1A9322E20015FC5C /* Build configuration list for PBXNativeTarget "Sample" */;
 			buildPhases = (
-				41F7486D8F66994B0BFB84AF /* Check Pods Manifest.lock */,
+				41F7486D8F66994B0BFB84AF /* [CP] Check Pods Manifest.lock */,
 				6369A2661A9322E20015FC5C /* Sources */,
 				6369A2671A9322E20015FC5C /* Frameworks */,
 				6369A2681A9322E20015FC5C /* Resources */,
-				04554623324BE4A838846086 /* Copy Pods Resources */,
+				04554623324BE4A838846086 /* [CP] Copy Pods Resources */,
+				C7FAD018D05AB5F0B0FE81E2 /* [CP] Embed Pods Frameworks */,
 			);
 			buildRules = (
 			);
@@ -167,29 +168,29 @@
 /* End PBXResourcesBuildPhase section */
 
 /* Begin PBXShellScriptBuildPhase section */
-		04554623324BE4A838846086 /* Copy Pods Resources */ = {
+		04554623324BE4A838846086 /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 			shellPath = /bin/sh;
-			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-resources.sh\"\n";
+			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-Sample/Pods-Sample-resources.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		41F7486D8F66994B0BFB84AF /* Check Pods Manifest.lock */ = {
+		41F7486D8F66994B0BFB84AF /* [CP] Check Pods Manifest.lock */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Check Pods Manifest.lock";
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -197,6 +198,21 @@
 			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
 			showEnvVarsInLog = 0;
 		};
+		C7FAD018D05AB5F0B0FE81E2 /* [CP] Embed Pods Frameworks */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			name = "[CP] Embed Pods Frameworks";
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-Sample/Pods-Sample-frameworks.sh\"\n";
+			showEnvVarsInLog = 0;
+		};
 /* End PBXShellScriptBuildPhase section */
 
 /* Begin PBXSourcesBuildPhase section */
@@ -304,7 +320,7 @@
 		};
 		6369A28E1A9322E20015FC5C /* Debug */ = {
 			isa = XCBuildConfiguration;
-			baseConfigurationReference = AC29DD6FCDF962F519FEBB0D /* Pods.debug.xcconfig */;
+			baseConfigurationReference = E3C01DF315C4E7433BCEC6E6 /* Pods-Sample.debug.xcconfig */;
 			buildSettings = {
 				ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
 				INFOPLIST_FILE = Sample/Info.plist;
@@ -315,7 +331,7 @@
 		};
 		6369A28F1A9322E20015FC5C /* Release */ = {
 			isa = XCBuildConfiguration;
-			baseConfigurationReference = C68330F8D451CC6ACEABA09F /* Pods.release.xcconfig */;
+			baseConfigurationReference = 5A8C9F4B28733B249DE4AB6D /* Pods-Sample.release.xcconfig */;
 			buildSettings = {
 				ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
 				INFOPLIST_FILE = Sample/Info.plist;
diff --git a/src/objective-c/examples/Sample/Sample.xcodeproj/xcshareddata/xcschemes/Sample.xcscheme b/src/objective-c/examples/Sample/Sample.xcodeproj/xcshareddata/xcschemes/Sample.xcscheme
new file mode 100644
index 0000000000000000000000000000000000000000..d399e22e460ad9408616bc7085fc43b2728bfbad
--- /dev/null
+++ b/src/objective-c/examples/Sample/Sample.xcodeproj/xcshareddata/xcschemes/Sample.xcscheme
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Scheme
+   LastUpgradeVersion = "0730"
+   version = "1.3">
+   <BuildAction
+      parallelizeBuildables = "YES"
+      buildImplicitDependencies = "YES">
+      <BuildActionEntries>
+         <BuildActionEntry
+            buildForTesting = "YES"
+            buildForRunning = "YES"
+            buildForProfiling = "YES"
+            buildForArchiving = "YES"
+            buildForAnalyzing = "YES">
+            <BuildableReference
+               BuildableIdentifier = "primary"
+               BlueprintIdentifier = "6369A2691A9322E20015FC5C"
+               BuildableName = "Sample.app"
+               BlueprintName = "Sample"
+               ReferencedContainer = "container:Sample.xcodeproj">
+            </BuildableReference>
+         </BuildActionEntry>
+      </BuildActionEntries>
+   </BuildAction>
+   <TestAction
+      buildConfiguration = "Debug"
+      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+      shouldUseLaunchSchemeArgsEnv = "YES">
+      <Testables>
+      </Testables>
+      <MacroExpansion>
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "6369A2691A9322E20015FC5C"
+            BuildableName = "Sample.app"
+            BlueprintName = "Sample"
+            ReferencedContainer = "container:Sample.xcodeproj">
+         </BuildableReference>
+      </MacroExpansion>
+      <AdditionalOptions>
+      </AdditionalOptions>
+   </TestAction>
+   <LaunchAction
+      buildConfiguration = "Debug"
+      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+      launchStyle = "0"
+      useCustomWorkingDirectory = "NO"
+      ignoresPersistentStateOnLaunch = "NO"
+      debugDocumentVersioning = "YES"
+      debugServiceExtension = "internal"
+      allowLocationSimulation = "YES">
+      <BuildableProductRunnable
+         runnableDebuggingMode = "0">
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "6369A2691A9322E20015FC5C"
+            BuildableName = "Sample.app"
+            BlueprintName = "Sample"
+            ReferencedContainer = "container:Sample.xcodeproj">
+         </BuildableReference>
+      </BuildableProductRunnable>
+      <AdditionalOptions>
+      </AdditionalOptions>
+   </LaunchAction>
+   <ProfileAction
+      buildConfiguration = "Release"
+      shouldUseLaunchSchemeArgsEnv = "YES"
+      savedToolIdentifier = ""
+      useCustomWorkingDirectory = "NO"
+      debugDocumentVersioning = "YES">
+      <BuildableProductRunnable
+         runnableDebuggingMode = "0">
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "6369A2691A9322E20015FC5C"
+            BuildableName = "Sample.app"
+            BlueprintName = "Sample"
+            ReferencedContainer = "container:Sample.xcodeproj">
+         </BuildableReference>
+      </BuildableProductRunnable>
+   </ProfileAction>
+   <AnalyzeAction
+      buildConfiguration = "Debug">
+   </AnalyzeAction>
+   <ArchiveAction
+      buildConfiguration = "Release"
+      revealArchiveInOrganizer = "YES">
+   </ArchiveAction>
+</Scheme>
diff --git a/src/objective-c/examples/SwiftSample/Podfile b/src/objective-c/examples/SwiftSample/Podfile
index f2df4a34a34f8adb39fce609146328f2ccc91d5b..2f783340f56c13558634fd788ef378a93aae000d 100644
--- a/src/objective-c/examples/SwiftSample/Podfile
+++ b/src/objective-c/examples/SwiftSample/Podfile
@@ -1,10 +1,46 @@
 source 'https://github.com/CocoaPods/Specs.git'
 platform :ios, '8.0'
 
-pod 'Protobuf', :path => "../../../../third_party/protobuf"
-pod 'BoringSSL', :podspec => "../.."
-pod 'gRPC', :path => "../../../.."
-pod 'RemoteTest', :path => "../RemoteTestClient"
+install! 'cocoapods', :deterministic_uuids => false
+
+# Location of gRPC's repo root relative to this file.
+GRPC_LOCAL_SRC = '../../../..'
 
 target 'SwiftSample' do
+  # Depend on the generated RemoteTestClient library
+  pod 'RemoteTest', :path => "../RemoteTestClient"
+
+  # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following
+  # lines in your application.
+  pod '!ProtoCompiler',  :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+  pod '!ProtoCompiler-gRPCPlugin', :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+
+  pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf"
+
+  pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
+
+  pod 'gRPC', :path => GRPC_LOCAL_SRC
+  pod 'gRPC-Core', :path => GRPC_LOCAL_SRC
+  pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC
+  pod 'gRPC-ProtoRPC',  :path => GRPC_LOCAL_SRC
+end
+
+# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in
+# your application.
+pre_install do |installer|
+  # This is the gRPC-Core podspec object, as initialized by its podspec file.
+  grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec
+
+  # Copied from gRPC-Core.podspec, except for the adjusted src_root:
+  src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}"
+  grpc_core_spec.pod_target_xcconfig = {
+    'GRPC_SRC_ROOT' => src_root,
+    'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"',
+    'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"',
+    # If we don't set these two settings, `include/grpc/support/time.h` and
+    # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the
+    # build.
+    'USE_HEADERMAP' => 'NO',
+    'ALWAYS_SEARCH_USER_PATHS' => 'NO',
+  }
 end
diff --git a/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/project.pbxproj b/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/project.pbxproj
index 2f5716082bfb000f0a4d6191279baf86d22b84ed..2a1b30f2cf9fa31d145915a6b3eb91e8f13c56c2 100644
--- a/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/project.pbxproj
+++ b/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/project.pbxproj
@@ -7,15 +7,14 @@
 	objects = {
 
 /* Begin PBXBuildFile section */
-		253D3A297105CA46DA960A11 /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = DC58ACA18DCCB1553531B885 /* libPods.a */; };
 		633BFFC81B950B210007E424 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 633BFFC71B950B210007E424 /* AppDelegate.swift */; };
 		633BFFCA1B950B210007E424 /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 633BFFC91B950B210007E424 /* ViewController.swift */; };
 		633BFFCD1B950B210007E424 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 633BFFCB1B950B210007E424 /* Main.storyboard */; };
 		633BFFCF1B950B210007E424 /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 633BFFCE1B950B210007E424 /* Images.xcassets */; };
+		92EDB1408A1E1E7DDAB25D9C /* libPods-SwiftSample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 69BB5C6CA3C1F97E007AC527 /* libPods-SwiftSample.a */; };
 /* End PBXBuildFile section */
 
 /* Begin PBXFileReference section */
-		12C7B447AA80E624D93B5C54 /* Pods.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.debug.xcconfig; path = "Pods/Target Support Files/Pods/Pods.debug.xcconfig"; sourceTree = "<group>"; };
 		633BFFC21B950B210007E424 /* SwiftSample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = SwiftSample.app; sourceTree = BUILT_PRODUCTS_DIR; };
 		633BFFC61B950B210007E424 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
 		633BFFC71B950B210007E424 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = "<group>"; };
@@ -23,8 +22,9 @@
 		633BFFCC1B950B210007E424 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; };
 		633BFFCE1B950B210007E424 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = "<group>"; };
 		6367AD231B951655007FD3A4 /* Bridging-Header.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "Bridging-Header.h"; sourceTree = "<group>"; };
-		C335CBC4C160E0D9EDEE646B /* Pods.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.release.xcconfig; path = "Pods/Target Support Files/Pods/Pods.release.xcconfig"; sourceTree = "<group>"; };
-		DC58ACA18DCCB1553531B885 /* libPods.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libPods.a; sourceTree = BUILT_PRODUCTS_DIR; };
+		69BB5C6CA3C1F97E007AC527 /* libPods-SwiftSample.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-SwiftSample.a"; sourceTree = BUILT_PRODUCTS_DIR; };
+		A7E614A494D89D01BB395761 /* Pods-SwiftSample.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-SwiftSample.debug.xcconfig"; path = "Pods/Target Support Files/Pods-SwiftSample/Pods-SwiftSample.debug.xcconfig"; sourceTree = "<group>"; };
+		C314E3E246AF23AC29B38FCF /* Pods-SwiftSample.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-SwiftSample.release.xcconfig"; path = "Pods/Target Support Files/Pods-SwiftSample/Pods-SwiftSample.release.xcconfig"; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
@@ -32,7 +32,7 @@
 			isa = PBXFrameworksBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
-				253D3A297105CA46DA960A11 /* libPods.a in Frameworks */,
+				92EDB1408A1E1E7DDAB25D9C /* libPods-SwiftSample.a in Frameworks */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -42,8 +42,8 @@
 		31F283C976AE97586C17CCD9 /* Pods */ = {
 			isa = PBXGroup;
 			children = (
-				12C7B447AA80E624D93B5C54 /* Pods.debug.xcconfig */,
-				C335CBC4C160E0D9EDEE646B /* Pods.release.xcconfig */,
+				A7E614A494D89D01BB395761 /* Pods-SwiftSample.debug.xcconfig */,
+				C314E3E246AF23AC29B38FCF /* Pods-SwiftSample.release.xcconfig */,
 			);
 			name = Pods;
 			sourceTree = "<group>";
@@ -90,7 +90,7 @@
 		9D63A7F6423989BA306810CA /* Frameworks */ = {
 			isa = PBXGroup;
 			children = (
-				DC58ACA18DCCB1553531B885 /* libPods.a */,
+				69BB5C6CA3C1F97E007AC527 /* libPods-SwiftSample.a */,
 			);
 			name = Frameworks;
 			sourceTree = "<group>";
@@ -102,12 +102,12 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 633BFFE11B950B210007E424 /* Build configuration list for PBXNativeTarget "SwiftSample" */;
 			buildPhases = (
-				6BEEB33CA2705D7D2F2210E6 /* Check Pods Manifest.lock */,
+				6BEEB33CA2705D7D2F2210E6 /* [CP] Check Pods Manifest.lock */,
 				633BFFBE1B950B210007E424 /* Sources */,
 				633BFFBF1B950B210007E424 /* Frameworks */,
 				633BFFC01B950B210007E424 /* Resources */,
-				AC2F6F9AB1C090BB0BEE6E4D /* Copy Pods Resources */,
-				A1738A987353B0BF2C64F0F7 /* Embed Pods Frameworks */,
+				AC2F6F9AB1C090BB0BEE6E4D /* [CP] Copy Pods Resources */,
+				A1738A987353B0BF2C64F0F7 /* [CP] Embed Pods Frameworks */,
 			);
 			buildRules = (
 			);
@@ -164,14 +164,14 @@
 /* End PBXResourcesBuildPhase section */
 
 /* Begin PBXShellScriptBuildPhase section */
-		6BEEB33CA2705D7D2F2210E6 /* Check Pods Manifest.lock */ = {
+		6BEEB33CA2705D7D2F2210E6 /* [CP] Check Pods Manifest.lock */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Check Pods Manifest.lock";
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -179,34 +179,34 @@
 			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
 			showEnvVarsInLog = 0;
 		};
-		A1738A987353B0BF2C64F0F7 /* Embed Pods Frameworks */ = {
+		A1738A987353B0BF2C64F0F7 /* [CP] Embed Pods Frameworks */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Embed Pods Frameworks";
+			name = "[CP] Embed Pods Frameworks";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 			shellPath = /bin/sh;
-			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-frameworks.sh\"\n";
+			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-SwiftSample/Pods-SwiftSample-frameworks.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		AC2F6F9AB1C090BB0BEE6E4D /* Copy Pods Resources */ = {
+		AC2F6F9AB1C090BB0BEE6E4D /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 			shellPath = /bin/sh;
-			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-resources.sh\"\n";
+			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-SwiftSample/Pods-SwiftSample-resources.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
 /* End PBXShellScriptBuildPhase section */
@@ -320,7 +320,7 @@
 		};
 		633BFFE21B950B210007E424 /* Debug */ = {
 			isa = XCBuildConfiguration;
-			baseConfigurationReference = 12C7B447AA80E624D93B5C54 /* Pods.debug.xcconfig */;
+			baseConfigurationReference = A7E614A494D89D01BB395761 /* Pods-SwiftSample.debug.xcconfig */;
 			buildSettings = {
 				ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
 				INFOPLIST_FILE = Info.plist;
@@ -333,7 +333,7 @@
 		};
 		633BFFE31B950B210007E424 /* Release */ = {
 			isa = XCBuildConfiguration;
-			baseConfigurationReference = C335CBC4C160E0D9EDEE646B /* Pods.release.xcconfig */;
+			baseConfigurationReference = C314E3E246AF23AC29B38FCF /* Pods-SwiftSample.release.xcconfig */;
 			buildSettings = {
 				ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
 				INFOPLIST_FILE = Info.plist;
diff --git a/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/xcshareddata/xcschemes/SwiftSample.xcscheme b/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/xcshareddata/xcschemes/SwiftSample.xcscheme
new file mode 100644
index 0000000000000000000000000000000000000000..bba6a02b2b43e6ca0e9206da5d00b68602faef84
--- /dev/null
+++ b/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/xcshareddata/xcschemes/SwiftSample.xcscheme
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Scheme
+   LastUpgradeVersion = "0730"
+   version = "1.3">
+   <BuildAction
+      parallelizeBuildables = "YES"
+      buildImplicitDependencies = "YES">
+      <BuildActionEntries>
+         <BuildActionEntry
+            buildForTesting = "YES"
+            buildForRunning = "YES"
+            buildForProfiling = "YES"
+            buildForArchiving = "YES"
+            buildForAnalyzing = "YES">
+            <BuildableReference
+               BuildableIdentifier = "primary"
+               BlueprintIdentifier = "633BFFC11B950B210007E424"
+               BuildableName = "SwiftSample.app"
+               BlueprintName = "SwiftSample"
+               ReferencedContainer = "container:SwiftSample.xcodeproj">
+            </BuildableReference>
+         </BuildActionEntry>
+      </BuildActionEntries>
+   </BuildAction>
+   <TestAction
+      buildConfiguration = "Debug"
+      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+      shouldUseLaunchSchemeArgsEnv = "YES">
+      <Testables>
+      </Testables>
+      <MacroExpansion>
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "633BFFC11B950B210007E424"
+            BuildableName = "SwiftSample.app"
+            BlueprintName = "SwiftSample"
+            ReferencedContainer = "container:SwiftSample.xcodeproj">
+         </BuildableReference>
+      </MacroExpansion>
+      <AdditionalOptions>
+      </AdditionalOptions>
+   </TestAction>
+   <LaunchAction
+      buildConfiguration = "Debug"
+      selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+      selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+      launchStyle = "0"
+      useCustomWorkingDirectory = "NO"
+      ignoresPersistentStateOnLaunch = "NO"
+      debugDocumentVersioning = "YES"
+      debugServiceExtension = "internal"
+      allowLocationSimulation = "YES">
+      <BuildableProductRunnable
+         runnableDebuggingMode = "0">
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "633BFFC11B950B210007E424"
+            BuildableName = "SwiftSample.app"
+            BlueprintName = "SwiftSample"
+            ReferencedContainer = "container:SwiftSample.xcodeproj">
+         </BuildableReference>
+      </BuildableProductRunnable>
+      <AdditionalOptions>
+      </AdditionalOptions>
+   </LaunchAction>
+   <ProfileAction
+      buildConfiguration = "Release"
+      shouldUseLaunchSchemeArgsEnv = "YES"
+      savedToolIdentifier = ""
+      useCustomWorkingDirectory = "NO"
+      debugDocumentVersioning = "YES">
+      <BuildableProductRunnable
+         runnableDebuggingMode = "0">
+         <BuildableReference
+            BuildableIdentifier = "primary"
+            BlueprintIdentifier = "633BFFC11B950B210007E424"
+            BuildableName = "SwiftSample.app"
+            BlueprintName = "SwiftSample"
+            ReferencedContainer = "container:SwiftSample.xcodeproj">
+         </BuildableReference>
+      </BuildableProductRunnable>
+   </ProfileAction>
+   <AnalyzeAction
+      buildConfiguration = "Debug">
+   </AnalyzeAction>
+   <ArchiveAction
+      buildConfiguration = "Release"
+      revealArchiveInOrganizer = "YES">
+   </ArchiveAction>
+</Scheme>
diff --git a/src/objective-c/examples/SwiftSample/ViewController.swift b/src/objective-c/examples/SwiftSample/ViewController.swift
index a21ce07978df7ba03a725132a2b01be181e44611..2a95d2de516903abae8924d58dfb9ca1db553d53 100644
--- a/src/objective-c/examples/SwiftSample/ViewController.swift
+++ b/src/objective-c/examples/SwiftSample/ViewController.swift
@@ -71,7 +71,8 @@ class ViewController: UIViewController {
       NSLog("2. Response trailers: \(RPC.responseTrailers)")
     }
 
-    RPC.requestHeaders["My-Header"] = "My value"
+    // TODO(jcanizales): Revert to using subscript syntax once Xcode 8 is released.
+    RPC.requestHeaders.setObject("My value", forKey: "My-Header")
 
     RPC.start()
 
@@ -84,7 +85,7 @@ class ViewController: UIViewController {
 
     let call = GRPCCall(host: RemoteHost, path: method.HTTPPath, requestsWriter: requestsWriter)
 
-    call.requestHeaders["My-Header"] = "My value"
+    call.requestHeaders.setObject("My value", forKey: "My-Header")
 
     call.startWithWriteable(GRXWriteable { response, error in
       if let response = response as? NSData {
diff --git a/src/objective-c/tests/Connectivity/Base.lproj/Main.storyboard b/src/objective-c/tests/Connectivity/Base.lproj/Main.storyboard
new file mode 100644
index 0000000000000000000000000000000000000000..9a05b8635d1ef71a5bfb70f953d553aedd91d6ba
--- /dev/null
+++ b/src/objective-c/tests/Connectivity/Base.lproj/Main.storyboard
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="11129.15" systemVersion="15F34" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" colorMatched="YES" initialViewController="BYZ-38-t0r">
+    <dependencies>
+        <deployment identifier="iOS"/>
+        <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="11103.10"/>
+    </dependencies>
+    <scenes>
+        <!--View Controller-->
+        <scene sceneID="tne-QT-ifu">
+            <objects>
+                <viewController id="BYZ-38-t0r" customClass="ViewController" sceneMemberID="viewController"/>
+                <placeholder placeholderIdentifier="IBFirstResponder" id="dkx-z0-nzr" sceneMemberID="firstResponder"/>
+            </objects>
+        </scene>
+    </scenes>
+</document>
diff --git a/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj b/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj
new file mode 100644
index 0000000000000000000000000000000000000000..2a9466c03f11e8183720c6520d4919b174c51a66
--- /dev/null
+++ b/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj
@@ -0,0 +1,353 @@
+// !$*UTF8*$!
+{
+	archiveVersion = 1;
+	classes = {
+	};
+	objectVersion = 46;
+	objects = {
+
+/* Begin PBXBuildFile section */
+		500A4E0AC9D489EB214D1ED4 /* libPods-ConnectivityTestingApp.a in Frameworks */ = {isa = PBXBuildFile; fileRef = C2AF815D8242A2172891621D /* libPods-ConnectivityTestingApp.a */; };
+		63BFB9CC1D2478DD00E17927 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 63BFB9CB1D2478DD00E17927 /* main.m */; };
+		63BFB9D21D2478DD00E17927 /* ViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = 63BFB9D11D2478DD00E17927 /* ViewController.m */; };
+		63BFB9D51D2478DD00E17927 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 63BFB9D31D2478DD00E17927 /* Main.storyboard */; };
+/* End PBXBuildFile section */
+
+/* Begin PBXFileReference section */
+		63BFB9C71D2478DD00E17927 /* ConnectivityTestingApp.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = ConnectivityTestingApp.app; sourceTree = BUILT_PRODUCTS_DIR; };
+		63BFB9CB1D2478DD00E17927 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = SOURCE_ROOT; };
+		63BFB9D11D2478DD00E17927 /* ViewController.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ViewController.m; sourceTree = SOURCE_ROOT; };
+		63BFB9D41D2478DD00E17927 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; };
+		63BFB9DB1D2478DD00E17927 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = SOURCE_ROOT; };
+		BA96CBC1612BD2F70E66246C /* Pods-ConnectivityTestingApp.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-ConnectivityTestingApp.release.xcconfig"; path = "Pods/Target Support Files/Pods-ConnectivityTestingApp/Pods-ConnectivityTestingApp.release.xcconfig"; sourceTree = "<group>"; };
+		C2AF815D8242A2172891621D /* libPods-ConnectivityTestingApp.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-ConnectivityTestingApp.a"; sourceTree = BUILT_PRODUCTS_DIR; };
+		FC9BD3AE427396EDB4CD13E3 /* Pods-ConnectivityTestingApp.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-ConnectivityTestingApp.debug.xcconfig"; path = "Pods/Target Support Files/Pods-ConnectivityTestingApp/Pods-ConnectivityTestingApp.debug.xcconfig"; sourceTree = "<group>"; };
+/* End PBXFileReference section */
+
+/* Begin PBXFrameworksBuildPhase section */
+		63BFB9C41D2478DD00E17927 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				500A4E0AC9D489EB214D1ED4 /* libPods-ConnectivityTestingApp.a in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+/* End PBXFrameworksBuildPhase section */
+
+/* Begin PBXGroup section */
+		16E6C67F2E48B42376DFFD2A /* Pods */ = {
+			isa = PBXGroup;
+			children = (
+				FC9BD3AE427396EDB4CD13E3 /* Pods-ConnectivityTestingApp.debug.xcconfig */,
+				BA96CBC1612BD2F70E66246C /* Pods-ConnectivityTestingApp.release.xcconfig */,
+			);
+			name = Pods;
+			sourceTree = "<group>";
+		};
+		48F8EC18C66D3416A41F76F5 /* Frameworks */ = {
+			isa = PBXGroup;
+			children = (
+				C2AF815D8242A2172891621D /* libPods-ConnectivityTestingApp.a */,
+			);
+			name = Frameworks;
+			sourceTree = "<group>";
+		};
+		63BFB9BE1D2478DD00E17927 = {
+			isa = PBXGroup;
+			children = (
+				63BFB9D11D2478DD00E17927 /* ViewController.m */,
+				63BFB9D31D2478DD00E17927 /* Main.storyboard */,
+				63BFB9DB1D2478DD00E17927 /* Info.plist */,
+				63BFB9CB1D2478DD00E17927 /* main.m */,
+				63BFB9C81D2478DD00E17927 /* Products */,
+				16E6C67F2E48B42376DFFD2A /* Pods */,
+				48F8EC18C66D3416A41F76F5 /* Frameworks */,
+			);
+			sourceTree = "<group>";
+		};
+		63BFB9C81D2478DD00E17927 /* Products */ = {
+			isa = PBXGroup;
+			children = (
+				63BFB9C71D2478DD00E17927 /* ConnectivityTestingApp.app */,
+			);
+			name = Products;
+			sourceTree = "<group>";
+		};
+/* End PBXGroup section */
+
+/* Begin PBXNativeTarget section */
+		63BFB9C61D2478DD00E17927 /* ConnectivityTestingApp */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 63BFB9DE1D2478DD00E17927 /* Build configuration list for PBXNativeTarget "ConnectivityTestingApp" */;
+			buildPhases = (
+				4DCA2703A0AA5DC1BD2751B8 /* [CP] Check Pods Manifest.lock */,
+				63BFB9C31D2478DD00E17927 /* Sources */,
+				63BFB9C41D2478DD00E17927 /* Frameworks */,
+				63BFB9C51D2478DD00E17927 /* Resources */,
+				8593A2388A8F7BF5A7E98D26 /* [CP] Embed Pods Frameworks */,
+				5347BF6C41E7888C1C05CD88 /* [CP] Copy Pods Resources */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = ConnectivityTestingApp;
+			productName = ConnectivityTestingApp;
+			productReference = 63BFB9C71D2478DD00E17927 /* ConnectivityTestingApp.app */;
+			productType = "com.apple.product-type.application";
+		};
+/* End PBXNativeTarget section */
+
+/* Begin PBXProject section */
+		63BFB9BF1D2478DD00E17927 /* Project object */ = {
+			isa = PBXProject;
+			attributes = {
+				LastUpgradeCheck = 0800;
+				ORGANIZATIONNAME = gRPC;
+				TargetAttributes = {
+					63BFB9C61D2478DD00E17927 = {
+						CreatedOnToolsVersion = 8.0;
+						DevelopmentTeam = EQHXZ8M8AV;
+						DevelopmentTeamName = "Google, Inc.";
+						ProvisioningStyle = Automatic;
+					};
+				};
+			};
+			buildConfigurationList = 63BFB9C21D2478DD00E17927 /* Build configuration list for PBXProject "ConnectivityTestingApp" */;
+			compatibilityVersion = "Xcode 3.2";
+			developmentRegion = English;
+			hasScannedForEncodings = 0;
+			knownRegions = (
+				en,
+				Base,
+			);
+			mainGroup = 63BFB9BE1D2478DD00E17927;
+			productRefGroup = 63BFB9C81D2478DD00E17927 /* Products */;
+			projectDirPath = "";
+			projectRoot = "";
+			targets = (
+				63BFB9C61D2478DD00E17927 /* ConnectivityTestingApp */,
+			);
+		};
+/* End PBXProject section */
+
+/* Begin PBXResourcesBuildPhase section */
+		63BFB9C51D2478DD00E17927 /* Resources */ = {
+			isa = PBXResourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				63BFB9D51D2478DD00E17927 /* Main.storyboard in Resources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+/* End PBXResourcesBuildPhase section */
+
+/* Begin PBXShellScriptBuildPhase section */
+		4DCA2703A0AA5DC1BD2751B8 /* [CP] Check Pods Manifest.lock */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			name = "[CP] Check Pods Manifest.lock";
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
+			showEnvVarsInLog = 0;
+		};
+		5347BF6C41E7888C1C05CD88 /* [CP] Copy Pods Resources */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			name = "[CP] Copy Pods Resources";
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-ConnectivityTestingApp/Pods-ConnectivityTestingApp-resources.sh\"\n";
+			showEnvVarsInLog = 0;
+		};
+		8593A2388A8F7BF5A7E98D26 /* [CP] Embed Pods Frameworks */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			name = "[CP] Embed Pods Frameworks";
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-ConnectivityTestingApp/Pods-ConnectivityTestingApp-frameworks.sh\"\n";
+			showEnvVarsInLog = 0;
+		};
+/* End PBXShellScriptBuildPhase section */
+
+/* Begin PBXSourcesBuildPhase section */
+		63BFB9C31D2478DD00E17927 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				63BFB9D21D2478DD00E17927 /* ViewController.m in Sources */,
+				63BFB9CC1D2478DD00E17927 /* main.m in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+/* End PBXSourcesBuildPhase section */
+
+/* Begin PBXVariantGroup section */
+		63BFB9D31D2478DD00E17927 /* Main.storyboard */ = {
+			isa = PBXVariantGroup;
+			children = (
+				63BFB9D41D2478DD00E17927 /* Base */,
+			);
+			name = Main.storyboard;
+			path = .;
+			sourceTree = SOURCE_ROOT;
+		};
+/* End PBXVariantGroup section */
+
+/* Begin XCBuildConfiguration section */
+		63BFB9DC1D2478DD00E17927 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				CLANG_ANALYZER_NONNULL = YES;
+				CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
+				CLANG_CXX_LIBRARY = "libc++";
+				CLANG_ENABLE_MODULES = YES;
+				CLANG_ENABLE_OBJC_ARC = YES;
+				CLANG_WARN_BOOL_CONVERSION = YES;
+				CLANG_WARN_CONSTANT_CONVERSION = YES;
+				CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
+				CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
+				CLANG_WARN_EMPTY_BODY = YES;
+				CLANG_WARN_ENUM_CONVERSION = YES;
+				CLANG_WARN_INT_CONVERSION = YES;
+				CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
+				CLANG_WARN_UNREACHABLE_CODE = YES;
+				CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+				"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
+				COPY_PHASE_STRIP = NO;
+				DEBUG_INFORMATION_FORMAT = dwarf;
+				ENABLE_STRICT_OBJC_MSGSEND = YES;
+				ENABLE_TESTABILITY = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_DYNAMIC_NO_PIC = NO;
+				GCC_NO_COMMON_BLOCKS = YES;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"DEBUG=1",
+					"$(inherited)",
+				);
+				GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
+				GCC_WARN_UNDECLARED_SELECTOR = YES;
+				GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
+				GCC_WARN_UNUSED_FUNCTION = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				IPHONEOS_DEPLOYMENT_TARGET = 10.0;
+				MTL_ENABLE_DEBUG_INFO = YES;
+				ONLY_ACTIVE_ARCH = YES;
+				SDKROOT = iphoneos;
+			};
+			name = Debug;
+		};
+		63BFB9DD1D2478DD00E17927 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				ALWAYS_SEARCH_USER_PATHS = NO;
+				CLANG_ANALYZER_NONNULL = YES;
+				CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
+				CLANG_CXX_LIBRARY = "libc++";
+				CLANG_ENABLE_MODULES = YES;
+				CLANG_ENABLE_OBJC_ARC = YES;
+				CLANG_WARN_BOOL_CONVERSION = YES;
+				CLANG_WARN_CONSTANT_CONVERSION = YES;
+				CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
+				CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
+				CLANG_WARN_EMPTY_BODY = YES;
+				CLANG_WARN_ENUM_CONVERSION = YES;
+				CLANG_WARN_INT_CONVERSION = YES;
+				CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
+				CLANG_WARN_UNREACHABLE_CODE = YES;
+				CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+				"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
+				COPY_PHASE_STRIP = NO;
+				DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+				ENABLE_NS_ASSERTIONS = NO;
+				ENABLE_STRICT_OBJC_MSGSEND = YES;
+				GCC_C_LANGUAGE_STANDARD = gnu99;
+				GCC_NO_COMMON_BLOCKS = YES;
+				GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+				GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
+				GCC_WARN_UNDECLARED_SELECTOR = YES;
+				GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
+				GCC_WARN_UNUSED_FUNCTION = YES;
+				GCC_WARN_UNUSED_VARIABLE = YES;
+				IPHONEOS_DEPLOYMENT_TARGET = 10.0;
+				MTL_ENABLE_DEBUG_INFO = NO;
+				SDKROOT = iphoneos;
+				VALIDATE_PRODUCT = YES;
+			};
+			name = Release;
+		};
+		63BFB9DF1D2478DD00E17927 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			baseConfigurationReference = FC9BD3AE427396EDB4CD13E3 /* Pods-ConnectivityTestingApp.debug.xcconfig */;
+			buildSettings = {
+				ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
+				INFOPLIST_FILE = Info.plist;
+				IPHONEOS_DEPLOYMENT_TARGET = 9.3;
+				LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
+				PRODUCT_BUNDLE_IDENTIFIER = io.grpc.ConnectivityTestingApp;
+				PRODUCT_NAME = "$(TARGET_NAME)";
+			};
+			name = Debug;
+		};
+		63BFB9E01D2478DD00E17927 /* Release */ = {
+			isa = XCBuildConfiguration;
+			baseConfigurationReference = BA96CBC1612BD2F70E66246C /* Pods-ConnectivityTestingApp.release.xcconfig */;
+			buildSettings = {
+				ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
+				INFOPLIST_FILE = Info.plist;
+				IPHONEOS_DEPLOYMENT_TARGET = 9.3;
+				LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
+				PRODUCT_BUNDLE_IDENTIFIER = io.grpc.ConnectivityTestingApp;
+				PRODUCT_NAME = "$(TARGET_NAME)";
+			};
+			name = Release;
+		};
+/* End XCBuildConfiguration section */
+
+/* Begin XCConfigurationList section */
+		63BFB9C21D2478DD00E17927 /* Build configuration list for PBXProject "ConnectivityTestingApp" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				63BFB9DC1D2478DD00E17927 /* Debug */,
+				63BFB9DD1D2478DD00E17927 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Release;
+		};
+		63BFB9DE1D2478DD00E17927 /* Build configuration list for PBXNativeTarget "ConnectivityTestingApp" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				63BFB9DF1D2478DD00E17927 /* Debug */,
+				63BFB9E01D2478DD00E17927 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Release;
+		};
+/* End XCConfigurationList section */
+	};
+	rootObject = 63BFB9BF1D2478DD00E17927 /* Project object */;
+}
diff --git a/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.xcworkspace/contents.xcworkspacedata
new file mode 100644
index 0000000000000000000000000000000000000000..b541b4b44d5220c3bccf391fc4bafc022b23f59e
--- /dev/null
+++ b/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.xcworkspace/contents.xcworkspacedata
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Workspace
+   version = "1.0">
+   <FileRef
+      location = "self:ConnectivityTestingApp.xcodeproj">
+   </FileRef>
+</Workspace>
diff --git a/src/objective-c/tests/Connectivity/Info.plist b/src/objective-c/tests/Connectivity/Info.plist
new file mode 100644
index 0000000000000000000000000000000000000000..8a9fb887013665d59f6bc67c59f64009efce32d4
--- /dev/null
+++ b/src/objective-c/tests/Connectivity/Info.plist
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>CFBundleDevelopmentRegion</key>
+	<string>en</string>
+	<key>CFBundleExecutable</key>
+	<string>$(EXECUTABLE_NAME)</string>
+	<key>CFBundleIdentifier</key>
+	<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
+	<key>CFBundleInfoDictionaryVersion</key>
+	<string>6.0</string>
+	<key>CFBundleName</key>
+	<string>$(PRODUCT_NAME)</string>
+	<key>CFBundlePackageType</key>
+	<string>APPL</string>
+	<key>CFBundleShortVersionString</key>
+	<string>1.0</string>
+	<key>CFBundleSignature</key>
+	<string>????</string>
+	<key>CFBundleVersion</key>
+	<string>1</string>
+	<key>LSRequiresIPhoneOS</key>
+	<true/>
+	<key>UILaunchStoryboardName</key>
+	<string>Main</string>
+	<key>UIMainStoryboardFile</key>
+	<string>Main</string>
+	<key>UIRequiredDeviceCapabilities</key>
+	<array>
+		<string>armv7</string>
+	</array>
+	<key>UISupportedInterfaceOrientations</key>
+	<array>
+		<string>UIInterfaceOrientationPortrait</string>
+		<string>UIInterfaceOrientationLandscapeLeft</string>
+		<string>UIInterfaceOrientationLandscapeRight</string>
+	</array>
+</dict>
+</plist>
diff --git a/src/objective-c/tests/Connectivity/Podfile b/src/objective-c/tests/Connectivity/Podfile
new file mode 100644
index 0000000000000000000000000000000000000000..f9224d9e4e970249d8996b3d6c31d7bd4139ddae
--- /dev/null
+++ b/src/objective-c/tests/Connectivity/Podfile
@@ -0,0 +1,10 @@
+install! 'cocoapods', :deterministic_uuids => false
+
+# Location of gRPC's repo root relative to this file.
+GRPC_LOCAL_SRC = '../../../..'
+
+target 'ConnectivityTestingApp' do
+  pod 'gRPC', :path => GRPC_LOCAL_SRC
+  pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf"
+  pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
+end
diff --git a/src/objective-c/tests/Connectivity/README.md b/src/objective-c/tests/Connectivity/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..851cb9d1dabebc9d884d0fbdd81e1e7d4c786a78
--- /dev/null
+++ b/src/objective-c/tests/Connectivity/README.md
@@ -0,0 +1,16 @@
+This app can be used to manually test gRPC under changing network conditions.
+
+It makes RPCs in a loop, logging when the request is sent and the response is received.
+
+To test on the simulator, run `pod install`, open the workspace created by Cocoapods, and run the app.
+Once running, disable WiFi (or ethernet) _on your computer_, then enable it again after a while. Don't
+bother with the simulator's WiFi or cell settings, as they have no effect: Simulator apps are just Mac
+apps running within the simulator UI.
+
+The expected result is to never see a "hung" RPC: success or failure should happen almost immediately
+after sending the request. The symptom of a hung RPC is a log like the following being the last one in your
+console:
+
+```
+2016-06-29 16:51:29.443 ConnectivityTestingApp[73129:3567949] Sending request.
+```
diff --git a/src/objective-c/tests/Connectivity/ViewController.m b/src/objective-c/tests/Connectivity/ViewController.m
new file mode 100644
index 0000000000000000000000000000000000000000..2b199c9617fbc062b0a1cacc2fbc77424ca3750d
--- /dev/null
+++ b/src/objective-c/tests/Connectivity/ViewController.m
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#import <UIKit/UIKit.h>
+
+#import <GRPCClient/GRPCCall.h>
+#import <ProtoRPC/ProtoMethod.h>
+#import <RxLibrary/GRXWriter+Immediate.h>
+#import <RxLibrary/GRXWriter+Transformations.h>
+
+@interface ViewController : UIViewController
+@end
+
+@implementation ViewController
+- (void)viewDidLoad {
+  [super viewDidLoad];
+
+  NSString *host = @"grpc-test.sandbox.googleapis.com";
+
+  GRPCProtoMethod *method = [[GRPCProtoMethod alloc] initWithPackage:@"grpc.testing"
+                                                             service:@"TestService"
+                                                              method:@"StreamingOutputCall"];
+
+  __block void (^startCall)() = ^{
+    GRXWriter *loggingRequestWriter = [[GRXWriter writerWithValue:[NSData data]] map:^id(id value) {
+      NSLog(@"Sending request.");
+      return value;
+    }];
+
+    GRPCCall *call = [[GRPCCall alloc] initWithHost:host
+                                               path:method.HTTPPath
+                                     requestsWriter:loggingRequestWriter];
+
+    [call startWithWriteable:[GRXWriteable writeableWithEventHandler:^(BOOL done, id value,
+                                                                       NSError *error) {
+      if (!done) {
+        return;
+      }
+      if (error) {
+        NSLog(@"Finished with error %@", error);
+      } else {
+        NSLog(@"Finished successfully.");
+      }
+
+      dispatch_time_t oneSecond = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(1 * NSEC_PER_SEC));
+      dispatch_after(oneSecond, dispatch_get_main_queue(), startCall);
+    }]];
+  };
+
+  startCall();
+}
+@end
diff --git a/src/core/lib/surface/surface_trace.h b/src/objective-c/tests/Connectivity/main.m
similarity index 74%
rename from src/core/lib/surface/surface_trace.h
rename to src/objective-c/tests/Connectivity/main.m
index a69a0fff577336ee7821020ccdc0013f921d488d..5e09196d54902cdcefa217c40534f458a3baf3e3 100644
--- a/src/core/lib/surface/surface_trace.h
+++ b/src/objective-c/tests/Connectivity/main.m
@@ -31,18 +31,16 @@
  *
  */
 
-#ifndef GRPC_CORE_LIB_SURFACE_SURFACE_TRACE_H
-#define GRPC_CORE_LIB_SURFACE_SURFACE_TRACE_H
+#import <UIKit/UIKit.h>
 
-#include <grpc/support/log.h>
-#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/surface/api_trace.h"
+@interface AppDelegate : UIResponder <UIApplicationDelegate>
+@property (strong, nonatomic) UIWindow *window;
+@end
+@implementation AppDelegate
+@end
 
-#define GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, event)    \
-  if (grpc_api_trace) {                                 \
-    char *_ev = grpc_event_string(event);               \
-    gpr_log(GPR_INFO, "RETURN_EVENT[%p]: %s", cq, _ev); \
-    gpr_free(_ev);                                      \
+int main(int argc, char * argv[]) {
+  @autoreleasepool {
+      return UIApplicationMain(argc, argv, nil, NSStringFromClass(AppDelegate.class));
   }
-
-#endif /* GRPC_CORE_LIB_SURFACE_SURFACE_TRACE_H */
+}
diff --git a/src/objective-c/tests/GRPCClientTests.m b/src/objective-c/tests/GRPCClientTests.m
index 9a8d425324129823a37e1cbbdc8b736cc217366f..1167a715bb9c06158499428462f5902e17c4ac35 100644
--- a/src/objective-c/tests/GRPCClientTests.m
+++ b/src/objective-c/tests/GRPCClientTests.m
@@ -48,9 +48,9 @@ static NSString * const kPackage = @"grpc.testing";
 static NSString * const kService = @"TestService";
 static NSString * const kRemoteSSLHost = @"grpc-test.sandbox.googleapis.com";
 
-static ProtoMethod *kInexistentMethod;
-static ProtoMethod *kEmptyCallMethod;
-static ProtoMethod *kUnaryCallMethod;
+static GRPCProtoMethod *kInexistentMethod;
+static GRPCProtoMethod *kEmptyCallMethod;
+static GRPCProtoMethod *kUnaryCallMethod;
 
 /** Observer class for testing that responseMetadata is KVO-compliant */
 @interface PassthroughObserver : NSObject
@@ -109,15 +109,15 @@ static ProtoMethod *kUnaryCallMethod;
   [GRPCCall useInsecureConnectionsForHost:kHostAddress];
 
   // This method isn't implemented by the remote server.
-  kInexistentMethod = [[ProtoMethod alloc] initWithPackage:kPackage
-                                                   service:kService
-                                                    method:@"Inexistent"];
-  kEmptyCallMethod = [[ProtoMethod alloc] initWithPackage:kPackage
-                                                  service:kService
-                                                   method:@"EmptyCall"];
-  kUnaryCallMethod = [[ProtoMethod alloc] initWithPackage:kPackage
-                                                  service:kService
-                                                   method:@"UnaryCall"];
+  kInexistentMethod = [[GRPCProtoMethod alloc] initWithPackage:kPackage
+                                                       service:kService
+                                                        method:@"Inexistent"];
+  kEmptyCallMethod = [[GRPCProtoMethod alloc] initWithPackage:kPackage
+                                                      service:kService
+                                                       method:@"EmptyCall"];
+  kUnaryCallMethod = [[GRPCProtoMethod alloc] initWithPackage:kPackage
+                                                      service:kService
+                                                       method:@"UnaryCall"];
 }
 
 - (void)testConnectionToRemoteServer {
@@ -303,9 +303,9 @@ static ProtoMethod *kUnaryCallMethod;
 
   // Try to set parameters to nil for GRPCCall. This should cause an exception
   @try {
-    GRPCCall *call = [[GRPCCall alloc] initWithHost:nil
-                                               path:nil
-                                     requestsWriter:nil];
+    (void)[[GRPCCall alloc] initWithHost:nil
+                                    path:nil
+                          requestsWriter:nil];
     XCTFail(@"Did not receive an exception when parameters are nil");
   } @catch(NSException *theException) {
     NSLog(@"Received exception as expected: %@", theException.name);
@@ -316,9 +316,9 @@ static ProtoMethod *kUnaryCallMethod;
   GRXWriter *requestsWriter = [GRXWriter emptyWriter];
   [requestsWriter finishWithError:nil];
   @try {
-    GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress
-                                               path:kUnaryCallMethod.HTTPPath
-                                     requestsWriter:requestsWriter];
+    (void)[[GRPCCall alloc] initWithHost:kHostAddress
+                                    path:kUnaryCallMethod.HTTPPath
+                          requestsWriter:requestsWriter];
     XCTFail(@"Did not receive an exception when GRXWriter has incorrect state.");
   } @catch(NSException *theException) {
     NSLog(@"Received exception as expected: %@", theException.name);
diff --git a/src/objective-c/tests/InteropTests.m b/src/objective-c/tests/InteropTests.m
index 781c500f73e976fcb8f479803829e13f8a823dd6..494743d6041ed690a7420cb9a9d672810826ff26 100644
--- a/src/objective-c/tests/InteropTests.m
+++ b/src/objective-c/tests/InteropTests.m
@@ -36,10 +36,10 @@
 #include <grpc/status.h>
 
 #import <Cronet/Cronet.h>
+#import <GRPCClient/GRPCCall+ChannelArg.h>
 #import <GRPCClient/GRPCCall+Tests.h>
 #import <GRPCClient/GRPCCall+Cronet.h>
 #import <ProtoRPC/ProtoRPC.h>
-#import <RemoteTest/Empty.pbobjc.h>
 #import <RemoteTest/Messages.pbobjc.h>
 #import <RemoteTest/Test.pbobjc.h>
 #import <RemoteTest/Test.pbrpc.h>
@@ -58,7 +58,7 @@
                  requestedResponseSize:(NSNumber *)responseSize {
   RMTStreamingOutputCallRequest *request = [self message];
   RMTResponseParameters *parameters = [RMTResponseParameters message];
-  parameters.size = responseSize.integerValue;
+  parameters.size = responseSize.intValue;
   [request.responseParametersArray addObject:parameters];
   request.payload.body = [NSMutableData dataWithLength:payloadSize.unsignedIntegerValue];
   return request;
@@ -80,7 +80,9 @@
 
 #pragma mark Tests
 
+#ifdef GRPC_COMPILE_WITH_CRONET
 static cronet_engine *cronetEngine = NULL;
+#endif
 
 @implementation InteropTests {
   RMTTestService *_service;
@@ -107,12 +109,12 @@ static cronet_engine *cronetEngine = NULL;
   XCTAssertNotNil(self.class.host);
   __weak XCTestExpectation *expectation = [self expectationWithDescription:@"EmptyUnary"];
 
-  RMTEmpty *request = [RMTEmpty message];
+  GPBEmpty *request = [GPBEmpty message];
 
-  [_service emptyCallWithRequest:request handler:^(RMTEmpty *response, NSError *error) {
+  [_service emptyCallWithRequest:request handler:^(GPBEmpty *response, NSError *error) {
     XCTAssertNil(error, @"Finished with unexpected error: %@", error);
 
-    id expectedResponse = [RMTEmpty message];
+    id expectedResponse = [GPBEmpty message];
     XCTAssertEqualObjects(response, expectedResponse);
 
     [expectation fulfill];
@@ -186,7 +188,7 @@ static cronet_engine *cronetEngine = NULL;
   RMTStreamingOutputCallRequest *request = [RMTStreamingOutputCallRequest message];
   for (NSNumber *size in expectedSizes) {
     RMTResponseParameters *parameters = [RMTResponseParameters message];
-    parameters.size = [size integerValue];
+    parameters.size = [size intValue];
     [request.responseParametersArray addObject:parameters];
   }
 
@@ -282,9 +284,10 @@ static cronet_engine *cronetEngine = NULL;
   // A buffered pipe to which we never write any value acts as a writer that just hangs.
   GRXBufferedPipe *requestsBuffer = [[GRXBufferedPipe alloc] init];
 
-  ProtoRPC *call = [_service RPCToStreamingInputCallWithRequestsWriter:requestsBuffer
-                                                               handler:^(RMTStreamingInputCallResponse *response,
-                                                                         NSError *error) {
+  GRPCProtoCall *call =
+      [_service RPCToStreamingInputCallWithRequestsWriter:requestsBuffer
+                                                  handler:^(RMTStreamingInputCallResponse *response,
+                                                            NSError *error) {
     XCTAssertEqual(error.code, GRPC_STATUS_CANCELLED);
     [expectation fulfill];
   }];
@@ -313,7 +316,7 @@ static cronet_engine *cronetEngine = NULL;
 
   [requestsBuffer writeValue:request];
 
-  __block ProtoRPC *call =
+  __block GRPCProtoCall *call =
       [_service RPCToFullDuplexCallWithRequestsWriter:requestsBuffer
                                          eventHandler:^(BOOL done,
                                                         RMTStreamingOutputCallResponse *response,
@@ -334,4 +337,28 @@ static cronet_engine *cronetEngine = NULL;
   [self waitForExpectationsWithTimeout:8 handler:nil];
 }
 
+- (void)testRPCAfterClosingOpenConnections {
+  XCTAssertNotNil(self.class.host);
+  __weak XCTestExpectation *expectation =
+      [self expectationWithDescription:@"RPC after closing connection"];
+
+  GPBEmpty *request = [GPBEmpty message];
+
+  [_service emptyCallWithRequest:request handler:^(GPBEmpty *response, NSError *error) {
+    XCTAssertNil(error, @"First RPC finished with unexpected error: %@", error);
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+    [GRPCCall closeOpenConnections];
+#pragma clang diagnostic pop
+
+    [_service emptyCallWithRequest:request handler:^(GPBEmpty *response, NSError *error) {
+      XCTAssertNil(error, @"Second RPC finished with unexpected error: %@", error);
+      [expectation fulfill];
+    }];
+  }];
+
+  [self waitForExpectationsWithTimeout:4 handler:nil];
+}
+
 @end
diff --git a/src/objective-c/tests/Podfile b/src/objective-c/tests/Podfile
index a7a88a3b9df9d4c570d2dc63da77f04a1c41ec5c..17b9740d52e4c1600be28fc20ba5260ad7710ad8 100644
--- a/src/objective-c/tests/Podfile
+++ b/src/objective-c/tests/Podfile
@@ -3,34 +3,79 @@ platform :ios, '8.0'
 
 install! 'cocoapods', :deterministic_uuids => false
 
-def shared_pods
-	pod 'Protobuf', :path => "../../../third_party/protobuf"
-	pod 'BoringSSL', :podspec => ".."
-	pod 'CronetFramework', :podspec => ".."
-	pod 'gRPC', :path => "../../.."
-	pod 'RemoteTest', :path => "RemoteTestClient"
-end
+# Location of gRPC's repo root relative to this file.
+GRPC_LOCAL_SRC = '../../..'
 
-target 'Tests' do
-	shared_pods
-end
+# Install the dependencies in the main target plus all test targets.
+%w(
+  Tests
+  AllTests
+  RxLibraryUnitTests
+  InteropTestsRemote
+  InteropTestsLocalSSL
+  InteropTestsLocalCleartext
+).each do |target_name|
+  target target_name do
+    pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf", :inhibit_warnings => true
 
-target 'AllTests' do
-	shared_pods
-end
+    pod '!ProtoCompiler',            :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
+    pod '!ProtoCompiler-gRPCPlugin', :path => "#{GRPC_LOCAL_SRC}/src/objective-c"
 
-target 'RxLibraryUnitTests' do
-	shared_pods
-end
+    pod 'BoringSSL',       :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c", :inhibit_warnings => true
+    pod 'CronetFramework', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
+
+    pod 'gRPC',           :path => GRPC_LOCAL_SRC
+    pod 'gRPC-Core',      :path => GRPC_LOCAL_SRC
+    pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC
+    pod 'gRPC-ProtoRPC',  :path => GRPC_LOCAL_SRC
 
-target 'InteropTestsRemote' do
-	shared_pods
+    pod 'RemoteTest', :path => "RemoteTestClient"
+  end
 end
 
-target 'InteropTestsLocalSSL' do
-	shared_pods
+# gRPC-Core.podspec needs to be modified to be successfully used for local development. A Podfile's
+# pre_install hook lets us do that. The block passed to it runs after the podspecs are downloaded
+# and before they are installed in the user project.
+#
+# This podspec searches for the gRPC core library headers under "$(PODS_ROOT)/gRPC-Core", where
+# Cocoapods normally places the downloaded sources. When doing local development of the libraries,
+# though, Cocoapods just takes the sources from whatever directory was specified using `:path`, and
+# doesn't copy them under $(PODS_ROOT). When using static libraries, one can sometimes rely on the
+# symbolic links to the pods headers that Cocoapods creates under "$(PODS_ROOT)/Headers". But those
+# aren't created when using dynamic frameworks. So our solution is to modify the podspec on the fly
+# to point at the local directory where the sources are.
+#
+# TODO(jcanizales): Send a PR to Cocoapods to get rid of this need.
+pre_install do |installer|
+  # This is the gRPC-Core podspec object, as initialized by its podspec file.
+  grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec
+
+  # Copied from gRPC-Core.podspec, except for the adjusted src_root:
+  src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}"
+  grpc_core_spec.pod_target_xcconfig = {
+    'GRPC_SRC_ROOT' => src_root,
+    'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"',
+    'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"',
+    # If we don't set these two settings, `include/grpc/support/time.h` and
+    # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the
+    # build.
+    'USE_HEADERMAP' => 'NO',
+    'ALWAYS_SEARCH_USER_PATHS' => 'NO',
+  }
 end
 
-target 'InteropTestsLocalCleartext' do
-	shared_pods
+post_install do |installer|
+  installer.pods_project.targets.each do |target|
+    target.build_configurations.each do |config|
+      config.build_settings['GCC_TREAT_WARNINGS_AS_ERRORS'] = 'YES'
+    end
+    if target.name == 'gRPC-Core'
+      target.build_configurations.each do |config|
+        # TODO(zyc): Remove this setting after the issue is resolved
+        # GPR_UNREACHABLE_CODE causes "Control may reach end of non-void
+        # function" warning
+        config.build_settings['GCC_WARN_ABOUT_RETURN_TYPE'] = 'NO'
+      end
+    end
+  end
 end
diff --git a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
index e1fd9910389565b90b2e6f729cd063933c67b865..7d84a5ae4dc73a4d345095cd66511ca501edb2b5 100644
--- a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
+++ b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
@@ -11,25 +11,41 @@ Pod::Spec.new do |s|
   s.osx.deployment_target = '10.9'
 
   # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
+  s.dependency "!ProtoCompiler-gRPCPlugin", "~> 0.14"
+
+  repo_root = '../../../..'
+  bin_dir = "#{repo_root}/bins/$CONFIG"
+
+  protoc = "#{bin_dir}/protobuf/protoc"
+  well_known_types_dir = "#{repo_root}/third_party/protobuf/src"
+  plugin = "#{bin_dir}/grpc_objective_c_plugin"
+
   s.prepare_command = <<-CMD
-    BINDIR=../../../../bins/$CONFIG
-    PROTOC=$BINDIR/protobuf/protoc
-    PLUGIN=$BINDIR/grpc_objective_c_plugin
-    $PROTOC --plugin=protoc-gen-grpc=$PLUGIN --objc_out=. --grpc_out=. *.proto
+    #{protoc} \
+        --plugin=protoc-gen-grpc=#{plugin} \
+        --objc_out=. \
+        --grpc_out=. \
+        -I . \
+        -I #{well_known_types_dir} \
+        *.proto
   CMD
 
   s.subspec "Messages" do |ms|
     ms.source_files = "*.pbobjc.{h,m}"
     ms.header_mappings_dir = "."
     ms.requires_arc = false
-    ms.dependency "Protobuf", "~> 3.0.0-alpha-4"
+    ms.dependency "Protobuf"
+    # This is needed by all pods that depend on Protobuf:
+    ms.pod_target_xcconfig = {
+      'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+    }
   end
 
   s.subspec "Services" do |ss|
     ss.source_files = "*.pbrpc.{h,m}"
     ss.header_mappings_dir = "."
     ss.requires_arc = true
-    ss.dependency "gRPC", "~> 0.12"
+    ss.dependency "gRPC-ProtoRPC"
     ss.dependency "#{s.name}/Messages"
   end
 end
diff --git a/src/objective-c/tests/RemoteTestClient/test.proto b/src/objective-c/tests/RemoteTestClient/test.proto
index 514c3b80955fc1c9cf97513d909df6923dd78d72..5c359c5c1296e7270e0bbfa637128cc1ab50285f 100644
--- a/src/objective-c/tests/RemoteTestClient/test.proto
+++ b/src/objective-c/tests/RemoteTestClient/test.proto
@@ -31,7 +31,7 @@
 // of unary/streaming requests/responses.
 syntax = "proto3";
 
-import "empty.proto";
+import "google/protobuf/empty.proto";
 import "messages.proto";
 
 package grpc.testing;
@@ -42,7 +42,7 @@ option objc_class_prefix = "RMT";
 // performance with various types of payload.
 service TestService {
   // One empty request followed by one empty response.
-  rpc EmptyCall(grpc.testing.Empty) returns (grpc.testing.Empty);
+  rpc EmptyCall(google.protobuf.Empty) returns (google.protobuf.Empty);
 
   // One request followed by one response.
   rpc UnaryCall(SimpleRequest) returns (SimpleResponse);
diff --git a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
index 89e0ea60b9e562350a0c2f288978ab64b657f147..f9389a4977fe695e3daf9d7beabceb0e1834efca 100644
--- a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
+++ b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
@@ -274,12 +274,12 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 63423F4D1B150A5F006CF63C /* Build configuration list for PBXNativeTarget "AllTests" */;
 			buildPhases = (
-				914ADDD7106BA9BB8A7E569F /* 📦 Check Pods Manifest.lock */,
+				914ADDD7106BA9BB8A7E569F /* [CP] Check Pods Manifest.lock */,
 				63423F401B150A5F006CF63C /* Sources */,
 				63423F411B150A5F006CF63C /* Frameworks */,
 				63423F421B150A5F006CF63C /* Resources */,
-				A441F71824DCB9D0CA297748 /* 📦 Copy Pods Resources */,
-				5F14F59509E10C2852014F9E /* 📦 Embed Pods Frameworks */,
+				A441F71824DCB9D0CA297748 /* [CP] Copy Pods Resources */,
+				5F14F59509E10C2852014F9E /* [CP] Embed Pods Frameworks */,
 			);
 			buildRules = (
 			);
@@ -295,11 +295,11 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 635697DB1B14FC11007A7283 /* Build configuration list for PBXNativeTarget "Tests" */;
 			buildPhases = (
-				796680C7599CB4ED736DD62A /* 📦 Check Pods Manifest.lock */,
+				796680C7599CB4ED736DD62A /* [CP] Check Pods Manifest.lock */,
 				635697C31B14FC11007A7283 /* Sources */,
 				635697C41B14FC11007A7283 /* Frameworks */,
 				635697C51B14FC11007A7283 /* CopyFiles */,
-				AEEBFC914CBAEE347382E8C4 /* 📦 Copy Pods Resources */,
+				AEEBFC914CBAEE347382E8C4 /* [CP] Copy Pods Resources */,
 			);
 			buildRules = (
 			);
@@ -314,12 +314,12 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 63DC841B1BE15179000708E8 /* Build configuration list for PBXNativeTarget "RxLibraryUnitTests" */;
 			buildPhases = (
-				B2986CEEE8CDD4901C97598B /* 📦 Check Pods Manifest.lock */,
+				B2986CEEE8CDD4901C97598B /* [CP] Check Pods Manifest.lock */,
 				63DC840F1BE15179000708E8 /* Sources */,
 				63DC84101BE15179000708E8 /* Frameworks */,
 				63DC84111BE15179000708E8 /* Resources */,
-				4F5690DC0E6AD6663FE78B8B /* 📦 Embed Pods Frameworks */,
-				C977426A8727267BBAC7D48E /* 📦 Copy Pods Resources */,
+				4F5690DC0E6AD6663FE78B8B /* [CP] Embed Pods Frameworks */,
+				C977426A8727267BBAC7D48E /* [CP] Copy Pods Resources */,
 			);
 			buildRules = (
 			);
@@ -335,12 +335,12 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 63DC842B1BE15267000708E8 /* Build configuration list for PBXNativeTarget "InteropTestsRemote" */;
 			buildPhases = (
-				4C406327D3907A5E5FBA8AC9 /* 📦 Check Pods Manifest.lock */,
+				4C406327D3907A5E5FBA8AC9 /* [CP] Check Pods Manifest.lock */,
 				63DC841F1BE15267000708E8 /* Sources */,
 				63DC84201BE15267000708E8 /* Frameworks */,
 				63DC84211BE15267000708E8 /* Resources */,
-				900B6EDD4D16BE7D765C3885 /* 📦 Embed Pods Frameworks */,
-				C2E09DC4BD239F71160F0CC1 /* 📦 Copy Pods Resources */,
+				900B6EDD4D16BE7D765C3885 /* [CP] Embed Pods Frameworks */,
+				C2E09DC4BD239F71160F0CC1 /* [CP] Copy Pods Resources */,
 			);
 			buildRules = (
 			);
@@ -356,12 +356,12 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 63DC843C1BE15294000708E8 /* Build configuration list for PBXNativeTarget "InteropTestsLocalSSL" */;
 			buildPhases = (
-				5C20DCCB71C3991E6FE78C22 /* 📦 Check Pods Manifest.lock */,
+				5C20DCCB71C3991E6FE78C22 /* [CP] Check Pods Manifest.lock */,
 				63DC84301BE15294000708E8 /* Sources */,
 				63DC84311BE15294000708E8 /* Frameworks */,
 				63DC84321BE15294000708E8 /* Resources */,
-				C591129ACE9F6CC5EE03FCDE /* 📦 Embed Pods Frameworks */,
-				693DD0B453431D64EA24FD66 /* 📦 Copy Pods Resources */,
+				C591129ACE9F6CC5EE03FCDE /* [CP] Embed Pods Frameworks */,
+				693DD0B453431D64EA24FD66 /* [CP] Copy Pods Resources */,
 			);
 			buildRules = (
 			);
@@ -377,12 +377,12 @@
 			isa = PBXNativeTarget;
 			buildConfigurationList = 63DC844B1BE152B5000708E8 /* Build configuration list for PBXNativeTarget "InteropTestsLocalCleartext" */;
 			buildPhases = (
-				7418AC7B3844B29E48D24FC7 /* 📦 Check Pods Manifest.lock */,
+				7418AC7B3844B29E48D24FC7 /* [CP] Check Pods Manifest.lock */,
 				63DC843F1BE152B5000708E8 /* Sources */,
 				63DC84401BE152B5000708E8 /* Frameworks */,
 				63DC84411BE152B5000708E8 /* Resources */,
-				A8E3AC66DF770B774114A30E /* 📦 Embed Pods Frameworks */,
-				8AD3130D3C58A0FB32FF2A36 /* 📦 Copy Pods Resources */,
+				A8E3AC66DF770B774114A30E /* [CP] Embed Pods Frameworks */,
+				8AD3130D3C58A0FB32FF2A36 /* [CP] Copy Pods Resources */,
 			);
 			buildRules = (
 			);
@@ -486,14 +486,14 @@
 /* End PBXResourcesBuildPhase section */
 
 /* Begin PBXShellScriptBuildPhase section */
-		4C406327D3907A5E5FBA8AC9 /* 📦 Check Pods Manifest.lock */ = {
+		4C406327D3907A5E5FBA8AC9 /* [CP] Check Pods Manifest.lock */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Check Pods Manifest.lock";
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -501,14 +501,14 @@
 			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
 			showEnvVarsInLog = 0;
 		};
-		4F5690DC0E6AD6663FE78B8B /* 📦 Embed Pods Frameworks */ = {
+		4F5690DC0E6AD6663FE78B8B /* [CP] Embed Pods Frameworks */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Embed Pods Frameworks";
+			name = "[CP] Embed Pods Frameworks";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -516,14 +516,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-RxLibraryUnitTests/Pods-RxLibraryUnitTests-frameworks.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		5C20DCCB71C3991E6FE78C22 /* 📦 Check Pods Manifest.lock */ = {
+		5C20DCCB71C3991E6FE78C22 /* [CP] Check Pods Manifest.lock */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Check Pods Manifest.lock";
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -531,14 +531,14 @@
 			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
 			showEnvVarsInLog = 0;
 		};
-		5F14F59509E10C2852014F9E /* 📦 Embed Pods Frameworks */ = {
+		5F14F59509E10C2852014F9E /* [CP] Embed Pods Frameworks */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Embed Pods Frameworks";
+			name = "[CP] Embed Pods Frameworks";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -546,14 +546,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-AllTests/Pods-AllTests-frameworks.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		693DD0B453431D64EA24FD66 /* 📦 Copy Pods Resources */ = {
+		693DD0B453431D64EA24FD66 /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -561,14 +561,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalSSL/Pods-InteropTestsLocalSSL-resources.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		7418AC7B3844B29E48D24FC7 /* 📦 Check Pods Manifest.lock */ = {
+		7418AC7B3844B29E48D24FC7 /* [CP] Check Pods Manifest.lock */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Check Pods Manifest.lock";
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -576,14 +576,14 @@
 			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
 			showEnvVarsInLog = 0;
 		};
-		796680C7599CB4ED736DD62A /* 📦 Check Pods Manifest.lock */ = {
+		796680C7599CB4ED736DD62A /* [CP] Check Pods Manifest.lock */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Check Pods Manifest.lock";
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -591,14 +591,14 @@
 			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
 			showEnvVarsInLog = 0;
 		};
-		8AD3130D3C58A0FB32FF2A36 /* 📦 Copy Pods Resources */ = {
+		8AD3130D3C58A0FB32FF2A36 /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -606,14 +606,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalCleartext/Pods-InteropTestsLocalCleartext-resources.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		900B6EDD4D16BE7D765C3885 /* 📦 Embed Pods Frameworks */ = {
+		900B6EDD4D16BE7D765C3885 /* [CP] Embed Pods Frameworks */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Embed Pods Frameworks";
+			name = "[CP] Embed Pods Frameworks";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -621,14 +621,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsRemote/Pods-InteropTestsRemote-frameworks.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		914ADDD7106BA9BB8A7E569F /* 📦 Check Pods Manifest.lock */ = {
+		914ADDD7106BA9BB8A7E569F /* [CP] Check Pods Manifest.lock */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Check Pods Manifest.lock";
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -636,14 +636,14 @@
 			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
 			showEnvVarsInLog = 0;
 		};
-		A441F71824DCB9D0CA297748 /* 📦 Copy Pods Resources */ = {
+		A441F71824DCB9D0CA297748 /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -651,14 +651,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-AllTests/Pods-AllTests-resources.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		A8E3AC66DF770B774114A30E /* 📦 Embed Pods Frameworks */ = {
+		A8E3AC66DF770B774114A30E /* [CP] Embed Pods Frameworks */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Embed Pods Frameworks";
+			name = "[CP] Embed Pods Frameworks";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -666,14 +666,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalCleartext/Pods-InteropTestsLocalCleartext-frameworks.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		AEEBFC914CBAEE347382E8C4 /* 📦 Copy Pods Resources */ = {
+		AEEBFC914CBAEE347382E8C4 /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -681,14 +681,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-Tests/Pods-Tests-resources.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		B2986CEEE8CDD4901C97598B /* 📦 Check Pods Manifest.lock */ = {
+		B2986CEEE8CDD4901C97598B /* [CP] Check Pods Manifest.lock */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Check Pods Manifest.lock";
+			name = "[CP] Check Pods Manifest.lock";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -696,14 +696,14 @@
 			shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n    cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n    exit 1\nfi\n";
 			showEnvVarsInLog = 0;
 		};
-		C2E09DC4BD239F71160F0CC1 /* 📦 Copy Pods Resources */ = {
+		C2E09DC4BD239F71160F0CC1 /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -711,14 +711,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsRemote/Pods-InteropTestsRemote-resources.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		C591129ACE9F6CC5EE03FCDE /* 📦 Embed Pods Frameworks */ = {
+		C591129ACE9F6CC5EE03FCDE /* [CP] Embed Pods Frameworks */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Embed Pods Frameworks";
+			name = "[CP] Embed Pods Frameworks";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -726,14 +726,14 @@
 			shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalSSL/Pods-InteropTestsLocalSSL-frameworks.sh\"\n";
 			showEnvVarsInLog = 0;
 		};
-		C977426A8727267BBAC7D48E /* 📦 Copy Pods Resources */ = {
+		C977426A8727267BBAC7D48E /* [CP] Copy Pods Resources */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
 			);
 			inputPaths = (
 			);
-			name = "📦 Copy Pods Resources";
+			name = "[CP] Copy Pods Resources";
 			outputPaths = (
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -893,6 +893,7 @@
 					"$(inherited)",
 				);
 				GCC_SYMBOLS_PRIVATE_EXTERN = NO;
+				GCC_TREAT_WARNINGS_AS_ERRORS = YES;
 				GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
 				GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
 				GCC_WARN_UNDECLARED_SELECTOR = YES;
@@ -929,6 +930,7 @@
 				ENABLE_STRICT_OBJC_MSGSEND = YES;
 				GCC_C_LANGUAGE_STANDARD = gnu99;
 				GCC_NO_COMMON_BLOCKS = YES;
+				GCC_TREAT_WARNINGS_AS_ERRORS = YES;
 				GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
 				GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
 				GCC_WARN_UNDECLARED_SELECTOR = YES;
@@ -946,6 +948,7 @@
 			isa = XCBuildConfiguration;
 			baseConfigurationReference = 060EF32D7EC0DF67ED617507 /* Pods-Tests.debug.xcconfig */;
 			buildSettings = {
+				GCC_TREAT_WARNINGS_AS_ERRORS = YES;
 				PRODUCT_NAME = "$(TARGET_NAME)";
 				SKIP_INSTALL = YES;
 			};
@@ -955,6 +958,7 @@
 			isa = XCBuildConfiguration;
 			baseConfigurationReference = E6733B838B28453434B556E2 /* Pods-Tests.release.xcconfig */;
 			buildSettings = {
+				GCC_TREAT_WARNINGS_AS_ERRORS = YES;
 				PRODUCT_NAME = "$(TARGET_NAME)";
 				SKIP_INSTALL = YES;
 			};
diff --git a/src/objective-c/tests/build_example_test.sh b/src/objective-c/tests/build_example_test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5c3766b4c0bd966a3d27734f6166ce1ec69d3937
--- /dev/null
+++ b/src/objective-c/tests/build_example_test.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Don't run this script standalone. Instead, run from the repository root:
+# ./tools/run_tests/run_tests.py -l objc
+
+set -eo pipefail
+
+cd `dirname $0`
+
+BINDIR=`pwd`/../../../bins/$CONFIG
+TMP_PATH=$PATH
+
+# If `protoc` is not found, add bins/$CONFIG/protobuf/protoc to the search path
+hash protoc 2>/dev/null || TMP_PATH=$BINDIR/protobuf:$TMP_PATH
+
+# If `protoc-gen-objcgrpc` is not found, make a symlink from
+# bins/$CONFIG/grpc_objective_c_plugin and add it to the search path
+PATH=$TMP_PATH hash protoc-gen-objcgrpc 2>/dev/null || {
+  ln -sf $BINDIR/grpc_objective_c_plugin $BINDIR/protoc-gen-objcgrpc
+  TMP_PATH=$BINDIR:$TMP_PATH
+}
+
+SCHEME=HelloWorld                              \
+  EXAMPLE_PATH=examples/objective-c/helloworld \
+  PATH=$TMP_PATH                               \
+  ./build_one_example.sh
+
+SCHEME=RouteGuideClient                         \
+  EXAMPLE_PATH=examples/objective-c/route_guide \
+  PATH=$TMP_PATH                                \
+  ./build_one_example.sh
+
+SCHEME=AuthSample                               \
+  EXAMPLE_PATH=examples/objective-c/auth_sample \
+  PATH=$TMP_PATH                                \
+  ./build_one_example.sh
+
+SCHEME=Sample                                  \
+  EXAMPLE_PATH=src/objective-c/examples/Sample \
+  PATH=$TMP_PATH                               \
+  ./build_one_example.sh
+
+SCHEME=SwiftSample                                  \
+  EXAMPLE_PATH=src/objective-c/examples/SwiftSample \
+  PATH=$TMP_PATH                                    \
+  ./build_one_example.sh
diff --git a/src/python/grpcio/grpc/framework/core/_utilities.py b/src/objective-c/tests/build_one_example.sh
old mode 100644
new mode 100755
similarity index 65%
rename from src/python/grpcio/grpc/framework/core/_utilities.py
rename to src/objective-c/tests/build_one_example.sh
index abedc727e4a84883b318b35f4014538d25522475..24fb8b7bab44df34a5c5bccd7884b9f53843bd19
--- a/src/python/grpcio/grpc/framework/core/_utilities.py
+++ b/src/objective-c/tests/build_one_example.sh
@@ -1,4 +1,5 @@
-# Copyright 2015, Google Inc.
+#!/bin/bash
+# Copyright 2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -27,28 +28,35 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""Package-internal utilities."""
+# Don't run this script standalone. Instead, run from the repository root:
+# ./tools/run_tests/run_tests.py -l objc
 
-import collections
+set -e
 
-from grpc.framework.interfaces.base import base
+# Params:
+# EXAMPLE_PATH - directory of the example
+# SCHEME - scheme of the example, used by xcodebuild
 
+# CocoaPods requires the terminal to be using UTF-8 encoding.
+export LANG=en_US.UTF-8
 
-class ServicerPackage(
-    collections.namedtuple(
-        'ServicerPackage', ('servicer', 'default_timeout', 'maximum_timeout'))):
-  """A trivial bundle class.
+cd `dirname $0`/../../..
 
-  Attributes:
-    servicer: A base.Servicer.
-    default_timeout: A float indicating the length of time in seconds to allow
-      for an operation invoked without a timeout.
-    maximum_timeout: A float indicating the maximum length of time in seconds to
-      allow for an operation.
-  """
+cd $EXAMPLE_PATH
 
+# clean the directory
+rm -rf Pods
+rm -rf $SCHEME.xcworkspace
+rm -f Podfile.lock
 
-class Outcome(
-    base.Outcome,
-    collections.namedtuple('Outcome', ('kind', 'code', 'details',))):
-  """A trivial implementation of base.Outcome."""
+pod install
+
+set -o pipefail
+XCODEBUILD_FILTER='(^===|^\*\*|\bfatal\b|\berror\b|\bwarning\b|\bfail)'
+xcodebuild \
+    clean build \
+    -workspace *.xcworkspace \
+    -scheme $SCHEME \
+    -destination name="iPhone 6" \
+    | egrep "$XCODEBUILD_FILTER" \
+    | egrep -v "(GPBDictionary|GPBArray)" -
diff --git a/src/php/README.md b/src/php/README.md
index cf8f2c11b0fa5e1aca376ffd6f7212e94ce41095..8abedc40a3b38a672c11ddf932831a610e254703 100644
--- a/src/php/README.md
+++ b/src/php/README.md
@@ -5,7 +5,7 @@ This directory contains source code for PHP implementation of gRPC layered on sh
 
 #Status
 
-Beta
+GA
 
 ## Environment
 
@@ -43,7 +43,7 @@ $ sudo mv phpunit-old.phar /usr/bin/phpunit
 Install the gRPC PHP extension
 
 ```sh
-sudo pecl install grpc-beta
+sudo pecl install grpc
 ```
 
 This will compile and install the gRPC PHP extension into the standard PHP extension directory. You should be able to run the [unit tests](#unit-tests), with the PHP extension installed.
@@ -58,7 +58,7 @@ To run tests with generated stub code from `.proto` files, you will also need th
 Clone this repository
 
 ```sh
-$ git clone https://github.com/grpc/grpc.git
+$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
 ```
 
 Build and install the gRPC C core library
@@ -75,7 +75,7 @@ $ sudo make install
 Install the gRPC PHP extension from PECL
 
 ```sh
-$ sudo pecl install grpc-beta
+$ sudo pecl install grpc
 ```
 
 Or, compile from source
@@ -101,7 +101,7 @@ extension=grpc.so
 You will need the source code to run tests
 
 ```sh
-$ git clone https://github.com/grpc/grpc.git
+$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
 $ cd grpc
 $ git pull --recurse-submodules && git submodule update --init --recursive
 ```
@@ -148,7 +148,7 @@ Alternatively, you can download `protoc` binaries from [the protocol buffers Git
 You need to install `protoc-gen-php` to generate stub class `.php` files from service definition `.proto` files.
 
 ```sh
-$ cd grpc/src/php/vendor/datto/protobuf-php # if you had run `composer install` in the previous step
+$ cd grpc/src/php/vendor/stanley-cheung/protobuf-php # if you had run `composer install` in the previous step
 
 OR
 
diff --git a/src/php/composer.json b/src/php/composer.json
index 01674a25db8168143d0e255e2c8f9e7eca5ce1d7..ca8e085fca13d4e85c815b6bf988bb7ad391d774 100644
--- a/src/php/composer.json
+++ b/src/php/composer.json
@@ -2,19 +2,13 @@
   "name": "grpc/grpc",
   "type": "library",
   "description": "gRPC library for PHP",
-  "version": "0.14.0",
   "keywords": ["rpc"],
   "homepage": "http://grpc.io",
   "license": "BSD-3-Clause",
-  "repositories": [
-    {
-      "type": "vcs",
-      "url": "https://github.com/stanley-cheung/Protobuf-PHP"
-    }
-  ],
+  "version": "1.0.0",
   "require": {
     "php": ">=5.5.0",
-    "datto/protobuf-php": "dev-master",
+    "stanley-cheung/protobuf-php": "dev-master",
     "google/auth": "v0.7"
   },
   "autoload": {
diff --git a/src/php/ext/grpc/byte_buffer.c b/src/php/ext/grpc/byte_buffer.c
index 7a726de5db48b1b103ad360fbf1bd1d86728a376..3be1429f13c61b86ddef3da8cbec0e026ecc74cd 100644
--- a/src/php/ext/grpc/byte_buffer.c
+++ b/src/php/ext/grpc/byte_buffer.c
@@ -58,22 +58,20 @@ grpc_byte_buffer *string_to_byte_buffer(char *string, size_t length) {
 
 void byte_buffer_to_string(grpc_byte_buffer *buffer, char **out_string,
                            size_t *out_length) {
-  if (buffer == NULL) {
+  grpc_byte_buffer_reader reader;
+  if (buffer == NULL || !grpc_byte_buffer_reader_init(&reader, buffer)) {
+    /* TODO(dgq): distinguish between the error cases. */
     *out_string = NULL;
     *out_length = 0;
     return;
   }
-  size_t length = grpc_byte_buffer_length(buffer);
+
+  gpr_slice slice = grpc_byte_buffer_reader_readall(&reader);
+  size_t length = GPR_SLICE_LENGTH(slice);
   char *string = ecalloc(length + 1, sizeof(char));
-  size_t offset = 0;
-  grpc_byte_buffer_reader reader;
-  grpc_byte_buffer_reader_init(&reader, buffer);
-  gpr_slice next;
-  while (grpc_byte_buffer_reader_next(&reader, &next) != 0) {
-    memcpy(string + offset, GPR_SLICE_START_PTR(next), GPR_SLICE_LENGTH(next));
-    offset += GPR_SLICE_LENGTH(next);
-    gpr_slice_unref(next);
-  }
+  memcpy(string, GPR_SLICE_START_PTR(slice), length);
+  gpr_slice_unref(slice);
+
   *out_string = string;
   *out_length = length;
 }
diff --git a/src/php/ext/grpc/call.c b/src/php/ext/grpc/call.c
index 0ec502262de52d18b61289f009c6d2bfe2b89370..2cd45f10dc45bae6485ae70539f53425f7f45af8 100644
--- a/src/php/ext/grpc/call.c
+++ b/src/php/ext/grpc/call.c
@@ -248,6 +248,7 @@ PHP_METHOD(Call, __construct) {
   call->wrapped = grpc_channel_create_call(
       channel->wrapped, NULL, GRPC_PROPAGATE_DEFAULTS, completion_queue, method,
       host_override, deadline->wrapped, NULL);
+  call->owned = true;
 }
 
 /**
diff --git a/src/php/ext/grpc/channel.c b/src/php/ext/grpc/channel.c
index 9f0431908f97df6f794868109551e210bc210db1..8d94c59683e939a29bb6be038f1b6df0a2bd9785 100644
--- a/src/php/ext/grpc/channel.c
+++ b/src/php/ext/grpc/channel.c
@@ -48,7 +48,6 @@
 #include <stdbool.h>
 
 #include <grpc/grpc.h>
-#include <grpc/support/log.h>
 #include <grpc/grpc_security.h>
 
 #include "completion_queue.h"
@@ -172,7 +171,6 @@ PHP_METHOD(Channel, __construct) {
   if (creds == NULL) {
     channel->wrapped = grpc_insecure_channel_create(target, &args, NULL);
   } else {
-    gpr_log(GPR_DEBUG, "Initialized secure channel");
     channel->wrapped =
         grpc_secure_channel_create(creds->wrapped, target, &args, NULL);
   }
diff --git a/src/php/ext/grpc/php_grpc.c b/src/php/ext/grpc/php_grpc.c
index f4cb5b28cc8c5e874a24b9f991b6646d16cc8df8..449ba3cd47b8c7d51d296ab3ba0822114626c2fa 100644
--- a/src/php/ext/grpc/php_grpc.c
+++ b/src/php/ext/grpc/php_grpc.c
@@ -248,6 +248,8 @@ PHP_MSHUTDOWN_FUNCTION(grpc) {
   /* uncomment this line if you have INI entries
   UNREGISTER_INI_ENTRIES();
   */
+  // WARNING: PHP does call this function when the extension is unloaded,
+  // but its log output was somehow suppressed.
   grpc_shutdown_timeval(TSRMLS_C);
   grpc_php_shutdown_completion_queue(TSRMLS_C);
   grpc_shutdown();
diff --git a/src/php/ext/grpc/server.c b/src/php/ext/grpc/server.c
index 6df2e4f97821d1af8e023caf4e4061e0a26bf48b..c13e7cd1f92409a0be31a580bd25829e3f203fff 100644
--- a/src/php/ext/grpc/server.c
+++ b/src/php/ext/grpc/server.c
@@ -48,7 +48,6 @@
 #include <stdbool.h>
 
 #include <grpc/grpc.h>
-#include <grpc/support/log.h>
 #include <grpc/grpc_security.h>
 
 #include "completion_queue.h"
diff --git a/src/proto/grpc/binary_log/v1alpha/log.proto b/src/proto/grpc/binary_log/v1alpha/log.proto
index 83166cd4104bd5254abb467e558f469c4a019bbc..46656109bc92bdb4fb9ce2a627c91f0c0e2020fe 100644
--- a/src/proto/grpc/binary_log/v1alpha/log.proto
+++ b/src/proto/grpc/binary_log/v1alpha/log.proto
@@ -29,20 +29,20 @@
 
 syntax = "proto3";
 
-import "google/protobuf/timestamp.proto"
+import "google/protobuf/timestamp.proto";
 
 package grpc.binary_log.v1alpha;
 
 enum Direction {
-  SERVER_SEND;
-  SERVER_RECV;
-  CLIENT_SEND;
-  CLIENT_RECV;
+  SERVER_SEND = 0;
+  SERVER_RECV = 1;
+  CLIENT_SEND = 2;
+  CLIENT_RECV = 3;
 }
 
 message KeyValuePair {
-  string key;
-  string value;
+  string key = 1;
+  string value = 2;
 }
 
 // Any sort of metadata that may be sent in either direction during a call
diff --git a/src/proto/grpc/testing/messages.proto b/src/proto/grpc/testing/messages.proto
index e1090156ab483de223466eb8a5aa486014b55a17..64998c2f2310230a6bd7076a2c116c7a386d4511 100644
--- a/src/proto/grpc/testing/messages.proto
+++ b/src/proto/grpc/testing/messages.proto
@@ -34,17 +34,24 @@ syntax = "proto3";
 
 package grpc.testing;
 
+// TODO(dgq): Go back to using well-known types once
+// https://github.com/grpc/grpc/issues/6980 has been fixed.
+// import "google/protobuf/wrappers.proto";
+message BoolValue {
+  // The bool value.
+  bool value = 1;
+}
+
+// DEPRECATED, don't use. To be removed shortly.
 // The type of payload that should be returned.
 enum PayloadType {
   // Compressable text format.
   COMPRESSABLE = 0;
-
-  // Uncompressable binary format.
-  UNCOMPRESSABLE = 1;
 }
 
 // A block of data, to simply increase gRPC message size.
 message Payload {
+  // DEPRECATED, don't use. To be removed shortly.
   // The type of data in body.
   PayloadType type = 1;
   // Primary contents of payload.
@@ -60,12 +67,12 @@ message EchoStatus {
 
 // Unary request.
 message SimpleRequest {
+  // DEPRECATED, don't use. To be removed shortly.
   // Desired payload type in the response from the server.
   // If response_type is RANDOM, server randomly chooses one from other formats.
   PayloadType response_type = 1;
 
   // Desired payload size in the response from the server.
-  // If response_type is COMPRESSABLE, this denotes the size before compression.
   int32 response_size = 2;
 
   // Optional input payload sent along with the request.
@@ -77,11 +84,17 @@ message SimpleRequest {
   // Whether SimpleResponse should include OAuth scope.
   bool fill_oauth_scope = 5;
 
-  // Whether to request the server to compress the response.
-  bool request_compressed_response = 6;
+  // Whether to request the server to compress the response. This field is
+  // "nullable" in order to interoperate seamlessly with clients not able to
+  // implement the full compression tests by introspecting the call to verify
+  // the response's compression status.
+  BoolValue response_compressed = 6;
 
   // Whether server should return a given status
   EchoStatus response_status = 7;
+
+  // Whether the server should expect this request to be compressed.
+  BoolValue expect_compressed = 8;
 }
 
 // Unary response, as configured by the request.
@@ -100,6 +113,12 @@ message StreamingInputCallRequest {
   // Optional input payload sent along with the request.
   Payload payload = 1;
 
+  // Whether the server should expect this request to be compressed. This field
+  // is "nullable" in order to interoperate seamlessly with servers not able to
+  // implement the full compression tests by introspecting the call to verify
+  // the request's compression status.
+  BoolValue expect_compressed = 2;
+
   // Not expecting any payload from the response.
 }
 
@@ -112,16 +131,22 @@ message StreamingInputCallResponse {
 // Configuration for a particular response.
 message ResponseParameters {
   // Desired payload sizes in responses from the server.
-  // If response_type is COMPRESSABLE, this denotes the size before compression.
   int32 size = 1;
 
   // Desired interval between consecutive responses in the response stream in
   // microseconds.
   int32 interval_us = 2;
+
+  // Whether to request the server to compress the response. This field is
+  // "nullable" in order to interoperate seamlessly with clients not able to
+  // implement the full compression tests by introspecting the call to verify
+  // the response's compression status.
+  BoolValue compressed = 3;
 }
 
 // Server-streaming request.
 message StreamingOutputCallRequest {
+  // DEPRECATED, don't use. To be removed shortly.
   // Desired payload type in the response from the server.
   // If response_type is RANDOM, the payload from each response in the stream
   // might be of different types. This is to simulate a mixed type of payload
@@ -134,9 +159,6 @@ message StreamingOutputCallRequest {
   // Optional input payload sent along with the request.
   Payload payload = 3;
 
-  // Whether to request the server to compress the response.
-  bool request_compressed_response = 6;
-
   // Whether server should return a given status
   EchoStatus response_status = 7;
 }
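
The nullable `BoolValue` fields introduced above carry three states (unset, false, true), which a plain proto3 `bool` cannot express. A minimal sketch of how a test client might exercise them, assuming the generated Python module for `messages.proto` is named `messages_pb2` (that name is an assumption, not part of this change):

```python
# Sketch only: field names follow messages.proto; the generated module name
# `messages_pb2` is an assumption.
import messages_pb2

request = messages_pb2.SimpleRequest(response_size=314159)

# Explicitly ask the server to verify that this request arrives compressed.
request.expect_compressed.value = True

# Presence of the BoolValue submessage is what distinguishes true/false
# from "no opinion".
assert request.HasField('expect_compressed')

unset = messages_pb2.SimpleRequest()
assert not unset.HasField('expect_compressed')  # tri-state: left unset
```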
diff --git a/src/python/grpcio/.gitignore b/src/python/grpcio/.gitignore
index 6e5e65096c8403b24b3f4de6f34c8f6f85a83dc4..7cd8fab2735b38a27af4459c33dab880a57cb512 100644
--- a/src/python/grpcio/.gitignore
+++ b/src/python/grpcio/.gitignore
@@ -9,7 +9,6 @@ dist/
 .coverage
 .coverage.*
 .cache/
-.tox/
 nosetests.xml
 doc/
 _grpcio_metadata.py
diff --git a/src/python/grpcio/README.rst b/src/python/grpcio/README.rst
index afc4fe6a37059aad67c3a733c5c5df1a3dc782bc..3fc318539ea5d2000b8add8ebcd5ce8ad6cfb902 100644
--- a/src/python/grpcio/README.rst
+++ b/src/python/grpcio/README.rst
@@ -46,7 +46,7 @@ package named :code:`python-dev`).
 ::
 
   $ export REPO_ROOT=grpc  # REPO_ROOT can be any directory of your choice
-  $ git clone https://github.com/grpc/grpc.git $REPO_ROOT
+  $ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc $REPO_ROOT
   $ cd $REPO_ROOT
   $ git submodule update --init
 
diff --git a/src/python/grpcio/_unixccompiler_patch.py b/src/python/grpcio/_unixccompiler_patch.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ce5d63e981a1893176c72213918c05b41f3325a
--- /dev/null
+++ b/src/python/grpcio/_unixccompiler_patch.py
@@ -0,0 +1,73 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Covers inadequacies in distutils."""
+
+from distutils import ccompiler
+from distutils import errors
+from distutils import unixccompiler
+import os
+import os.path
+import shutil
+import sys
+import tempfile
+
+def _unix_commandfile_spawn(self, command):
+  """Wrapper around distutils.util.spawn that attempts to use command files.
+
+  Meant to replace the CCompiler method `spawn` on UnixCCompiler and its
+  derivatives (e.g. the MinGW32 compiler).
+
+  Some commands like `gcc` (and friends like `clang`) support command files to
+  work around shell command length limits.
+  """
+  command_base = os.path.basename(command[0].strip())
+  if command_base == 'ccache':
+    command_base = command[:2]
+    command_args = command[2:]
+  elif command_base.startswith('ccache') or command_base in ['gcc', 'clang', 'clang++', 'g++']:
+    command_base = command[:1]
+    command_args = command[1:]
+  else:
+    return ccompiler.CCompiler.spawn(self, command)
+  temporary_directory = tempfile.mkdtemp()
+  command_filename = os.path.abspath(os.path.join(temporary_directory, 'command'))
+  with open(command_filename, 'w') as command_file:
+    escaped_args = [arg.replace('\\', '\\\\') for arg in command_args]
+    command_file.write(' '.join(escaped_args))
+  modified_command = command_base + ['@{}'.format(command_filename)]
+  result = ccompiler.CCompiler.spawn(self, modified_command)
+  shutil.rmtree(temporary_directory)
+  return result
+
+
+def monkeypatch_unix_compiler():
+  """Monkeypatching is dumb, but it's either that or we become maintainers of
+     something much, much bigger."""
+  unixccompiler.UnixCCompiler.spawn = _unix_commandfile_spawn
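
The module above swaps UnixCCompiler.spawn for a wrapper that writes long gcc/clang/ccache command lines to an '@' command file, sidestepping OS limits on argument length. A minimal sketch of how a build script might install the patch follows; the guard condition and call site are assumptions, and only monkeypatch_unix_compiler() comes from the module above.

import sys

import _unixccompiler_patch

# Command-line length limits bite hardest for MinGW builds on Windows, so a
# setup script might apply the patch conditionally. The exact condition here
# is illustrative, not taken from the repository.
if 'win32' in sys.platform:
  _unixccompiler_patch.monkeypatch_unix_compiler()
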
diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py
index 3e974eba0a344862f5ce03d54f693bcb17659cc6..86a73fa8360f45358bbd6bd4e98c5df0e6bedf9d 100644
--- a/src/python/grpcio/commands.py
+++ b/src/python/grpcio/commands.py
@@ -58,10 +58,31 @@ CONF_PY_ADDENDUM = """
 extensions.append('sphinx.ext.napoleon')
 napoleon_google_docstring = True
 napoleon_numpy_docstring = True
+napoleon_include_special_with_doc = True
 
 html_theme = 'sphinx_rtd_theme'
 """
 
+API_GLOSSARY = """
+
+Glossary
+================
+
+.. glossary::
+
+  metadatum
+    A key-value pair included in the HTTP headers. It is a
+    2-tuple whose first entry is the key and whose second is the
+    value, i.e. (key, value). The metadata key is an ASCII str
+    and must be a valid HTTP header name. The metadata value can be
+    either a valid HTTP ASCII str or bytes. If bytes are provided,
+    the key must end with '-bin', e.g.
+    ``('binary-metadata-bin', b'\\x00\\xFF')``
+
+  metadata
+    A sequence of metadatum.
+"""
+
 
 class CommandError(Exception):
   """Simple exception class for GRPC custom commands."""
@@ -131,76 +152,12 @@ class SphinxDocumentation(setuptools.Command):
     conf_filepath = os.path.join('doc', 'src', 'conf.py')
     with open(conf_filepath, 'a') as conf_file:
       conf_file.write(CONF_PY_ADDENDUM)
+    glossary_filepath = os.path.join('doc', 'src', 'grpc.rst')
+    with open(glossary_filepath, 'a') as glossary_file:
+      glossary_file.write(API_GLOSSARY)
     sphinx.main(['', os.path.join('doc', 'src'), os.path.join('doc', 'build')])
 
 
-class BuildProtoModules(setuptools.Command):
-  """Command to generate project *_pb2.py modules from proto files."""
-
-  description = 'build protobuf modules'
-  user_options = [
-    ('include=', None, 'path patterns to include in protobuf generation'),
-    ('exclude=', None, 'path patterns to exclude from protobuf generation')
-  ]
-
-  def initialize_options(self):
-    self.exclude = None
-    self.include = r'.*\.proto$'
-    self.protoc_command = None
-    self.grpc_python_plugin_command = None
-
-  def finalize_options(self):
-    self.protoc_command = distutils.spawn.find_executable('protoc')
-    self.grpc_python_plugin_command = distutils.spawn.find_executable(
-        'grpc_python_plugin')
-
-  def run(self):
-    if not self.protoc_command:
-      raise CommandError('could not find protoc')
-    if not self.grpc_python_plugin_command:
-      raise CommandError('could not find grpc_python_plugin '
-                         '(protoc plugin for GRPC Python)')
-
-    if not os.path.exists(PROTO_GEN_STEM):
-      os.makedirs(PROTO_GEN_STEM)
-
-    include_regex = re.compile(self.include)
-    exclude_regex = re.compile(self.exclude) if self.exclude else None
-    paths = []
-    for walk_root, directories, filenames in os.walk(PROTO_STEM):
-      for filename in filenames:
-        path = os.path.join(walk_root, filename)
-        if include_regex.match(path) and not (
-            exclude_regex and exclude_regex.match(path)):
-          paths.append(path)
-
-    # TODO(kpayson): It would be nice to do this in a batch command,
-    # but we currently have name conflicts in src/proto
-    for path in paths:
-      command = [
-          self.protoc_command,
-          '--plugin=protoc-gen-python-grpc={}'.format(
-              self.grpc_python_plugin_command),
-          '-I {}'.format(GRPC_STEM),
-          '--python_out={}'.format(PROTO_GEN_STEM),
-          '--python-grpc_out={}'.format(PROTO_GEN_STEM),
-      ] + [path]
-      try:
-        subprocess.check_output(' '.join(command), cwd=PYTHON_STEM, shell=True,
-                                stderr=subprocess.STDOUT)
-      except subprocess.CalledProcessError as e:
-        sys.stderr.write(
-            'warning: Command:\n{}\nMessage:\n{}\nOutput:\n{}'.format(
-                command, str(e), e.output))
-
-    # Generated proto directories dont include __init__.py, but
-    # these are needed for python package resolution
-    for walk_root, _, _ in os.walk(PROTO_GEN_STEM):
-      if walk_root != PROTO_GEN_STEM:
-        path = os.path.join(walk_root, '__init__.py')
-        open(path, 'a').close()
-
-
 class BuildProjectMetadata(setuptools.Command):
   """Command to generate project metadata in a module."""
 
@@ -223,10 +180,6 @@ class BuildPy(build_py.build_py):
   """Custom project build command."""
 
   def run(self):
-    try:
-      self.run_command('build_proto_modules')
-    except CommandError as error:
-      sys.stderr.write('warning: %s\n' % error.message)
     self.run_command('build_project_metadata')
     build_py.build_py.run(self)
 
@@ -279,76 +232,3 @@ class Gather(setuptools.Command):
       self.distribution.fetch_build_eggs(self.distribution.install_requires)
     if self.test and self.distribution.tests_require:
       self.distribution.fetch_build_eggs(self.distribution.tests_require)
-
-
-class TestLite(setuptools.Command):
-  """Command to run tests without fetching or building anything."""
-
-  description = 'run tests without fetching or building anything.'
-  user_options = []
-
-  def initialize_options(self):
-    pass
-
-  def finalize_options(self):
-    # distutils requires this override.
-    pass
-
-  def run(self):
-    self._add_eggs_to_path()
-
-    import tests
-    loader = tests.Loader()
-    loader.loadTestsFromNames(['tests'])
-    runner = tests.Runner()
-    result = runner.run(loader.suite)
-    if not result.wasSuccessful():
-      sys.exit('Test failure')
-
-  def _add_eggs_to_path(self):
-    """Fetch install and test requirements"""
-    self.distribution.fetch_build_eggs(self.distribution.install_requires)
-    self.distribution.fetch_build_eggs(self.distribution.tests_require)
-
-
-class RunInterop(test.test):
-
-  description = 'run interop test client/server'
-  user_options = [
-    ('args=', 'a', 'pass-thru arguments for the client/server'),
-    ('client', 'c', 'flag indicating to run the client'),
-    ('server', 's', 'flag indicating to run the server')
-  ]
-
-  def initialize_options(self):
-    self.args = ''
-    self.client = False
-    self.server = False
-
-  def finalize_options(self):
-    if self.client and self.server:
-      raise DistutilsOptionError('you may only specify one of client or server')
-
-  def run(self):
-    if self.distribution.install_requires:
-      self.distribution.fetch_build_eggs(self.distribution.install_requires)
-    if self.distribution.tests_require:
-      self.distribution.fetch_build_eggs(self.distribution.tests_require)
-    if self.client:
-      self.run_client()
-    elif self.server:
-      self.run_server()
-
-  def run_server(self):
-    # We import here to ensure that our setuptools parent has had a chance to
-    # edit the Python system path.
-    from tests.interop import server
-    sys.argv[1:] = self.args.split()
-    server.serve()
-
-  def run_client(self):
-    # We import here to ensure that our setuptools parent has had a chance to
-    # edit the Python system path.
-    from tests.interop import client
-    sys.argv[1:] = self.args.split()
-    client.test_interoperability()
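
The glossary appended above defines the :term:`metadata` format that the reworked docstrings in grpc/__init__.py (next file) reference. For concreteness, here are values that satisfy that definition:

# Metadata is a sequence of (key, value) 2-tuples; keys ending in '-bin'
# carry bytes values, all other values are ASCII str.
metadata = (
    ('initial-metadata-key', 'some text value'),
    ('binary-metadata-bin', b'\x00\xff'),
)
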
diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py
index d07a7a721d74152a240deffd15e7521b70a6157c..fd015129f02f169e2f335424d95c61497328c0b4 100644
--- a/src/python/grpcio/grpc/__init__.py
+++ b/src/python/grpcio/grpc/__init__.py
@@ -312,7 +312,7 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
     This method blocks until the value is available.
 
     Returns:
-      The initial metadata as a sequence of pairs of bytes.
+      The initial :term:`metadata`.
     """
     raise NotImplementedError()
 
@@ -323,7 +323,7 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
     This method blocks until the value is available.
 
     Returns:
-      The trailing metadata as a sequence of pairs of bytes.
+      The trailing :term:`metadata`.
     """
     raise NotImplementedError()
 
@@ -394,8 +394,7 @@ class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)):
     """Inform the gRPC runtime of the metadata to construct a CallCredentials.
 
     Args:
-      metadata: An iterable of 2-sequences (e.g. tuples) of metadata key/value
-        pairs.
+      metadata: The :term:`metadata` used to construct the CallCredentials.
       error: An Exception to indicate error or None to indicate success.
     """
     raise NotImplementedError()
@@ -436,23 +435,39 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
   """Affords invoking a unary-unary RPC."""
 
   @abc.abstractmethod
-  def __call__(
-      self, request, timeout=None, metadata=None, credentials=None,
-      with_call=False):
+  def __call__(self, request, timeout=None, metadata=None, credentials=None):
     """Synchronously invokes the underlying RPC.
 
     Args:
       request: The request value for the RPC.
       timeout: An optional duration of time in seconds to allow for the RPC.
-      metadata: An optional sequence of pairs of bytes to be transmitted to the
+      metadata: Optional :term:`metadata` to be transmitted to the
         service-side of the RPC.
       credentials: An optional CallCredentials for the RPC.
-      with_call: Whether or not to include return a Call for the RPC in addition
-        to the response.
 
     Returns:
-      The response value for the RPC, and a Call for the RPC if with_call was
-        set to True at invocation.
+      The response value for the RPC.
+
+    Raises:
+      RpcError: Indicating that the RPC terminated with non-OK status. The
+        raised RpcError will also be a Call for the RPC affording the RPC's
+        metadata, status code, and details.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def with_call(self, request, timeout=None, metadata=None, credentials=None):
+    """Synchronously invokes the underlying RPC.
+
+    Args:
+      request: The request value for the RPC.
+      timeout: An optional duration of time in seconds to allow for the RPC.
+      metadata: Optional :term:`metadata` to be transmitted to the
+        service-side of the RPC.
+      credentials: An optional CallCredentials for the RPC.
+
+    Returns:
+      The response value for the RPC and a Call value for the RPC.
 
     Raises:
       RpcError: Indicating that the RPC terminated with non-OK status. The
@@ -468,7 +483,7 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
     Args:
       request: The request value for the RPC.
       timeout: An optional duration of time in seconds to allow for the RPC.
-      metadata: An optional sequence of pairs of bytes to be transmitted to the
+      metadata: Optional :term:`metadata` to be transmitted to the
         service-side of the RPC.
       credentials: An optional CallCredentials for the RPC.
 
@@ -491,7 +506,7 @@ class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
     Args:
       request: The request value for the RPC.
       timeout: An optional duration of time in seconds to allow for the RPC.
-      metadata: An optional sequence of pairs of bytes to be transmitted to the
+      metadata: Optional :term:`metadata` to be transmitted to the
         service-side of the RPC.
       credentials: An optional CallCredentials for the RPC.
 
@@ -508,18 +523,15 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
 
   @abc.abstractmethod
   def __call__(
-      self, request_iterator, timeout=None, metadata=None, credentials=None,
-      with_call=False):
+      self, request_iterator, timeout=None, metadata=None, credentials=None):
     """Synchronously invokes the underlying RPC.
 
     Args:
       request_iterator: An iterator that yields request values for the RPC.
       timeout: An optional duration of time in seconds to allow for the RPC.
-      metadata: An optional sequence of pairs of bytes to be transmitted to the
+      metadata: Optional :term:`metadata` to be transmitted to the
         service-side of the RPC.
       credentials: An optional CallCredentials for the RPC.
-      with_call: Whether or not to include return a Call for the RPC in addition
-        to the response.
 
     Returns:
       The response value for the RPC, and a Call for the RPC if with_call was
@@ -532,6 +544,28 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
     """
     raise NotImplementedError()
 
+  @abc.abstractmethod
+  def with_call(
+      self, request_iterator, timeout=None, metadata=None, credentials=None):
+    """Synchronously invokes the underlying RPC.
+
+    Args:
+      request_iterator: An iterator that yields request values for the RPC.
+      timeout: An optional duration of time in seconds to allow for the RPC.
+      metadata: Optional :term:`metadata` to be transmitted to the
+        service-side of the RPC.
+      credentials: An optional CallCredentials for the RPC.
+
+    Returns:
+      The response value for the RPC and a Call for the RPC.
+
+    Raises:
+      RpcError: Indicating that the RPC terminated with non-OK status. The
+        raised RpcError will also be a Call for the RPC affording the RPC's
+        metadata, status code, and details.
+    """
+    raise NotImplementedError()
+
   @abc.abstractmethod
   def future(
       self, request_iterator, timeout=None, metadata=None, credentials=None):
@@ -540,7 +574,7 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
     Args:
       request_iterator: An iterator that yields request values for the RPC.
       timeout: An optional duration of time in seconds to allow for the RPC.
-      metadata: An optional sequence of pairs of bytes to be transmitted to the
+      metadata: Optional :term:`metadata` to be transmitted to the
         service-side of the RPC.
       credentials: An optional CallCredentials for the RPC.
 
@@ -564,7 +598,7 @@ class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
     Args:
       request_iterator: An iterator that yields request values for the RPC.
       timeout: An optional duration of time in seconds to allow for the RPC.
-      metadata: An optional sequence of pairs of bytes to be transmitted to the
+      metadata: Optional :term:`metadata` to be transmitted to the
         service-side of the RPC.
       credentials: An optional CallCredentials for the RPC.
 
@@ -672,7 +706,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
     """Accesses the metadata from the invocation-side of the RPC.
 
     Returns:
-      The invocation metadata object as a sequence of pairs of bytes.
+      The invocation :term:`metadata`.
     """
     raise NotImplementedError()
 
@@ -693,8 +727,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
     service-side initial metadata to transmit.
 
     Args:
-      initial_metadata: The initial metadata of the RPC as a sequence of pairs
-        of bytes.
+      initial_metadata: The initial :term:`metadata`.
     """
     raise NotImplementedError()
 
@@ -706,8 +739,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
     service-side trailing metadata to transmit.
 
     Args:
-      trailing_metadata: The trailing metadata of the RPC as a sequence of pairs
-        of bytes.
+      trailing_metadata: The trailing :term:`metadata`.
     """
     raise NotImplementedError()
 
@@ -780,7 +812,7 @@ class HandlerCallDetails(six.with_metaclass(abc.ABCMeta)):
   """Describes an RPC that has just arrived for service.
   Attributes:
     method: The method name of the RPC.
-    invocation_metadata: The metadata from the invocation side of the RPC.
+    invocation_metadata: The :term:`metadata` from the invocation side of the RPC.
   """
 
 
@@ -1056,37 +1088,41 @@ def access_token_call_credentials(access_token):
       _auth.AccessTokenCallCredentials(access_token))
 
 
-def composite_call_credentials(call_credentials, additional_call_credentials):
-  """Compose two CallCredentials to make a new one.
+def composite_call_credentials(*call_credentials):
+  """Compose multiple CallCredentials to make a new CallCredentials.
 
   Args:
-    call_credentials: A CallCredentials object.
-    additional_call_credentials: Another CallCredentials object to compose on
-      top of call_credentials.
+    *call_credentials: At least two CallCredentials objects.
 
   Returns:
-    A new CallCredentials composed of the two given CallCredentials.
+    A CallCredentials object composed of the given CallCredentials objects.
   """
+  from grpc import _credential_composition
+  cygrpc_call_credentials = tuple(
+      single_call_credentials._credentials
+      for single_call_credentials in call_credentials)
   return CallCredentials(
-      _cygrpc.call_credentials_composite(
-          call_credentials._credentials,
-          additional_call_credentials._credentials))
+      _credential_composition.call(cygrpc_call_credentials))
 
 
-def composite_channel_credentials(channel_credentials, call_credentials):
-  """Compose a ChannelCredentials and a CallCredentials.
+def composite_channel_credentials(channel_credentials, *call_credentials):
+  """Compose a ChannelCredentials and one or more CallCredentials objects.
 
   Args:
     channel_credentials: A ChannelCredentials.
-    call_credentials: A CallCredentials.
+    *call_credentials: One or more CallCredentials objects.
 
   Returns:
     A ChannelCredentials composed of the given ChannelCredentials and
-      CallCredentials.
+      CallCredentials objects.
   """
+  from grpc import _credential_composition
+  cygrpc_call_credentials = tuple(
+      single_call_credentials._credentials
+      for single_call_credentials in call_credentials)
   return ChannelCredentials(
-      _cygrpc.channel_credentials_composite(
-          channel_credentials._credentials, call_credentials._credentials))
+      _credential_composition.channel(
+          channel_credentials._credentials, cygrpc_call_credentials))
 
 
 def ssl_server_credentials(
@@ -1172,25 +1208,23 @@ def secure_channel(target, credentials, options=None):
   return _channel.Channel(target, options, credentials._credentials)
 
 
-def server(generic_rpc_handlers, thread_pool, options=None):
+def server(thread_pool, handlers=None):
   """Creates a Server with which RPCs can be serviced.
 
-  The GenericRpcHandlers passed to this function needn't be the only
-  GenericRpcHandlers that will be used to serve RPCs; others may be added later
-  by calling add_generic_rpc_handlers any time before the returned server is
-  started.
-
   Args:
-    generic_rpc_handlers: Some number of GenericRpcHandlers that will be used
-      to service RPCs after the returned Server is started.
     thread_pool: A futures.ThreadPoolExecutor to be used by the returned Server
       to service RPCs.
+    handlers: An optional sequence of GenericRpcHandlers to be used to service
+      RPCs after the returned Server is started. These handlers need not be the
+      only handlers the server will use to service RPCs; other handlers may
+      later be added by calling add_generic_rpc_handlers any time before the
+      returned Server is started.
 
   Returns:
     A Server with which RPCs can be serviced.
   """
   from grpc import _server
-  return _server.Server(generic_rpc_handlers, thread_pool)
+  return _server.Server(thread_pool, () if handlers is None else handlers)
 
 
 ###################################  __all__  #################################
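
The public-API changes above split with_call() out of __call__(), make the credential-composition helpers variadic, and move server()'s thread pool to the first (and only required) argument. A rough sketch of calling code against the new surface; the target address, method path, and token strings are placeholders, while the grpc calls mirror the signatures introduced above.

from concurrent import futures

import grpc

# server() now takes the thread pool first; handlers are optional and can be
# added later with add_generic_rpc_handlers() before the server starts.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

# with_call() replaces the old with_call=True flag on __call__() and returns
# both the response and the Call.
channel = grpc.insecure_channel('localhost:50051')
multi_callable = channel.unary_unary('/test/UnaryCall')
response, call = multi_callable.with_call(
    b'request', metadata=(('echo-key', 'echo-value'),))

# Both composition helpers now accept any number of CallCredentials.
composite_channel_creds = grpc.composite_channel_credentials(
    grpc.ssl_channel_credentials(),
    grpc.access_token_call_credentials('first-token'),
    grpc.access_token_call_credentials('second-token'))
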
diff --git a/src/python/grpcio/grpc/_adapter/.gitignore b/src/python/grpcio/grpc/_adapter/.gitignore
deleted file mode 100644
index a6f96cd6dbb189f379fbfdfaef86003fe1d56503..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/_adapter/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*.a
-*.so
-*.dll
-*.pyc
-*.pyd
diff --git a/src/python/grpcio/grpc/_adapter/_intermediary_low.py b/src/python/grpcio/grpc/_adapter/_intermediary_low.py
deleted file mode 100644
index 9698ffeabf1f967f8c4c86f147f22c488251d5cc..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/_adapter/_intermediary_low.py
+++ /dev/null
@@ -1,258 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Temporary old _low-like layer.
-
-Eases refactoring burden while we overhaul the Python framework.
-
-Plan:
-    The layers used to look like:
-        ...                # outside _adapter
-        fore.py + rear.py  # visible outside _adapter
-        _low
-        _c
-    The layers currently look like:
-        ...                # outside _adapter
-        fore.py + rear.py  # visible outside _adapter
-        _low_intermediary  # adapter for new '_low' to old '_low'
-        _low               # new '_low'
-        _c                 # new '_c'
-    We will later remove _low_intermediary after refactoring of fore.py and
-    rear.py according to the ticket system refactoring and get:
-        ...                # outside _adapter, refactored
-        fore.py + rear.py  # visible outside _adapter, refactored
-        _low               # new '_low'
-        _c                 # new '_c'
-"""
-
-import collections
-import enum
-
-from grpc._adapter import _low
-from grpc._adapter import _types
-
-_IGNORE_ME_TAG = object()
-Code = _types.StatusCode
-WriteFlags = _types.OpWriteFlags
-
-
-class Status(collections.namedtuple('Status', ['code', 'details'])):
-  """Describes an RPC's overall status."""
-
-
-class ServiceAcceptance(
-    collections.namedtuple(
-        'ServiceAcceptance', ['call', 'method', 'host', 'deadline'])):
-  """Describes an RPC on the service side at the start of service."""
-
-
-class Event(
-    collections.namedtuple(
-        'Event',
-        ['kind', 'tag', 'write_accepted', 'complete_accepted',
-         'service_acceptance', 'bytes', 'status', 'metadata'])):
-  """Describes an event emitted from a completion queue."""
-
-  @enum.unique
-  class Kind(enum.Enum):
-    """Describes the kind of an event."""
-
-    STOP = object()
-    WRITE_ACCEPTED = object()
-    COMPLETE_ACCEPTED = object()
-    SERVICE_ACCEPTED = object()
-    READ_ACCEPTED = object()
-    METADATA_ACCEPTED = object()
-    FINISH = object()
-
-
-class _TagAdapter(collections.namedtuple('_TagAdapter', [
-      'user_tag',
-      'kind'
-  ])):
-  pass
-
-
-class Call(object):
-  """Adapter from old _low.Call interface to new _low.Call."""
-
-  def __init__(self, channel, completion_queue, method, host, deadline):
-    self._internal = channel._internal.create_call(
-        completion_queue._internal, method, host, deadline)
-    self._metadata = []
-
-  @staticmethod
-  def _from_internal(internal):
-    call = Call.__new__(Call)
-    call._internal = internal
-    call._metadata = []
-    return call
-
-  def invoke(self, completion_queue, metadata_tag, finish_tag):
-    err = self._internal.start_batch([
-          _types.OpArgs.send_initial_metadata(self._metadata)
-      ], _IGNORE_ME_TAG)
-    if err != _types.CallError.OK:
-      return err
-    err = self._internal.start_batch([
-          _types.OpArgs.recv_initial_metadata()
-      ], _TagAdapter(metadata_tag, Event.Kind.METADATA_ACCEPTED))
-    if err != _types.CallError.OK:
-      return err
-    err = self._internal.start_batch([
-          _types.OpArgs.recv_status_on_client()
-      ], _TagAdapter(finish_tag, Event.Kind.FINISH))
-    return err
-
-  def write(self, message, tag, flags):
-    return self._internal.start_batch([
-          _types.OpArgs.send_message(message, flags)
-      ], _TagAdapter(tag, Event.Kind.WRITE_ACCEPTED))
-
-  def complete(self, tag):
-    return self._internal.start_batch([
-          _types.OpArgs.send_close_from_client()
-      ], _TagAdapter(tag, Event.Kind.COMPLETE_ACCEPTED))
-
-  def accept(self, completion_queue, tag):
-    return self._internal.start_batch([
-          _types.OpArgs.recv_close_on_server()
-      ], _TagAdapter(tag, Event.Kind.FINISH))
-
-  def add_metadata(self, key, value):
-    self._metadata.append((key, value))
-
-  def premetadata(self):
-    result = self._internal.start_batch([
-          _types.OpArgs.send_initial_metadata(self._metadata)
-      ], _IGNORE_ME_TAG)
-    self._metadata = []
-    return result
-
-  def read(self, tag):
-    return self._internal.start_batch([
-          _types.OpArgs.recv_message()
-      ], _TagAdapter(tag, Event.Kind.READ_ACCEPTED))
-
-  def status(self, status, tag):
-    return self._internal.start_batch([
-          _types.OpArgs.send_status_from_server(
-              self._metadata, status.code, status.details)
-      ], _TagAdapter(tag, Event.Kind.COMPLETE_ACCEPTED))
-
-  def cancel(self):
-    return self._internal.cancel()
-
-  def peer(self):
-    return self._internal.peer()
-
-  def set_credentials(self, creds):
-    return self._internal.set_credentials(creds)
-
-
-class Channel(object):
-  """Adapter from old _low.Channel interface to new _low.Channel."""
-
-  def __init__(self, hostport, channel_credentials, server_host_override=None):
-    args = []
-    if server_host_override:
-      args.append((_types.GrpcChannelArgumentKeys.SSL_TARGET_NAME_OVERRIDE.value, server_host_override))
-    self._internal = _low.Channel(hostport, args, channel_credentials)
-
-
-class CompletionQueue(object):
-  """Adapter from old _low.CompletionQueue interface to new _low.CompletionQueue."""
-
-  def __init__(self):
-    self._internal = _low.CompletionQueue()
-
-  def get(self, deadline=None):
-    if deadline is None:
-      ev = self._internal.next(float('+inf'))
-    else:
-      ev = self._internal.next(deadline)
-    if ev is None:
-      return None
-    elif ev.tag is _IGNORE_ME_TAG:
-      return self.get(deadline)
-    elif ev.type == _types.EventType.QUEUE_SHUTDOWN:
-      kind = Event.Kind.STOP
-      tag = None
-      write_accepted = None
-      complete_accepted = None
-      service_acceptance = None
-      message_bytes = None
-      status = None
-      metadata = None
-    elif ev.type == _types.EventType.OP_COMPLETE:
-      kind = ev.tag.kind
-      tag = ev.tag.user_tag
-      write_accepted = ev.success if kind == Event.Kind.WRITE_ACCEPTED else None
-      complete_accepted = ev.success if kind == Event.Kind.COMPLETE_ACCEPTED else None
-      service_acceptance = ServiceAcceptance(Call._from_internal(ev.call), ev.call_details.method, ev.call_details.host, ev.call_details.deadline) if kind == Event.Kind.SERVICE_ACCEPTED else None
-      message_bytes = ev.results[0].message if kind == Event.Kind.READ_ACCEPTED else None
-      status = Status(ev.results[0].status.code, ev.results[0].status.details) if (kind == Event.Kind.FINISH and ev.results[0].status) else Status(_types.StatusCode.CANCELLED if ev.results[0].cancelled else _types.StatusCode.OK, '') if len(ev.results) > 0 and ev.results[0].cancelled is not None else None
-      metadata = ev.results[0].initial_metadata if (kind in [Event.Kind.SERVICE_ACCEPTED, Event.Kind.METADATA_ACCEPTED]) else (ev.results[0].trailing_metadata if kind == Event.Kind.FINISH else None)
-    else:
-      raise RuntimeError('unknown event')
-    result_ev = Event(kind=kind, tag=tag, write_accepted=write_accepted, complete_accepted=complete_accepted, service_acceptance=service_acceptance, bytes=message_bytes, status=status, metadata=metadata)
-    return result_ev
-
-  def stop(self):
-    self._internal.shutdown()
-
-
-class Server(object):
-  """Adapter from old _low.Server interface to new _low.Server."""
-
-  def __init__(self, completion_queue):
-    self._internal = _low.Server(completion_queue._internal, [])
-    self._internal_cq = completion_queue._internal
-
-  def add_http2_addr(self, addr):
-    return self._internal.add_http2_port(addr)
-
-  def add_secure_http2_addr(self, addr, server_credentials):
-    if server_credentials is None:
-      return self._internal.add_http2_port(addr, None)
-    else:
-      return self._internal.add_http2_port(addr, server_credentials)
-
-  def start(self):
-    return self._internal.start()
-
-  def service(self, tag):
-    return self._internal.request_call(self._internal_cq, _TagAdapter(tag, Event.Kind.SERVICE_ACCEPTED))
-
-  def cancel_all_calls(self):
-    self._internal.cancel_all_calls()
-
-  def stop(self):
-    return self._internal.shutdown(_TagAdapter(None, Event.Kind.STOP))
-
diff --git a/src/python/grpcio/grpc/_adapter/_low.py b/src/python/grpcio/grpc/_adapter/_low.py
deleted file mode 100644
index 48410167a07b6fb4e79ba284f5f667f1dabb62c5..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/_adapter/_low.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import threading
-
-from grpc import _grpcio_metadata
-from grpc import _plugin_wrapping
-from grpc._cython import cygrpc
-from grpc._adapter import _types
-
-_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
-
-ChannelCredentials = cygrpc.ChannelCredentials
-CallCredentials = cygrpc.CallCredentials
-ServerCredentials = cygrpc.ServerCredentials
-
-channel_credentials_composite = cygrpc.channel_credentials_composite
-call_credentials_composite = cygrpc.call_credentials_composite
-
-def server_credentials_ssl(root_credentials, pair_sequence, force_client_auth):
-  return cygrpc.server_credentials_ssl(
-      root_credentials,
-      [cygrpc.SslPemKeyCertPair(key, pem) for key, pem in pair_sequence],
-      force_client_auth)
-
-def channel_credentials_ssl(
-    root_certificates, private_key, certificate_chain):
-  pair = None
-  if private_key is not None or certificate_chain is not None:
-    pair = cygrpc.SslPemKeyCertPair(private_key, certificate_chain)
-  return cygrpc.channel_credentials_ssl(root_certificates, pair)
-
-
-call_credentials_metadata_plugin = (
-    _plugin_wrapping.call_credentials_metadata_plugin)
-
-
-class CompletionQueue(_types.CompletionQueue):
-
-  def __init__(self):
-    self.completion_queue = cygrpc.CompletionQueue()
-
-  def next(self, deadline=float('+inf')):
-    raw_event = self.completion_queue.poll(cygrpc.Timespec(deadline))
-    if raw_event.type == cygrpc.CompletionType.queue_timeout:
-      return None
-    event_type = raw_event.type
-    event_tag = raw_event.tag
-    event_call = Call(raw_event.operation_call)
-    if raw_event.request_call_details:
-      event_call_details = _types.CallDetails(
-          raw_event.request_call_details.method,
-          raw_event.request_call_details.host,
-          float(raw_event.request_call_details.deadline))
-    else:
-      event_call_details = None
-    event_success = raw_event.success
-    event_results = []
-    if raw_event.is_new_request:
-      event_results.append(_types.OpResult(
-          _types.OpType.RECV_INITIAL_METADATA, raw_event.request_metadata,
-          None, None, None, None))
-    else:
-      if raw_event.batch_operations:
-        for operation in raw_event.batch_operations:
-          result_type = operation.type
-          result_initial_metadata = operation.received_metadata_or_none
-          result_trailing_metadata = operation.received_metadata_or_none
-          result_message = operation.received_message_or_none
-          if result_message is not None:
-            result_message = result_message.bytes()
-          result_cancelled = operation.received_cancelled_or_none
-          if operation.has_status:
-            result_status = _types.Status(
-                operation.received_status_code_or_none,
-                operation.received_status_details_or_none)
-          else:
-            result_status = None
-          event_results.append(
-              _types.OpResult(result_type, result_initial_metadata,
-                              result_trailing_metadata, result_message,
-                              result_status, result_cancelled))
-    return _types.Event(event_type, event_tag, event_call, event_call_details,
-                        event_results, event_success)
-
-  def shutdown(self):
-    self.completion_queue.shutdown()
-
-
-class Call(_types.Call):
-
-  def __init__(self, call):
-    self.call = call
-
-  def start_batch(self, ops, tag):
-    translated_ops = []
-    for op in ops:
-      if op.type == _types.OpType.SEND_INITIAL_METADATA:
-        translated_op = cygrpc.operation_send_initial_metadata(
-            cygrpc.Metadata(
-                cygrpc.Metadatum(key, value)
-                for key, value in op.initial_metadata),
-            op.flags)
-      elif op.type == _types.OpType.SEND_MESSAGE:
-        translated_op = cygrpc.operation_send_message(op.message, op.flags)
-      elif op.type == _types.OpType.SEND_CLOSE_FROM_CLIENT:
-        translated_op = cygrpc.operation_send_close_from_client(op.flags)
-      elif op.type == _types.OpType.SEND_STATUS_FROM_SERVER:
-        translated_op = cygrpc.operation_send_status_from_server(
-            cygrpc.Metadata(
-                cygrpc.Metadatum(key, value)
-                for key, value in op.trailing_metadata),
-            op.status.code,
-            op.status.details,
-            op.flags)
-      elif op.type == _types.OpType.RECV_INITIAL_METADATA:
-        translated_op = cygrpc.operation_receive_initial_metadata(
-            op.flags)
-      elif op.type == _types.OpType.RECV_MESSAGE:
-        translated_op = cygrpc.operation_receive_message(op.flags)
-      elif op.type == _types.OpType.RECV_STATUS_ON_CLIENT:
-        translated_op = cygrpc.operation_receive_status_on_client(
-            op.flags)
-      elif op.type == _types.OpType.RECV_CLOSE_ON_SERVER:
-        translated_op = cygrpc.operation_receive_close_on_server(op.flags)
-      else:
-        raise ValueError('unexpected operation type {}'.format(op.type))
-      translated_ops.append(translated_op)
-    return self.call.start_batch(cygrpc.Operations(translated_ops), tag)
-
-  def cancel(self, code=None, details=None):
-    if code is None and details is None:
-      return self.call.cancel()
-    else:
-      return self.call.cancel(code, details)
-
-  def peer(self):
-    return self.call.peer()
-
-  def set_credentials(self, creds):
-    return self.call.set_credentials(creds)
-
-
-class Channel(_types.Channel):
-
-  def __init__(self, target, args, creds=None):
-    args = list(args) + [
-        (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)]
-    args = cygrpc.ChannelArgs(
-        cygrpc.ChannelArg(key, value) for key, value in args)
-    if creds is None:
-      self.channel = cygrpc.Channel(target, args)
-    else:
-      self.channel = cygrpc.Channel(target, args, creds)
-
-  def create_call(self, completion_queue, method, host, deadline=None):
-    internal_call = self.channel.create_call(
-        None, 0, completion_queue.completion_queue, method, host,
-        cygrpc.Timespec(deadline))
-    return Call(internal_call)
-
-  def check_connectivity_state(self, try_to_connect):
-    return self.channel.check_connectivity_state(try_to_connect)
-
-  def watch_connectivity_state(self, last_observed_state, deadline,
-                               completion_queue, tag):
-    self.channel.watch_connectivity_state(
-        last_observed_state, cygrpc.Timespec(deadline),
-        completion_queue.completion_queue, tag)
-
-  def target(self):
-    return self.channel.target()
-
-
-_NO_TAG = object()
-
-class Server(_types.Server):
-
-  def __init__(self, completion_queue, args):
-    args = cygrpc.ChannelArgs(
-        cygrpc.ChannelArg(key, value) for key, value in args)
-    self.server = cygrpc.Server(args)
-    self.server.register_completion_queue(completion_queue.completion_queue)
-    self.server_queue = completion_queue
-
-  def add_http2_port(self, addr, creds=None):
-    if creds is None:
-      return self.server.add_http2_port(addr)
-    else:
-      return self.server.add_http2_port(addr, creds)
-
-  def start(self):
-    return self.server.start()
-
-  def shutdown(self, tag=None):
-    return self.server.shutdown(self.server_queue.completion_queue, tag)
-
-  def request_call(self, completion_queue, tag):
-    return self.server.request_call(completion_queue.completion_queue,
-                                    self.server_queue.completion_queue, tag)
-
-  def cancel_all_calls(self):
-    return self.server.cancel_all_calls()
diff --git a/src/python/grpcio/grpc/_adapter/_types.py b/src/python/grpcio/grpc/_adapter/_types.py
deleted file mode 100644
index b7cc6fbbb594ed97d812f9c4d6d4f753353c54d7..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/_adapter/_types.py
+++ /dev/null
@@ -1,446 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import abc
-import collections
-import enum
-
-import six
-
-from grpc._cython import cygrpc
-
-
-class GrpcChannelArgumentKeys(enum.Enum):
-  """Mirrors keys used in grpc_channel_args for GRPC-specific arguments."""
-  SSL_TARGET_NAME_OVERRIDE = 'grpc.ssl_target_name_override'
-
-
-@enum.unique
-class CallError(enum.IntEnum):
-  """Mirrors grpc_call_error in the C core."""
-  OK                        = cygrpc.CallError.ok
-  ERROR                     = cygrpc.CallError.error
-  ERROR_NOT_ON_SERVER       = cygrpc.CallError.not_on_server
-  ERROR_NOT_ON_CLIENT       = cygrpc.CallError.not_on_client
-  ERROR_ALREADY_ACCEPTED    = cygrpc.CallError.already_accepted
-  ERROR_ALREADY_INVOKED     = cygrpc.CallError.already_invoked
-  ERROR_NOT_INVOKED         = cygrpc.CallError.not_invoked
-  ERROR_ALREADY_FINISHED    = cygrpc.CallError.already_finished
-  ERROR_TOO_MANY_OPERATIONS = cygrpc.CallError.too_many_operations
-  ERROR_INVALID_FLAGS       = cygrpc.CallError.invalid_flags
-  ERROR_INVALID_METADATA    = cygrpc.CallError.invalid_metadata
-
-
-@enum.unique
-class StatusCode(enum.IntEnum):
-  """Mirrors grpc_status_code in the C core."""
-  OK                  = cygrpc.StatusCode.ok
-  CANCELLED           = cygrpc.StatusCode.cancelled
-  UNKNOWN             = cygrpc.StatusCode.unknown
-  INVALID_ARGUMENT    = cygrpc.StatusCode.invalid_argument
-  DEADLINE_EXCEEDED   = cygrpc.StatusCode.deadline_exceeded
-  NOT_FOUND           = cygrpc.StatusCode.not_found
-  ALREADY_EXISTS      = cygrpc.StatusCode.already_exists
-  PERMISSION_DENIED   = cygrpc.StatusCode.permission_denied
-  RESOURCE_EXHAUSTED  = cygrpc.StatusCode.resource_exhausted
-  FAILED_PRECONDITION = cygrpc.StatusCode.failed_precondition
-  ABORTED             = cygrpc.StatusCode.aborted
-  OUT_OF_RANGE        = cygrpc.StatusCode.out_of_range
-  UNIMPLEMENTED       = cygrpc.StatusCode.unimplemented
-  INTERNAL            = cygrpc.StatusCode.internal
-  UNAVAILABLE         = cygrpc.StatusCode.unavailable
-  DATA_LOSS           = cygrpc.StatusCode.data_loss
-  UNAUTHENTICATED     = cygrpc.StatusCode.unauthenticated
-
-
-@enum.unique
-class OpWriteFlags(enum.IntEnum):
-  """Mirrors defined write-flag constants in the C core."""
-  WRITE_BUFFER_HINT = cygrpc.WriteFlag.buffer_hint
-  WRITE_NO_COMPRESS = cygrpc.WriteFlag.no_compress
-
-
-@enum.unique
-class OpType(enum.IntEnum):
-  """Mirrors grpc_op_type in the C core."""
-  SEND_INITIAL_METADATA   = cygrpc.OperationType.send_initial_metadata
-  SEND_MESSAGE            = cygrpc.OperationType.send_message
-  SEND_CLOSE_FROM_CLIENT  = cygrpc.OperationType.send_close_from_client
-  SEND_STATUS_FROM_SERVER = cygrpc.OperationType.send_status_from_server
-  RECV_INITIAL_METADATA   = cygrpc.OperationType.receive_initial_metadata
-  RECV_MESSAGE            = cygrpc.OperationType.receive_message
-  RECV_STATUS_ON_CLIENT   = cygrpc.OperationType.receive_status_on_client
-  RECV_CLOSE_ON_SERVER    = cygrpc.OperationType.receive_close_on_server
-
-
-@enum.unique
-class EventType(enum.IntEnum):
-  """Mirrors grpc_completion_type in the C core."""
-  QUEUE_SHUTDOWN = cygrpc.CompletionType.queue_shutdown
-  QUEUE_TIMEOUT  = cygrpc.CompletionType.queue_timeout
-  OP_COMPLETE    = cygrpc.CompletionType.operation_complete
-
-
-@enum.unique
-class ConnectivityState(enum.IntEnum):
-  """Mirrors grpc_connectivity_state in the C core."""
-  IDLE              = cygrpc.ConnectivityState.idle
-  CONNECTING        = cygrpc.ConnectivityState.connecting
-  READY             = cygrpc.ConnectivityState.ready
-  TRANSIENT_FAILURE = cygrpc.ConnectivityState.transient_failure
-  FATAL_FAILURE     = cygrpc.ConnectivityState.shutdown
-
-
-class Status(collections.namedtuple(
-    'Status', [
-        'code',
-        'details',
-    ])):
-  """The end status of a GRPC call.
-
-  Attributes:
-    code (StatusCode): ...
-    details (str): ...
-  """
-
-
-class CallDetails(collections.namedtuple(
-    'CallDetails', [
-        'method',
-        'host',
-        'deadline',
-    ])):
-  """Provides information to the server about the client's call.
-
-  Attributes:
-    method (str): ...
-    host (str): ...
-    deadline (float): ...
-  """
-
-
-class OpArgs(collections.namedtuple(
-    'OpArgs', [
-        'type',
-        'initial_metadata',
-        'trailing_metadata',
-        'message',
-        'status',
-        'flags',
-    ])):
-  """Arguments passed into a GRPC operation.
-
-  Attributes:
-    type (OpType): ...
-    initial_metadata (sequence of 2-sequence of str): Only valid if type ==
-      OpType.SEND_INITIAL_METADATA, else is None.
-    trailing_metadata (sequence of 2-sequence of str): Only valid if type ==
-      OpType.SEND_STATUS_FROM_SERVER, else is None.
-    message (bytes): Only valid if type == OpType.SEND_MESSAGE, else is None.
-    status (Status): Only valid if type == OpType.SEND_STATUS_FROM_SERVER, else
-      is None.
-    flags (int): a bitwise OR'ing of 0 or more OpWriteFlags values.
-  """
-
-  @staticmethod
-  def send_initial_metadata(initial_metadata):
-    return OpArgs(OpType.SEND_INITIAL_METADATA, initial_metadata, None, None, None, 0)
-
-  @staticmethod
-  def send_message(message, flags):
-    return OpArgs(OpType.SEND_MESSAGE, None, None, message, None, flags)
-
-  @staticmethod
-  def send_close_from_client():
-    return OpArgs(OpType.SEND_CLOSE_FROM_CLIENT, None, None, None, None, 0)
-
-  @staticmethod
-  def send_status_from_server(trailing_metadata, status_code, status_details):
-    return OpArgs(OpType.SEND_STATUS_FROM_SERVER, None, trailing_metadata, None, Status(status_code, status_details), 0)
-
-  @staticmethod
-  def recv_initial_metadata():
-    return OpArgs(OpType.RECV_INITIAL_METADATA, None, None, None, None, 0);
-
-  @staticmethod
-  def recv_message():
-    return OpArgs(OpType.RECV_MESSAGE, None, None, None, None, 0)
-
-  @staticmethod
-  def recv_status_on_client():
-    return OpArgs(OpType.RECV_STATUS_ON_CLIENT, None, None, None, None, 0)
-
-  @staticmethod
-  def recv_close_on_server():
-    return OpArgs(OpType.RECV_CLOSE_ON_SERVER, None, None, None, None, 0)
-
-
-class OpResult(collections.namedtuple(
-    'OpResult', [
-        'type',
-        'initial_metadata',
-        'trailing_metadata',
-        'message',
-        'status',
-        'cancelled',
-    ])):
-  """Results received from a GRPC operation.
-
-  Attributes:
-    type (OpType): ...
-    initial_metadata (sequence of 2-sequence of str): Only valid if type ==
-      OpType.RECV_INITIAL_METADATA, else is None.
-    trailing_metadata (sequence of 2-sequence of str): Only valid if type ==
-      OpType.RECV_STATUS_ON_CLIENT, else is None.
-    message (bytes): Only valid if type == OpType.RECV_MESSAGE, else is None.
-    status (Status): Only valid if type == OpType.RECV_STATUS_ON_CLIENT, else
-      is None.
-    cancelled (bool): Only valid if type == OpType.RECV_CLOSE_ON_SERVER, else
-      is None.
-  """
-
-
-class Event(collections.namedtuple(
-    'Event', [
-        'type',
-        'tag',
-        'call',
-        'call_details',
-        'results',
-        'success',
-    ])):
-  """An event received from a GRPC completion queue.
-
-  Attributes:
-    type (EventType): ...
-    tag (object): ...
-    call (Call): The Call object associated with this event (if there is one,
-      else None).
-    call_details (CallDetails): The call details associated with the
-      server-side call (if there is such information, else None).
-    results (list of OpResult): ...
-    success (bool): ...
-  """
-
-
-class CompletionQueue(six.with_metaclass(abc.ABCMeta)):
-
-  @abc.abstractmethod
-  def __init__(self):
-    pass
-
-  def __iter__(self):
-    """This class may be iterated over.
-
-    This is the equivalent of calling next() repeatedly with an absolute
-    deadline of None (i.e. no deadline).
-    """
-    return self
-
-  def __next__(self):
-    return self.next()
-
-  @abc.abstractmethod
-  def next(self, deadline=float('+inf')):
-    """Get the next event on this completion queue.
-
-    Args:
-      deadline (float): absolute deadline in seconds from the Python epoch, or
-        None for no deadline.
-
-    Returns:
-      Event: ...
-    """
-    pass
-
-  @abc.abstractmethod
-  def shutdown(self):
-    """Begin the shutdown process of this completion queue.
-
-    Note that this does not immediately destroy the completion queue.
-    Nevertheless, user code should not pass it around after invoking this.
-    """
-    return None
-
-
-class Call(six.with_metaclass(abc.ABCMeta)):
-
-  @abc.abstractmethod
-  def start_batch(self, ops, tag):
-    """Start a batch of operations.
-
-    Args:
-      ops (sequence of OpArgs): ...
-      tag (object): ...
-
-    Returns:
-      CallError: ...
-    """
-    return CallError.ERROR
-
-  @abc.abstractmethod
-  def cancel(self, code=None, details=None):
-    """Cancel the call.
-
-    Args:
-      code (int): Status code to cancel with (on the server side). If
-        specified, so must `details`.
-      details (str): Status details to cancel with (on the server side). If
-        specified, so must `code`.
-
-    Returns:
-      CallError: ...
-    """
-    return CallError.ERROR
-
-  @abc.abstractmethod
-  def peer(self):
-    """Get the peer of this call.
-
-    Returns:
-      str: the peer of this call.
-    """
-    return None
-
-  def set_credentials(self, creds):
-    """Set per-call credentials.
-
-    Args:
-      creds (CallCredentials): Credentials to be set for this call.
-    """
-    return None
-
-
-class Channel(six.with_metaclass(abc.ABCMeta)):
-
-  @abc.abstractmethod
-  def __init__(self, target, args, credentials=None):
-    """Initialize a Channel.
-
-    Args:
-      target (str): ...
-      args (sequence of 2-sequence of str, (str|integer)): ...
-      credentials (ChannelCredentials): If None, create an insecure channel,
-        else create a secure channel using the client credentials.
-    """
-
-  @abc.abstractmethod
-  def create_call(self, completion_queue, method, host, deadline=float('+inf')):
-    """Create a call from this channel.
-
-    Args:
-      completion_queue (CompletionQueue): ...
-      method (str): ...
-      host (str): ...
-      deadline (float): absolute deadline in seconds from the Python epoch, or
-        None for no deadline.
-
-    Returns:
-      Call: call object associated with this Channel and passed parameters.
-    """
-    return None
-
-  @abc.abstractmethod
-  def check_connectivity_state(self, try_to_connect):
-    """Check and optionally repair the connectivity state of the channel.
-
-    Args:
-      try_to_connect (bool): whether or not to try to connect the channel if
-      disconnected.
-
-    Returns:
-      ConnectivityState: state of the channel at the time of this invocation.
-    """
-    return None
-
-  @abc.abstractmethod
-  def watch_connectivity_state(self, last_observed_state, deadline,
-                               completion_queue, tag):
-    """Watch for connectivity state changes from the last_observed_state.
-
-    Args:
-      last_observed_state (ConnectivityState): ...
-      deadline (float): ...
-      completion_queue (CompletionQueue): ...
-      tag (object) ...
-    """
-
-  @abc.abstractmethod
-  def target(self):
-    """Get the target of this channel.
-
-    Returns:
-      str: the target of this channel.
-    """
-    return None
-
-
-class Server(six.with_metaclass(abc.ABCMeta)):
-
-  @abc.abstractmethod
-  def __init__(self, completion_queue, args):
-    """Initialize a server.
-
-    Args:
-      completion_queue (CompletionQueue): ...
-      args (sequence of 2-sequence of str, (str|integer)): ...
-    """
-
-  @abc.abstractmethod
-  def add_http2_port(self, address, credentials=None):
-    """Adds an HTTP/2 address+port to the server.
-
-    Args:
-      address (str): ...
-      credentials (ServerCredentials): If None, create an insecure port, else
-        create a secure port using the server credentials.
-    """
-
-  @abc.abstractmethod
-  def start(self):
-    """Starts the server."""
-
-  @abc.abstractmethod
-  def shutdown(self, tag=None):
-    """Shuts down the server. Does not immediately destroy the server.
-
-    Args:
-      tag (object): if not None, have the server place an event on its
-        completion queue notifying it when this server has completely shut down.
-    """
-
-  @abc.abstractmethod
-  def request_call(self, completion_queue, tag):
-    """Requests a call from the server on the server's completion queue.
-
-    Args:
-      completion_queue (CompletionQueue): Completion queue for the call. May be
-        the same as the server's completion queue.
-      tag (object) ...
-    """
diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py
index 7cdd542de2733198e8ceee988860f18519cfeae8..29dbc3a668ba5414f6a2b49a1644dad746c2947f 100644
--- a/src/python/grpcio/grpc/_channel.py
+++ b/src/python/grpcio/grpc/_channel.py
@@ -179,6 +179,7 @@ def _event_handler(state, call, response_deserializer):
 def _consume_request_iterator(
     request_iterator, state, call, request_serializer):
   event_handler = _event_handler(state, call, None)
+
   def consume_request_iterator():
     for request in request_iterator:
       serialized_request = _common.serialize(request, request_serializer)
@@ -194,7 +195,8 @@ def _consume_request_iterator(
                 cygrpc.operation_send_message(
                     serialized_request, _EMPTY_FLAGS),
             )
-            call.start_batch(cygrpc.Operations(operations), event_handler)
+            call.start_client_batch(cygrpc.Operations(operations),
+                                    event_handler)
             state.due.add(cygrpc.OperationType.send_message)
             while True:
               state.condition.wait()
@@ -210,10 +212,20 @@ def _consume_request_iterator(
         operations = (
             cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
         )
-        call.start_batch(cygrpc.Operations(operations), event_handler)
+        call.start_client_batch(cygrpc.Operations(operations), event_handler)
         state.due.add(cygrpc.OperationType.send_close_from_client)
-  thread = threading.Thread(target=consume_request_iterator)
-  thread.start()
+
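+  # Cleanup behavior handed to the CleanupThread below: if the RPC is still
+  # in flight, cancel the call so the consumption loop can exit.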
+  def stop_consumption_thread(timeout):
+    with state.condition:
+      if state.code is None:
+        call.cancel()
+        state.cancelled = True
+        _abort(state, grpc.StatusCode.CANCELLED, 'Cancelled!')
+        state.condition.notify_all()
+
+  consumption_thread = _common.CleanupThread(
+      stop_consumption_thread, target=consume_request_iterator)
+  consumption_thread.start()
 
 
 class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
@@ -301,7 +313,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
       if self._state.code is None:
         event_handler = _event_handler(
             self._state, self._call, self._response_deserializer)
-        self._call.start_batch(
+        self._call.start_client_batch(
             cygrpc.Operations(
                 (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
             event_handler)
@@ -353,13 +365,13 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
     with self._state.condition:
       while self._state.initial_metadata is None:
         self._state.condition.wait()
-      return self._state.initial_metadata
+      return _common.application_metadata(self._state.initial_metadata)
 
   def trailing_metadata(self):
     with self._state.condition:
       while self._state.trailing_metadata is None:
         self._state.condition.wait()
-      return self._state.trailing_metadata
+      return _common.application_metadata(self._state.trailing_metadata)
 
   def code(self):
     with self._state.condition:
@@ -371,7 +383,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
     with self._state.condition:
       while self._state.details is None:
         self._state.condition.wait()
-      return self._state.details
+      return _common.decode(self._state.details)
 
   def _repr(self):
     with self._state.condition:
@@ -379,7 +391,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
         return '<_Rendezvous object of in-flight RPC>'
       else:
         return '<_Rendezvous of RPC that terminated with ({}, {})>'.format(
-            self._state.code, self._state.details)
+            self._state.code, _common.decode(self._state.details))
 
   def __repr__(self):
     return self._repr()
@@ -440,7 +452,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
       state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
       operations = (
           cygrpc.operation_send_initial_metadata(
-              _common.metadata(metadata), _EMPTY_FLAGS),
+              _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
           cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
           cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
           cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
@@ -449,9 +461,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
       )
       return state, operations, deadline, deadline_timespec, None
 
-  def __call__(
-      self, request, timeout=None, metadata=None, credentials=None,
-      with_call=False):
+  def _blocking(self, request, timeout, metadata, credentials):
     state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
         request, timeout, metadata)
     if rendezvous:
@@ -462,9 +472,17 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
           None, 0, completion_queue, self._method, None, deadline_timespec)
       if credentials is not None:
         call.set_credentials(credentials._credentials)
-      call.start_batch(cygrpc.Operations(operations), None)
+      call.start_client_batch(cygrpc.Operations(operations), None)
       _handle_event(completion_queue.poll(), state, self._response_deserializer)
-      return _end_unary_response_blocking(state, with_call, deadline)
+      return state, deadline
+
+  def __call__(self, request, timeout=None, metadata=None, credentials=None):
+    state, deadline = self._blocking(request, timeout, metadata, credentials)
+    return _end_unary_response_blocking(state, False, deadline)
+
+  def with_call(self, request, timeout=None, metadata=None, credentials=None):
+    state, deadline = self._blocking(request, timeout, metadata, credentials)
+    return _end_unary_response_blocking(state, True, deadline)
 
   def future(self, request, timeout=None, metadata=None, credentials=None):
     state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
@@ -478,7 +496,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
         call.set_credentials(credentials._credentials)
       event_handler = _event_handler(state, call, self._response_deserializer)
       with state.condition:
-        call.start_batch(cygrpc.Operations(operations), event_handler)
+        call.start_client_batch(cygrpc.Operations(operations), event_handler)
       return _Rendezvous(state, call, self._response_deserializer, deadline)
 
 
@@ -506,18 +524,18 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
         call.set_credentials(credentials._credentials)
       event_handler = _event_handler(state, call, self._response_deserializer)
       with state.condition:
-        call.start_batch(
+        call.start_client_batch(
             cygrpc.Operations(
                 (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
             event_handler)
         operations = (
             cygrpc.operation_send_initial_metadata(
-                _common.metadata(metadata), _EMPTY_FLAGS),
+                _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
             cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
             cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
             cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
         )
-        call.start_batch(cygrpc.Operations(operations), event_handler)
+        call.start_client_batch(cygrpc.Operations(operations), event_handler)
       return _Rendezvous(state, call, self._response_deserializer, deadline)
 
 
@@ -532,9 +550,7 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
     self._request_serializer = request_serializer
     self._response_deserializer = response_deserializer
 
-  def __call__(
-      self, request_iterator, timeout=None, metadata=None, credentials=None,
-      with_call=False):
+  def _blocking(self, request_iterator, timeout, metadata, credentials):
     deadline, deadline_timespec = _deadline(timeout)
     state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
     completion_queue = cygrpc.CompletionQueue()
@@ -543,17 +559,17 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
     if credentials is not None:
       call.set_credentials(credentials._credentials)
     with state.condition:
-      call.start_batch(
+      call.start_client_batch(
           cygrpc.Operations(
               (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
           None)
       operations = (
           cygrpc.operation_send_initial_metadata(
-              _common.metadata(metadata), _EMPTY_FLAGS),
+              _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
           cygrpc.operation_receive_message(_EMPTY_FLAGS),
           cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
       )
-      call.start_batch(cygrpc.Operations(operations), None)
+      call.start_client_batch(cygrpc.Operations(operations), None)
       _consume_request_iterator(
           request_iterator, state, call, self._request_serializer)
     while True:
@@ -563,7 +579,19 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
         state.condition.notify_all()
         if not state.due:
           break
-    return _end_unary_response_blocking(state, with_call, deadline)
+    return state, deadline
+
+  def __call__(
+      self, request_iterator, timeout=None, metadata=None, credentials=None):
+    state, deadline = self._blocking(
+        request_iterator, timeout, metadata, credentials)
+    return _end_unary_response_blocking(state, False, deadline)
+
+  def with_call(
+      self, request_iterator, timeout=None, metadata=None, credentials=None):
+    state, deadline = self._blocking(
+        request_iterator, timeout, metadata, credentials)
+    return _end_unary_response_blocking(state, True, deadline)
 
   def future(
       self, request_iterator, timeout=None, metadata=None, credentials=None):
@@ -575,17 +603,17 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
       call.set_credentials(credentials._credentials)
     event_handler = _event_handler(state, call, self._response_deserializer)
     with state.condition:
-      call.start_batch(
+      call.start_client_batch(
           cygrpc.Operations(
               (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
           event_handler)
       operations = (
           cygrpc.operation_send_initial_metadata(
-              _common.metadata(metadata), _EMPTY_FLAGS),
+              _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
           cygrpc.operation_receive_message(_EMPTY_FLAGS),
           cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
       )
-      call.start_batch(cygrpc.Operations(operations), event_handler)
+      call.start_client_batch(cygrpc.Operations(operations), event_handler)
       _consume_request_iterator(
           request_iterator, state, call, self._request_serializer)
     return _Rendezvous(state, call, self._response_deserializer, deadline)
@@ -612,16 +640,16 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
       call.set_credentials(credentials._credentials)
     event_handler = _event_handler(state, call, self._response_deserializer)
     with state.condition:
-      call.start_batch(
+      call.start_client_batch(
           cygrpc.Operations(
               (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
           event_handler)
       operations = (
           cygrpc.operation_send_initial_metadata(
-              _common.metadata(metadata), _EMPTY_FLAGS),
+              _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
           cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
       )
-      call.start_batch(cygrpc.Operations(operations), event_handler)
+      call.start_client_batch(cygrpc.Operations(operations), event_handler)
       _consume_request_iterator(
           request_iterator, state, call, self._request_serializer)
     return _Rendezvous(state, call, self._response_deserializer, deadline)
@@ -636,16 +664,27 @@ class _ChannelCallState(object):
     self.managed_calls = None
 
 
-def _call_spin(state):
-  while True:
-    event = state.completion_queue.poll()
-    completed_call = event.tag(event)
-    if completed_call is not None:
-      with state.lock:
-        state.managed_calls.remove(completed_call)
-        if not state.managed_calls:
-          state.managed_calls = None
-          return
+def _run_channel_spin_thread(state):
+  def channel_spin():
+    while True:
+      event = state.completion_queue.poll()
+      completed_call = event.tag(event)
+      if completed_call is not None:
+        with state.lock:
+          state.managed_calls.remove(completed_call)
+          if not state.managed_calls:
+            state.managed_calls = None
+            return
+
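+  # Cleanup behavior for the spin thread: cancel every call still managed by
+  # the channel so the completion queue drains and channel_spin can return.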
+  def stop_channel_spin(timeout):
+    with state.lock:
+      if state.managed_calls is not None:
+        for call in state.managed_calls:
+          call.cancel()
+
+  channel_spin_thread = _common.CleanupThread(
+      stop_channel_spin, target=channel_spin)
+  channel_spin_thread.start()
 
 
 def _create_channel_managed_call(state):
@@ -674,8 +713,7 @@ def _create_channel_managed_call(state):
           parent, flags, state.completion_queue, method, host, deadline)
       if state.managed_calls is None:
         state.managed_calls = set((call,))
-        spin_thread = threading.Thread(target=_call_spin, args=(state,))
-        spin_thread.start()
+        _run_channel_spin_thread(state)
       else:
         state.managed_calls.add(call)
       return call
@@ -768,11 +806,18 @@ def _poll_connectivity(state, channel, initial_try_to_connect):
             _spawn_delivery(state, callbacks)
 
 
+def _moot(state):
+  with state.lock:
+    del state.callbacks_and_connectivities[:]
+
+
 def _subscribe(state, callback, try_to_connect):
   with state.lock:
     if not state.callbacks_and_connectivities and not state.polling:
-      polling_thread = threading.Thread(
-          target=_poll_connectivity,
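+      # Cleanup behavior for the polling thread: drop all subscriptions so no
+      # further connectivity callbacks are delivered.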
+      def cancel_all_subscriptions(timeout):
+        _moot(state)
+      polling_thread = _common.CleanupThread(
+          cancel_all_subscriptions, target=_poll_connectivity,
           args=(state, state.channel, bool(try_to_connect)))
       polling_thread.start()
       state.polling = True
@@ -796,19 +841,19 @@ def _unsubscribe(state, callback):
         break
 
 
-def _moot(state):
-  with state.lock:
-    del state.callbacks_and_connectivities[:]
-
-
 def _options(options):
   if options is None:
     pairs = ((cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT),)
   else:
     pairs = list(options) + [
         (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)]
-  return cygrpc.ChannelArgs(
-      cygrpc.ChannelArg(arg_name, arg_value) for arg_name, arg_value in pairs)
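+  # cygrpc.ChannelArg now expects bytes keys and int or bytes values, so
+  # encode every name and encode values that are not integers.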
+  encoded_pairs = [
+      (_common.encode(arg_name), arg_value) if isinstance(arg_value, int)
+      else (_common.encode(arg_name), _common.encode(arg_value))
+      for arg_name, arg_value in pairs]
+  return cygrpc.ChannelArgs([
+      cygrpc.ChannelArg(arg_name, arg_value)
+      for arg_name, arg_value in encoded_pairs])
 
 
 class Channel(grpc.Channel):
@@ -821,7 +866,8 @@ class Channel(grpc.Channel):
       options: Configuration options for the channel.
       credentials: A cygrpc.ChannelCredentials or None.
     """
-    self._channel = cygrpc.Channel(target, _options(options), credentials)
+    self._channel = cygrpc.Channel(
+        _common.encode(target), _options(options), credentials)
     self._call_state = _ChannelCallState(self._channel)
     self._connectivity_state = _ChannelConnectivityState(self._channel)
 
@@ -834,26 +880,26 @@ class Channel(grpc.Channel):
   def unary_unary(
       self, method, request_serializer=None, response_deserializer=None):
     return _UnaryUnaryMultiCallable(
-        self._channel, _create_channel_managed_call(self._call_state), method,
-        request_serializer, response_deserializer)
+        self._channel, _create_channel_managed_call(self._call_state),
+        _common.encode(method), request_serializer, response_deserializer)
 
   def unary_stream(
       self, method, request_serializer=None, response_deserializer=None):
     return _UnaryStreamMultiCallable(
-        self._channel, _create_channel_managed_call(self._call_state), method,
-        request_serializer, response_deserializer)
+        self._channel, _create_channel_managed_call(self._call_state),
+        _common.encode(method), request_serializer, response_deserializer)
 
   def stream_unary(
       self, method, request_serializer=None, response_deserializer=None):
     return _StreamUnaryMultiCallable(
-        self._channel, _create_channel_managed_call(self._call_state), method,
-        request_serializer, response_deserializer)
+        self._channel, _create_channel_managed_call(self._call_state),
+        _common.encode(method), request_serializer, response_deserializer)
 
   def stream_stream(
       self, method, request_serializer=None, response_deserializer=None):
     return _StreamStreamMultiCallable(
-        self._channel, _create_channel_managed_call(self._call_state), method,
-        request_serializer, response_deserializer)
+        self._channel, _create_channel_managed_call(self._call_state),
+        _common.encode(method), request_serializer, response_deserializer)
 
   def __del__(self):
     _moot(self._connectivity_state)
diff --git a/src/python/grpcio/grpc/_common.py b/src/python/grpcio/grpc/_common.py
index f351bea9e392266d23935ebe0131bf42c9ada199..4d7d5214195eab43ec66f8c68f61851dbe8f04d0 100644
--- a/src/python/grpcio/grpc/_common.py
+++ b/src/python/grpcio/grpc/_common.py
@@ -76,9 +76,37 @@ STATUS_CODE_TO_CYGRPC_STATUS_CODE = {
 }
 
 
-def metadata(application_metadata):
+def encode(s):
+  if isinstance(s, bytes):
+    return s
+  else:
+    return s.encode('ascii')
+
+
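+# Decode bytes coming from the C core, falling back to latin1 when they are
+# not valid UTF-8.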
+def decode(b):
+  if isinstance(b, str):
+    return b
+  else:
+    try:
+      return b.decode('utf8')
+    except UnicodeDecodeError:
+      logging.exception('Invalid encoding on {}'.format(b))
+      return b.decode('latin1')
+
+
+def cygrpc_metadata(application_metadata):
   return _EMPTY_METADATA if application_metadata is None else cygrpc.Metadata(
-      cygrpc.Metadatum(key, value) for key, value in application_metadata)
+      cygrpc.Metadatum(encode(key), encode(value))
+      for key, value in application_metadata)
+
+
+def application_metadata(cygrpc_metadata):
+  if cygrpc_metadata is None:
+    return ()
+  else:
+    return tuple(
+        (decode(key), value if key[-4:] == b'-bin' else decode(value))
+        for key, value in cygrpc_metadata)
 
 
 def _transform(message, transformer, exception_message):
@@ -101,17 +129,8 @@ def deserialize(serialized_message, deserializer):
                     'Exception deserializing message!')
 
 
-def _encode(s):
-  if isinstance(s, bytes):
-    return s
-  else:
-    return s.encode('ascii')
-
-
 def fully_qualified_method(group, method):
-  group = _encode(group)
-  method = _encode(method)
-  return b'/' + group + b'/' + method
+  return '/{}/{}'.format(group, method)
 
 
 class CleanupThread(threading.Thread):
diff --git a/src/python/grpcio/grpc/_links/_constants.py b/src/python/grpcio/grpc/_credential_composition.py
similarity index 71%
rename from src/python/grpcio/grpc/_links/_constants.py
rename to src/python/grpcio/grpc/_credential_composition.py
index 117fc5a639521f2fee42fb1700071beaf0877ea2..9cb5508e27cb65cfd8135d5d0bed5d6a7e12f606 100644
--- a/src/python/grpcio/grpc/_links/_constants.py
+++ b/src/python/grpcio/grpc/_credential_composition.py
@@ -1,4 +1,4 @@
-# Copyright 2015, Google Inc.
+# Copyright 2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -27,16 +27,22 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""Constants for use within this package."""
+from grpc._cython import cygrpc
 
-from grpc._adapter import _intermediary_low
-from grpc.beta import interfaces as beta_interfaces
 
-LOW_STATUS_CODE_TO_HIGH_STATUS_CODE = {
-    low: high for low, high in zip(
-        _intermediary_low.Code, beta_interfaces.StatusCode)
-}
+def _call(call_credentialses):
+  call_credentials_iterator = iter(call_credentialses)
+  composition = next(call_credentials_iterator)
+  for additional_call_credentials in call_credentials_iterator:
+    composition = cygrpc.call_credentials_composite(
+        composition, additional_call_credentials)
+  return composition
 
-HIGH_STATUS_CODE_TO_LOW_STATUS_CODE = {
-    high: low for low, high in LOW_STATUS_CODE_TO_HIGH_STATUS_CODE.items()
-}
+
+def call(call_credentialses):
+  return _call(call_credentialses)
+
+
+def channel(channel_credentials, call_credentialses):
+  return cygrpc.channel_credentials_composite(
+      channel_credentials, _call(call_credentialses))
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
index a09bbc75fe655dd0221534d7904701caa76a8c2a..ba60986143cac13030392711fbe44fea84063113 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
@@ -37,13 +37,16 @@ cdef class Call:
     self.c_call = NULL
     self.references = []
 
-  def start_batch(self, operations, tag):
+  def _start_batch(self, operations, tag, retain_self):
     if not self.is_valid:
       raise ValueError("invalid call object cannot be used from Python")
     cdef grpc_call_error result
     cdef Operations cy_operations = Operations(operations)
     cdef OperationTag operation_tag = OperationTag(tag)
-    operation_tag.operation_call = self
+    if retain_self:
+      operation_tag.operation_call = self
+    else:
+      operation_tag.operation_call = None
     operation_tag.batch_operations = cy_operations
     cpython.Py_INCREF(operation_tag)
     with nogil:
@@ -52,6 +55,14 @@ cdef class Call:
           <cpython.PyObject *>operation_tag, NULL)
     return result
 
+  def start_client_batch(self, operations, tag):
+    # Don't hold a reference to this call in the operations tag: the call
+    # should be cancelled, not kept alive, when it goes out of scope.
+    return self._start_batch(operations, tag, False)
+
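+  # Server-side batches keep a reference to the call in the operations tag so
+  # the call object stays alive until the batch's event is delivered.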
+  def start_server_batch(self, operations, tag):
+    return self._start_batch(operations, tag, True)
+
   def cancel(
       self, grpc_status_code error_code=GRPC_STATUS__DO_NOT_USE,
       details=None):
@@ -94,8 +105,7 @@ cdef class Call:
 
   def __dealloc__(self):
     if self.c_call != NULL:
-      with nogil:
-        grpc_call_destroy(self.c_call)
+      grpc_call_destroy(self.c_call)
 
   # The object *should* always be valid from Python. Used for debugging.
   @property
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
index 866cff0d019ef37ca67eed1c598c23d329d5819c..54164014313d07976a163941944dcb7ad587706c 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
@@ -32,9 +32,8 @@ cimport cpython
 
 cdef class Channel:
 
-  def __cinit__(self, target, ChannelArgs arguments=None,
+  def __cinit__(self, bytes target, ChannelArgs arguments=None,
                 ChannelCredentials channel_credentials=None):
-    target = str_to_bytes(target)
     cdef grpc_channel_args *c_arguments = NULL
     cdef char *c_target = NULL
     self.c_channel = NULL
@@ -57,8 +56,6 @@ cdef class Channel:
   def create_call(self, Call parent, int flags,
                   CompletionQueue queue not None,
                   method, host, Timespec deadline not None):
-    method = str_to_bytes(method)
-    host = str_to_bytes(host)
     if queue.is_shutting_down:
       raise ValueError("queue must not be shutting down or shutdown")
     cdef char *method_c_string = method
@@ -105,5 +102,4 @@ cdef class Channel:
 
   def __dealloc__(self):
     if self.c_channel != NULL:
-      with nogil:
-        grpc_channel_destroy(self.c_channel)
+      grpc_channel_destroy(self.c_channel)
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
index 90266516fed55cd5901f4be5af158a295d33fbc3..5955021ceb7c71db9f1d710b3769867304475c75 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
@@ -118,18 +118,14 @@ cdef class CompletionQueue:
 
   def __dealloc__(self):
     cdef gpr_timespec c_deadline
-    with nogil:
-      c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
+    c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
     if self.c_completion_queue != NULL:
       # Ensure shutdown
       if not self.is_shutting_down:
-        with nogil:
-          grpc_completion_queue_shutdown(self.c_completion_queue)
-      # Pump the queue
+        grpc_completion_queue_shutdown(self.c_completion_queue)
+      # Pump the queue (all outstanding calls should have been cancelled)
       while not self.is_shutdown:
-        with nogil:
-          event = grpc_completion_queue_next(
-              self.c_completion_queue, c_deadline, NULL)
+        event = grpc_completion_queue_next(
+            self.c_completion_queue, c_deadline, NULL)
         self._interpret_event(event)
-      with nogil:
-        grpc_completion_queue_destroy(self.c_completion_queue)
+      grpc_completion_queue_destroy(self.c_completion_queue)
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
index 470382d6091f9d97922ab486c82ff7c9e4b589a1..035ac49a8bf14bdd66ef33d1152ff1daebda8dd9 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
@@ -46,8 +46,7 @@ cdef class ChannelCredentials:
 
   def __dealloc__(self):
     if self.c_credentials != NULL:
-      with nogil:
-        grpc_channel_credentials_release(self.c_credentials)
+      grpc_channel_credentials_release(self.c_credentials)
 
 
 cdef class CallCredentials:
@@ -64,8 +63,7 @@ cdef class CallCredentials:
 
   def __dealloc__(self):
     if self.c_credentials != NULL:
-      with nogil:
-        grpc_call_credentials_release(self.c_credentials)
+      grpc_call_credentials_release(self.c_credentials)
 
 
 cdef class ServerCredentials:
@@ -76,13 +74,12 @@ cdef class ServerCredentials:
 
   def __dealloc__(self):
     if self.c_credentials != NULL:
-      with nogil:
-        grpc_server_credentials_release(self.c_credentials)
+      grpc_server_credentials_release(self.c_credentials)
 
 
 cdef class CredentialsMetadataPlugin:
 
-  def __cinit__(self, object plugin_callback, name):
+  def __cinit__(self, object plugin_callback, bytes name):
     """
     Args:
       plugin_callback (callable): Callback accepting a service URL (str/bytes)
@@ -91,9 +88,8 @@ cdef class CredentialsMetadataPlugin:
         when called should be non-blocking and eventually call the callback
         object with the appropriate status code/details and metadata (if
         successful).
-      name (str): Plugin name.
+      name (bytes): Plugin name.
     """
-    name = str_to_bytes(name)
     if not callable(plugin_callback):
       raise ValueError('expected callable plugin_callback')
     self.plugin_callback = plugin_callback
@@ -130,8 +126,7 @@ cdef void plugin_get_metadata(
     grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil:
   def python_callback(
       Metadata metadata, grpc_status_code status,
-      error_details):
-    error_details = str_to_bytes(error_details)
+      bytes error_details):
     cb(user_data, metadata.c_metadata_array.metadata,
        metadata.c_metadata_array.count, status, error_details)
   cdef CredentialsMetadataPlugin self = <CredentialsMetadataPlugin>state
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
index 168b9751aa23e9321226c7d43391f72f2d944502..7714504d1bb805278ff841cbb14f6042467ab736 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
@@ -37,6 +37,7 @@ cdef extern from "grpc/_cython/loader.h":
   ctypedef long int64_t
 
   int pygrpc_load_core(char*)
+  int pygrpc_initialize_core()
 
   void *gpr_malloc(size_t size) nogil
   void gpr_free(void *ptr) nogil
@@ -131,8 +132,8 @@ cdef extern from "grpc/_cython/loader.h":
   size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) nogil
   void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer) nogil
 
-  void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
-                                    grpc_byte_buffer *buffer) nogil
+  int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
+                                   grpc_byte_buffer *buffer) nogil
   int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
                                    gpr_slice *slice) nogil
   void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) nogil
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
index 0474697af82f18e4fe08788df311caa0c051be07..96c5b02bc2e903d437ba679b49ca3e1420b4956e 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
@@ -58,14 +58,14 @@ cdef class Event:
   cdef readonly bint success
   cdef readonly object tag
 
-  # For operations with calls
-  cdef readonly Call operation_call
-
   # For Server.request_call
   cdef readonly bint is_new_request
   cdef readonly CallDetails request_call_details
   cdef readonly Metadata request_metadata
 
+  # For server calls
+  cdef readonly Call operation_call
+
   # For Call.start_batch
   cdef readonly Operations batch_operations
 
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
index 0055d0d3a2835bf5928c6f9edebd8b1e98f650a9..54b3d00dfc7baec07f9d2540ffc921922647d5ee 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -231,17 +231,10 @@ cdef class Event:
 
 cdef class ByteBuffer:
 
-  def __cinit__(self, data):
+  def __cinit__(self, bytes data):
     if data is None:
       self.c_byte_buffer = NULL
       return
-    if isinstance(data, ByteBuffer):
-      data = (<ByteBuffer>data).bytes()
-      if data is None:
-        self.c_byte_buffer = NULL
-        return
-    else:
-      data = str_to_bytes(data)
 
     cdef char *c_data = data
     cdef gpr_slice data_slice
@@ -259,9 +252,13 @@ cdef class ByteBuffer:
     cdef gpr_slice data_slice
     cdef size_t data_slice_length
     cdef void *data_slice_pointer
+    cdef bint reader_status
     if self.c_byte_buffer != NULL:
       with nogil:
-        grpc_byte_buffer_reader_init(&reader, self.c_byte_buffer)
+        reader_status = grpc_byte_buffer_reader_init(
+            &reader, self.c_byte_buffer)
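+      # grpc_byte_buffer_reader_init can now fail; treat an unreadable buffer
+      # as containing no data.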
+      if not reader_status:
+        return None
       result = bytearray()
       with nogil:
         while grpc_byte_buffer_reader_next(&reader, &data_slice):
@@ -290,32 +287,33 @@ cdef class ByteBuffer:
 
   def __dealloc__(self):
     if self.c_byte_buffer != NULL:
-      with nogil:
-        grpc_byte_buffer_destroy(self.c_byte_buffer)
+      grpc_byte_buffer_destroy(self.c_byte_buffer)
 
 
 cdef class SslPemKeyCertPair:
 
-  def __cinit__(self, private_key, certificate_chain):
-    self.private_key = str_to_bytes(private_key)
-    self.certificate_chain = str_to_bytes(certificate_chain)
+  def __cinit__(self, bytes private_key, bytes certificate_chain):
+    self.private_key = private_key
+    self.certificate_chain = certificate_chain
     self.c_pair.private_key = self.private_key
     self.c_pair.certificate_chain = self.certificate_chain
 
 
 cdef class ChannelArg:
 
-  def __cinit__(self, key, value):
-    self.key = str_to_bytes(key)
+  def __cinit__(self, bytes key, value):
+    self.key = key
     self.c_arg.key = self.key
     if isinstance(value, int):
-      self.value = int(value)
+      self.value = value
       self.c_arg.type = GRPC_ARG_INTEGER
       self.c_arg.value.integer = self.value
-    else:
-      self.value = str_to_bytes(value)
+    elif isinstance(value, bytes):
+      self.value = value
       self.c_arg.type = GRPC_ARG_STRING
       self.c_arg.value.string = self.value
+    else:
+      raise TypeError('Expected int or bytes, got {}'.format(type(value)))
 
 
 cdef class ChannelArgs:
@@ -347,9 +345,9 @@ cdef class ChannelArgs:
 
 cdef class Metadatum:
 
-  def __cinit__(self, key, value):
-    self._key = str_to_bytes(key)
-    self._value = str_to_bytes(value)
+  def __cinit__(self, bytes key, bytes value):
+    self._key = key
+    self._value = value
     self.c_metadata.key = self._key
     self.c_metadata.value = self._value
     self.c_metadata.value_length = len(self._value)
@@ -421,8 +419,7 @@ cdef class Metadata:
     # this frees the allocated memory for the grpc_metadata_array (although
     # it'd be nice if that were documented somewhere...)
     # TODO(atash): document this in the C core
-    with nogil:
-      grpc_metadata_array_destroy(&self.c_metadata_array)
+    grpc_metadata_array_destroy(&self.c_metadata_array)
 
   def __len__(self):
     return self.c_metadata_array.count
@@ -531,8 +528,7 @@ cdef class Operation:
     # Python. The remaining one(s) are primitive fields filled in by GRPC core.
     # This means that we need to clean up after receive_status_on_client.
     if self.c_op.type == GRPC_OP_RECV_STATUS_ON_CLIENT:
-      with nogil:
-        gpr_free(self._received_status_details)
+      gpr_free(self._received_status_details)
 
 def operation_send_initial_metadata(Metadata metadata, int flags):
   cdef Operation op = Operation()
@@ -563,8 +559,7 @@ def operation_send_close_from_client(int flags):
   return op
 
 def operation_send_status_from_server(
-    Metadata metadata, grpc_status_code code, details, int flags):
-  details = str_to_bytes(details)
+    Metadata metadata, grpc_status_code code, bytes details, int flags):
   cdef Operation op = Operation()
   op.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER
   op.c_op.flags = flags
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
index 42afeb849879cfe4a5948094c53b3af455b1009f..4f2d51b03f5c91a8933b1ea5df6e980036334d75 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
@@ -101,7 +101,7 @@ cdef class Server:
     # Ensure the core has gotten a chance to do the start-up work
     self.backup_shutdown_queue.poll(Timespec(None))
 
-  def add_http2_port(self, address,
+  def add_http2_port(self, bytes address,
                      ServerCredentials server_credentials=None):
     address = str_to_bytes(address)
     self.references.append(address)
@@ -171,5 +171,4 @@ cdef class Server:
         # much but repeatedly release the GIL and wait
         while not self.is_shutdown:
           time.sleep(0)
-      with nogil:
-        grpc_server_destroy(self.c_server)
+      grpc_server_destroy(self.c_server)
diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pyx b/src/python/grpcio/grpc/_cython/cygrpc.pyx
index cf146f5a048ddeb4bd95a65416f4b8c60098cba5..e055d321bc76fe99fcb354889b7ec49c7b264463 100644
--- a/src/python/grpcio/grpc/_cython/cygrpc.pyx
+++ b/src/python/grpcio/grpc/_cython/cygrpc.pyx
@@ -45,30 +45,15 @@ include "grpc/_cython/_cygrpc/security.pyx.pxi"
 include "grpc/_cython/_cygrpc/server.pyx.pxi"
 
 #
-# Global state
+# Initialize gRPC
 #
 
-cdef class _ModuleState:
 
-  cdef bint is_loaded
+def _initialize():
+  if not pygrpc_initialize_core():
+    raise ImportError('failed to initialize core gRPC library')
 
-  def __cinit__(self):
-    if 'win32' in sys.platform:
-      filename = pkg_resources.resource_filename(
-          'grpc._cython', '_windows/grpc_c.64.python')
-      if not pygrpc_load_core(filename):
-        raise ImportError('failed to load core gRPC library')
-    with nogil:
-      grpc_init()
-    self.is_loaded = True
-    with nogil:
-      grpc_set_ssl_roots_override_callback(
+  grpc_set_ssl_roots_override_callback(
           <grpc_ssl_roots_override_callback>ssl_roots_override_callback)
 
-  def __dealloc__(self):
-    if self.is_loaded:
-      with nogil:
-        grpc_shutdown()
-
-_module_state = _ModuleState()
-
+_initialize()
diff --git a/src/python/grpcio/grpc/_cython/imports.generated.c b/src/python/grpcio/grpc/_cython/imports.generated.c
index 8437e74ba019793fd81b3fce4a45ed7521ef6f92..c0080b5a47aa6fcadb3bebc76d9f0f3013c998be 100644
--- a/src/python/grpcio/grpc/_cython/imports.generated.c
+++ b/src/python/grpcio/grpc/_cython/imports.generated.c
@@ -31,560 +31,7 @@
  *
  */
 
+/* TODO(atash) remove cruft */
 #include <grpc/support/port_platform.h>
 
 #include "imports.generated.h"
-
-#ifdef GPR_WINDOWS
-
-census_initialize_type census_initialize_import;
-census_shutdown_type census_shutdown_import;
-census_supported_type census_supported_import;
-census_enabled_type census_enabled_import;
-census_context_create_type census_context_create_import;
-census_context_destroy_type census_context_destroy_import;
-census_context_get_status_type census_context_get_status_import;
-census_context_initialize_iterator_type census_context_initialize_iterator_import;
-census_context_next_tag_type census_context_next_tag_import;
-census_context_get_tag_type census_context_get_tag_import;
-census_context_encode_type census_context_encode_import;
-census_context_decode_type census_context_decode_import;
-census_trace_mask_type census_trace_mask_import;
-census_set_trace_mask_type census_set_trace_mask_import;
-census_start_rpc_op_timestamp_type census_start_rpc_op_timestamp_import;
-census_start_client_rpc_op_type census_start_client_rpc_op_import;
-census_set_rpc_client_peer_type census_set_rpc_client_peer_import;
-census_start_server_rpc_op_type census_start_server_rpc_op_import;
-census_start_op_type census_start_op_import;
-census_end_op_type census_end_op_import;
-census_trace_print_type census_trace_print_import;
-census_trace_scan_start_type census_trace_scan_start_import;
-census_get_trace_record_type census_get_trace_record_import;
-census_trace_scan_end_type census_trace_scan_end_import;
-census_record_values_type census_record_values_import;
-census_view_create_type census_view_create_import;
-census_view_delete_type census_view_delete_import;
-census_view_metric_type census_view_metric_import;
-census_view_naggregations_type census_view_naggregations_import;
-census_view_tags_type census_view_tags_import;
-census_view_aggregrations_type census_view_aggregrations_import;
-census_view_get_data_type census_view_get_data_import;
-census_view_reset_type census_view_reset_import;
-grpc_compression_algorithm_parse_type grpc_compression_algorithm_parse_import;
-grpc_compression_algorithm_name_type grpc_compression_algorithm_name_import;
-grpc_compression_algorithm_for_level_type grpc_compression_algorithm_for_level_import;
-grpc_compression_options_init_type grpc_compression_options_init_import;
-grpc_compression_options_enable_algorithm_type grpc_compression_options_enable_algorithm_import;
-grpc_compression_options_disable_algorithm_type grpc_compression_options_disable_algorithm_import;
-grpc_compression_options_is_algorithm_enabled_type grpc_compression_options_is_algorithm_enabled_import;
-grpc_metadata_array_init_type grpc_metadata_array_init_import;
-grpc_metadata_array_destroy_type grpc_metadata_array_destroy_import;
-grpc_call_details_init_type grpc_call_details_init_import;
-grpc_call_details_destroy_type grpc_call_details_destroy_import;
-grpc_register_plugin_type grpc_register_plugin_import;
-grpc_init_type grpc_init_import;
-grpc_shutdown_type grpc_shutdown_import;
-grpc_version_string_type grpc_version_string_import;
-grpc_completion_queue_create_type grpc_completion_queue_create_import;
-grpc_completion_queue_next_type grpc_completion_queue_next_import;
-grpc_completion_queue_pluck_type grpc_completion_queue_pluck_import;
-grpc_completion_queue_shutdown_type grpc_completion_queue_shutdown_import;
-grpc_completion_queue_destroy_type grpc_completion_queue_destroy_import;
-grpc_alarm_create_type grpc_alarm_create_import;
-grpc_alarm_cancel_type grpc_alarm_cancel_import;
-grpc_alarm_destroy_type grpc_alarm_destroy_import;
-grpc_channel_check_connectivity_state_type grpc_channel_check_connectivity_state_import;
-grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import;
-grpc_channel_create_call_type grpc_channel_create_call_import;
-grpc_channel_ping_type grpc_channel_ping_import;
-grpc_channel_register_call_type grpc_channel_register_call_import;
-grpc_channel_create_registered_call_type grpc_channel_create_registered_call_import;
-grpc_call_start_batch_type grpc_call_start_batch_import;
-grpc_call_get_peer_type grpc_call_get_peer_import;
-grpc_census_call_set_context_type grpc_census_call_set_context_import;
-grpc_census_call_get_context_type grpc_census_call_get_context_import;
-grpc_channel_get_target_type grpc_channel_get_target_import;
-grpc_insecure_channel_create_type grpc_insecure_channel_create_import;
-grpc_lame_client_channel_create_type grpc_lame_client_channel_create_import;
-grpc_channel_destroy_type grpc_channel_destroy_import;
-grpc_call_cancel_type grpc_call_cancel_import;
-grpc_call_cancel_with_status_type grpc_call_cancel_with_status_import;
-grpc_call_destroy_type grpc_call_destroy_import;
-grpc_server_request_call_type grpc_server_request_call_import;
-grpc_server_register_method_type grpc_server_register_method_import;
-grpc_server_request_registered_call_type grpc_server_request_registered_call_import;
-grpc_server_create_type grpc_server_create_import;
-grpc_server_register_completion_queue_type grpc_server_register_completion_queue_import;
-grpc_server_register_non_listening_completion_queue_type grpc_server_register_non_listening_completion_queue_import;
-grpc_server_add_insecure_http2_port_type grpc_server_add_insecure_http2_port_import;
-grpc_server_start_type grpc_server_start_import;
-grpc_server_shutdown_and_notify_type grpc_server_shutdown_and_notify_import;
-grpc_server_cancel_all_calls_type grpc_server_cancel_all_calls_import;
-grpc_server_destroy_type grpc_server_destroy_import;
-grpc_tracer_set_enabled_type grpc_tracer_set_enabled_import;
-grpc_header_key_is_legal_type grpc_header_key_is_legal_import;
-grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import;
-grpc_is_binary_header_type grpc_is_binary_header_import;
-grpc_call_error_to_string_type grpc_call_error_to_string_import;
-grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
-grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
-grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import;
-grpc_auth_context_property_iterator_type grpc_auth_context_property_iterator_import;
-grpc_auth_context_peer_identity_type grpc_auth_context_peer_identity_import;
-grpc_auth_context_find_properties_by_name_type grpc_auth_context_find_properties_by_name_import;
-grpc_auth_context_peer_identity_property_name_type grpc_auth_context_peer_identity_property_name_import;
-grpc_auth_context_peer_is_authenticated_type grpc_auth_context_peer_is_authenticated_import;
-grpc_call_auth_context_type grpc_call_auth_context_import;
-grpc_auth_context_release_type grpc_auth_context_release_import;
-grpc_auth_context_add_property_type grpc_auth_context_add_property_import;
-grpc_auth_context_add_cstring_property_type grpc_auth_context_add_cstring_property_import;
-grpc_auth_context_set_peer_identity_property_name_type grpc_auth_context_set_peer_identity_property_name_import;
-grpc_channel_credentials_release_type grpc_channel_credentials_release_import;
-grpc_google_default_credentials_create_type grpc_google_default_credentials_create_import;
-grpc_set_ssl_roots_override_callback_type grpc_set_ssl_roots_override_callback_import;
-grpc_ssl_credentials_create_type grpc_ssl_credentials_create_import;
-grpc_call_credentials_release_type grpc_call_credentials_release_import;
-grpc_composite_channel_credentials_create_type grpc_composite_channel_credentials_create_import;
-grpc_composite_call_credentials_create_type grpc_composite_call_credentials_create_import;
-grpc_google_compute_engine_credentials_create_type grpc_google_compute_engine_credentials_create_import;
-grpc_max_auth_token_lifetime_type grpc_max_auth_token_lifetime_import;
-grpc_service_account_jwt_access_credentials_create_type grpc_service_account_jwt_access_credentials_create_import;
-grpc_google_refresh_token_credentials_create_type grpc_google_refresh_token_credentials_create_import;
-grpc_access_token_credentials_create_type grpc_access_token_credentials_create_import;
-grpc_google_iam_credentials_create_type grpc_google_iam_credentials_create_import;
-grpc_metadata_credentials_create_from_plugin_type grpc_metadata_credentials_create_from_plugin_import;
-grpc_secure_channel_create_type grpc_secure_channel_create_import;
-grpc_server_credentials_release_type grpc_server_credentials_release_import;
-grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import;
-grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import;
-grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import;
-grpc_call_set_credentials_type grpc_call_set_credentials_import;
-grpc_server_credentials_set_auth_metadata_processor_type grpc_server_credentials_set_auth_metadata_processor_import;
-gpr_malloc_type gpr_malloc_import;
-gpr_free_type gpr_free_import;
-gpr_realloc_type gpr_realloc_import;
-gpr_malloc_aligned_type gpr_malloc_aligned_import;
-gpr_free_aligned_type gpr_free_aligned_import;
-gpr_set_allocation_functions_type gpr_set_allocation_functions_import;
-gpr_get_allocation_functions_type gpr_get_allocation_functions_import;
-grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import;
-grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import;
-grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import;
-grpc_byte_buffer_length_type grpc_byte_buffer_length_import;
-grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import;
-grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import;
-grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import;
-grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import;
-grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import;
-grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import;
-gpr_log_type gpr_log_import;
-gpr_log_message_type gpr_log_message_import;
-gpr_set_log_verbosity_type gpr_set_log_verbosity_import;
-gpr_log_verbosity_init_type gpr_log_verbosity_init_import;
-gpr_set_log_function_type gpr_set_log_function_import;
-gpr_slice_ref_type gpr_slice_ref_import;
-gpr_slice_unref_type gpr_slice_unref_import;
-gpr_slice_new_type gpr_slice_new_import;
-gpr_slice_new_with_len_type gpr_slice_new_with_len_import;
-gpr_slice_malloc_type gpr_slice_malloc_import;
-gpr_slice_from_copied_string_type gpr_slice_from_copied_string_import;
-gpr_slice_from_copied_buffer_type gpr_slice_from_copied_buffer_import;
-gpr_slice_from_static_string_type gpr_slice_from_static_string_import;
-gpr_slice_sub_type gpr_slice_sub_import;
-gpr_slice_sub_no_ref_type gpr_slice_sub_no_ref_import;
-gpr_slice_split_tail_type gpr_slice_split_tail_import;
-gpr_slice_split_head_type gpr_slice_split_head_import;
-gpr_empty_slice_type gpr_empty_slice_import;
-gpr_slice_cmp_type gpr_slice_cmp_import;
-gpr_slice_str_cmp_type gpr_slice_str_cmp_import;
-gpr_slice_buffer_init_type gpr_slice_buffer_init_import;
-gpr_slice_buffer_destroy_type gpr_slice_buffer_destroy_import;
-gpr_slice_buffer_add_type gpr_slice_buffer_add_import;
-gpr_slice_buffer_add_indexed_type gpr_slice_buffer_add_indexed_import;
-gpr_slice_buffer_addn_type gpr_slice_buffer_addn_import;
-gpr_slice_buffer_tiny_add_type gpr_slice_buffer_tiny_add_import;
-gpr_slice_buffer_pop_type gpr_slice_buffer_pop_import;
-gpr_slice_buffer_reset_and_unref_type gpr_slice_buffer_reset_and_unref_import;
-gpr_slice_buffer_swap_type gpr_slice_buffer_swap_import;
-gpr_slice_buffer_move_into_type gpr_slice_buffer_move_into_import;
-gpr_slice_buffer_trim_end_type gpr_slice_buffer_trim_end_import;
-gpr_slice_buffer_move_first_type gpr_slice_buffer_move_first_import;
-gpr_slice_buffer_take_first_type gpr_slice_buffer_take_first_import;
-gpr_mu_init_type gpr_mu_init_import;
-gpr_mu_destroy_type gpr_mu_destroy_import;
-gpr_mu_lock_type gpr_mu_lock_import;
-gpr_mu_unlock_type gpr_mu_unlock_import;
-gpr_mu_trylock_type gpr_mu_trylock_import;
-gpr_cv_init_type gpr_cv_init_import;
-gpr_cv_destroy_type gpr_cv_destroy_import;
-gpr_cv_wait_type gpr_cv_wait_import;
-gpr_cv_signal_type gpr_cv_signal_import;
-gpr_cv_broadcast_type gpr_cv_broadcast_import;
-gpr_once_init_type gpr_once_init_import;
-gpr_event_init_type gpr_event_init_import;
-gpr_event_set_type gpr_event_set_import;
-gpr_event_get_type gpr_event_get_import;
-gpr_event_wait_type gpr_event_wait_import;
-gpr_ref_init_type gpr_ref_init_import;
-gpr_ref_type gpr_ref_import;
-gpr_ref_non_zero_type gpr_ref_non_zero_import;
-gpr_refn_type gpr_refn_import;
-gpr_unref_type gpr_unref_import;
-gpr_stats_init_type gpr_stats_init_import;
-gpr_stats_inc_type gpr_stats_inc_import;
-gpr_stats_read_type gpr_stats_read_import;
-gpr_time_0_type gpr_time_0_import;
-gpr_inf_future_type gpr_inf_future_import;
-gpr_inf_past_type gpr_inf_past_import;
-gpr_time_init_type gpr_time_init_import;
-gpr_now_type gpr_now_import;
-gpr_convert_clock_type_type gpr_convert_clock_type_import;
-gpr_time_cmp_type gpr_time_cmp_import;
-gpr_time_max_type gpr_time_max_import;
-gpr_time_min_type gpr_time_min_import;
-gpr_time_add_type gpr_time_add_import;
-gpr_time_sub_type gpr_time_sub_import;
-gpr_time_from_micros_type gpr_time_from_micros_import;
-gpr_time_from_nanos_type gpr_time_from_nanos_import;
-gpr_time_from_millis_type gpr_time_from_millis_import;
-gpr_time_from_seconds_type gpr_time_from_seconds_import;
-gpr_time_from_minutes_type gpr_time_from_minutes_import;
-gpr_time_from_hours_type gpr_time_from_hours_import;
-gpr_time_to_millis_type gpr_time_to_millis_import;
-gpr_time_similar_type gpr_time_similar_import;
-gpr_sleep_until_type gpr_sleep_until_import;
-gpr_timespec_to_micros_type gpr_timespec_to_micros_import;
-gpr_avl_create_type gpr_avl_create_import;
-gpr_avl_ref_type gpr_avl_ref_import;
-gpr_avl_unref_type gpr_avl_unref_import;
-gpr_avl_add_type gpr_avl_add_import;
-gpr_avl_remove_type gpr_avl_remove_import;
-gpr_avl_get_type gpr_avl_get_import;
-gpr_avl_maybe_get_type gpr_avl_maybe_get_import;
-gpr_avl_is_empty_type gpr_avl_is_empty_import;
-gpr_cmdline_create_type gpr_cmdline_create_import;
-gpr_cmdline_add_int_type gpr_cmdline_add_int_import;
-gpr_cmdline_add_flag_type gpr_cmdline_add_flag_import;
-gpr_cmdline_add_string_type gpr_cmdline_add_string_import;
-gpr_cmdline_on_extra_arg_type gpr_cmdline_on_extra_arg_import;
-gpr_cmdline_set_survive_failure_type gpr_cmdline_set_survive_failure_import;
-gpr_cmdline_parse_type gpr_cmdline_parse_import;
-gpr_cmdline_destroy_type gpr_cmdline_destroy_import;
-gpr_cmdline_usage_string_type gpr_cmdline_usage_string_import;
-gpr_cpu_num_cores_type gpr_cpu_num_cores_import;
-gpr_cpu_current_cpu_type gpr_cpu_current_cpu_import;
-gpr_histogram_create_type gpr_histogram_create_import;
-gpr_histogram_destroy_type gpr_histogram_destroy_import;
-gpr_histogram_add_type gpr_histogram_add_import;
-gpr_histogram_merge_type gpr_histogram_merge_import;
-gpr_histogram_percentile_type gpr_histogram_percentile_import;
-gpr_histogram_mean_type gpr_histogram_mean_import;
-gpr_histogram_stddev_type gpr_histogram_stddev_import;
-gpr_histogram_variance_type gpr_histogram_variance_import;
-gpr_histogram_maximum_type gpr_histogram_maximum_import;
-gpr_histogram_minimum_type gpr_histogram_minimum_import;
-gpr_histogram_count_type gpr_histogram_count_import;
-gpr_histogram_sum_type gpr_histogram_sum_import;
-gpr_histogram_sum_of_squares_type gpr_histogram_sum_of_squares_import;
-gpr_histogram_get_contents_type gpr_histogram_get_contents_import;
-gpr_histogram_merge_contents_type gpr_histogram_merge_contents_import;
-gpr_join_host_port_type gpr_join_host_port_import;
-gpr_split_host_port_type gpr_split_host_port_import;
-gpr_format_message_type gpr_format_message_import;
-gpr_strdup_type gpr_strdup_import;
-gpr_asprintf_type gpr_asprintf_import;
-gpr_subprocess_binary_extension_type gpr_subprocess_binary_extension_import;
-gpr_subprocess_create_type gpr_subprocess_create_import;
-gpr_subprocess_destroy_type gpr_subprocess_destroy_import;
-gpr_subprocess_join_type gpr_subprocess_join_import;
-gpr_subprocess_interrupt_type gpr_subprocess_interrupt_import;
-gpr_thd_new_type gpr_thd_new_import;
-gpr_thd_options_default_type gpr_thd_options_default_import;
-gpr_thd_options_set_detached_type gpr_thd_options_set_detached_import;
-gpr_thd_options_set_joinable_type gpr_thd_options_set_joinable_import;
-gpr_thd_options_is_detached_type gpr_thd_options_is_detached_import;
-gpr_thd_options_is_joinable_type gpr_thd_options_is_joinable_import;
-gpr_thd_currentid_type gpr_thd_currentid_import;
-gpr_thd_join_type gpr_thd_join_import;
-
-#ifdef __cplusplus
-extern "C" {
-#endif  /* __cpluslus */
-
-void pygrpc_load_imports(HMODULE library) {
-  census_initialize_import = (census_initialize_type) GetProcAddress(library, "census_initialize");
-  census_shutdown_import = (census_shutdown_type) GetProcAddress(library, "census_shutdown");
-  census_supported_import = (census_supported_type) GetProcAddress(library, "census_supported");
-  census_enabled_import = (census_enabled_type) GetProcAddress(library, "census_enabled");
-  census_context_create_import = (census_context_create_type) GetProcAddress(library, "census_context_create");
-  census_context_destroy_import = (census_context_destroy_type) GetProcAddress(library, "census_context_destroy");
-  census_context_get_status_import = (census_context_get_status_type) GetProcAddress(library, "census_context_get_status");
-  census_context_initialize_iterator_import = (census_context_initialize_iterator_type) GetProcAddress(library, "census_context_initialize_iterator");
-  census_context_next_tag_import = (census_context_next_tag_type) GetProcAddress(library, "census_context_next_tag");
-  census_context_get_tag_import = (census_context_get_tag_type) GetProcAddress(library, "census_context_get_tag");
-  census_context_encode_import = (census_context_encode_type) GetProcAddress(library, "census_context_encode");
-  census_context_decode_import = (census_context_decode_type) GetProcAddress(library, "census_context_decode");
-  census_trace_mask_import = (census_trace_mask_type) GetProcAddress(library, "census_trace_mask");
-  census_set_trace_mask_import = (census_set_trace_mask_type) GetProcAddress(library, "census_set_trace_mask");
-  census_start_rpc_op_timestamp_import = (census_start_rpc_op_timestamp_type) GetProcAddress(library, "census_start_rpc_op_timestamp");
-  census_start_client_rpc_op_import = (census_start_client_rpc_op_type) GetProcAddress(library, "census_start_client_rpc_op");
-  census_set_rpc_client_peer_import = (census_set_rpc_client_peer_type) GetProcAddress(library, "census_set_rpc_client_peer");
-  census_start_server_rpc_op_import = (census_start_server_rpc_op_type) GetProcAddress(library, "census_start_server_rpc_op");
-  census_start_op_import = (census_start_op_type) GetProcAddress(library, "census_start_op");
-  census_end_op_import = (census_end_op_type) GetProcAddress(library, "census_end_op");
-  census_trace_print_import = (census_trace_print_type) GetProcAddress(library, "census_trace_print");
-  census_trace_scan_start_import = (census_trace_scan_start_type) GetProcAddress(library, "census_trace_scan_start");
-  census_get_trace_record_import = (census_get_trace_record_type) GetProcAddress(library, "census_get_trace_record");
-  census_trace_scan_end_import = (census_trace_scan_end_type) GetProcAddress(library, "census_trace_scan_end");
-  census_record_values_import = (census_record_values_type) GetProcAddress(library, "census_record_values");
-  census_view_create_import = (census_view_create_type) GetProcAddress(library, "census_view_create");
-  census_view_delete_import = (census_view_delete_type) GetProcAddress(library, "census_view_delete");
-  census_view_metric_import = (census_view_metric_type) GetProcAddress(library, "census_view_metric");
-  census_view_naggregations_import = (census_view_naggregations_type) GetProcAddress(library, "census_view_naggregations");
-  census_view_tags_import = (census_view_tags_type) GetProcAddress(library, "census_view_tags");
-  census_view_aggregrations_import = (census_view_aggregrations_type) GetProcAddress(library, "census_view_aggregrations");
-  census_view_get_data_import = (census_view_get_data_type) GetProcAddress(library, "census_view_get_data");
-  census_view_reset_import = (census_view_reset_type) GetProcAddress(library, "census_view_reset");
-  grpc_compression_algorithm_parse_import = (grpc_compression_algorithm_parse_type) GetProcAddress(library, "grpc_compression_algorithm_parse");
-  grpc_compression_algorithm_name_import = (grpc_compression_algorithm_name_type) GetProcAddress(library, "grpc_compression_algorithm_name");
-  grpc_compression_algorithm_for_level_import = (grpc_compression_algorithm_for_level_type) GetProcAddress(library, "grpc_compression_algorithm_for_level");
-  grpc_compression_options_init_import = (grpc_compression_options_init_type) GetProcAddress(library, "grpc_compression_options_init");
-  grpc_compression_options_enable_algorithm_import = (grpc_compression_options_enable_algorithm_type) GetProcAddress(library, "grpc_compression_options_enable_algorithm");
-  grpc_compression_options_disable_algorithm_import = (grpc_compression_options_disable_algorithm_type) GetProcAddress(library, "grpc_compression_options_disable_algorithm");
-  grpc_compression_options_is_algorithm_enabled_import = (grpc_compression_options_is_algorithm_enabled_type) GetProcAddress(library, "grpc_compression_options_is_algorithm_enabled");
-  grpc_metadata_array_init_import = (grpc_metadata_array_init_type) GetProcAddress(library, "grpc_metadata_array_init");
-  grpc_metadata_array_destroy_import = (grpc_metadata_array_destroy_type) GetProcAddress(library, "grpc_metadata_array_destroy");
-  grpc_call_details_init_import = (grpc_call_details_init_type) GetProcAddress(library, "grpc_call_details_init");
-  grpc_call_details_destroy_import = (grpc_call_details_destroy_type) GetProcAddress(library, "grpc_call_details_destroy");
-  grpc_register_plugin_import = (grpc_register_plugin_type) GetProcAddress(library, "grpc_register_plugin");
-  grpc_init_import = (grpc_init_type) GetProcAddress(library, "grpc_init");
-  grpc_shutdown_import = (grpc_shutdown_type) GetProcAddress(library, "grpc_shutdown");
-  grpc_version_string_import = (grpc_version_string_type) GetProcAddress(library, "grpc_version_string");
-  grpc_completion_queue_create_import = (grpc_completion_queue_create_type) GetProcAddress(library, "grpc_completion_queue_create");
-  grpc_completion_queue_next_import = (grpc_completion_queue_next_type) GetProcAddress(library, "grpc_completion_queue_next");
-  grpc_completion_queue_pluck_import = (grpc_completion_queue_pluck_type) GetProcAddress(library, "grpc_completion_queue_pluck");
-  grpc_completion_queue_shutdown_import = (grpc_completion_queue_shutdown_type) GetProcAddress(library, "grpc_completion_queue_shutdown");
-  grpc_completion_queue_destroy_import = (grpc_completion_queue_destroy_type) GetProcAddress(library, "grpc_completion_queue_destroy");
-  grpc_alarm_create_import = (grpc_alarm_create_type) GetProcAddress(library, "grpc_alarm_create");
-  grpc_alarm_cancel_import = (grpc_alarm_cancel_type) GetProcAddress(library, "grpc_alarm_cancel");
-  grpc_alarm_destroy_import = (grpc_alarm_destroy_type) GetProcAddress(library, "grpc_alarm_destroy");
-  grpc_channel_check_connectivity_state_import = (grpc_channel_check_connectivity_state_type) GetProcAddress(library, "grpc_channel_check_connectivity_state");
-  grpc_channel_watch_connectivity_state_import = (grpc_channel_watch_connectivity_state_type) GetProcAddress(library, "grpc_channel_watch_connectivity_state");
-  grpc_channel_create_call_import = (grpc_channel_create_call_type) GetProcAddress(library, "grpc_channel_create_call");
-  grpc_channel_ping_import = (grpc_channel_ping_type) GetProcAddress(library, "grpc_channel_ping");
-  grpc_channel_register_call_import = (grpc_channel_register_call_type) GetProcAddress(library, "grpc_channel_register_call");
-  grpc_channel_create_registered_call_import = (grpc_channel_create_registered_call_type) GetProcAddress(library, "grpc_channel_create_registered_call");
-  grpc_call_start_batch_import = (grpc_call_start_batch_type) GetProcAddress(library, "grpc_call_start_batch");
-  grpc_call_get_peer_import = (grpc_call_get_peer_type) GetProcAddress(library, "grpc_call_get_peer");
-  grpc_census_call_set_context_import = (grpc_census_call_set_context_type) GetProcAddress(library, "grpc_census_call_set_context");
-  grpc_census_call_get_context_import = (grpc_census_call_get_context_type) GetProcAddress(library, "grpc_census_call_get_context");
-  grpc_channel_get_target_import = (grpc_channel_get_target_type) GetProcAddress(library, "grpc_channel_get_target");
-  grpc_insecure_channel_create_import = (grpc_insecure_channel_create_type) GetProcAddress(library, "grpc_insecure_channel_create");
-  grpc_lame_client_channel_create_import = (grpc_lame_client_channel_create_type) GetProcAddress(library, "grpc_lame_client_channel_create");
-  grpc_channel_destroy_import = (grpc_channel_destroy_type) GetProcAddress(library, "grpc_channel_destroy");
-  grpc_call_cancel_import = (grpc_call_cancel_type) GetProcAddress(library, "grpc_call_cancel");
-  grpc_call_cancel_with_status_import = (grpc_call_cancel_with_status_type) GetProcAddress(library, "grpc_call_cancel_with_status");
-  grpc_call_destroy_import = (grpc_call_destroy_type) GetProcAddress(library, "grpc_call_destroy");
-  grpc_server_request_call_import = (grpc_server_request_call_type) GetProcAddress(library, "grpc_server_request_call");
-  grpc_server_register_method_import = (grpc_server_register_method_type) GetProcAddress(library, "grpc_server_register_method");
-  grpc_server_request_registered_call_import = (grpc_server_request_registered_call_type) GetProcAddress(library, "grpc_server_request_registered_call");
-  grpc_server_create_import = (grpc_server_create_type) GetProcAddress(library, "grpc_server_create");
-  grpc_server_register_completion_queue_import = (grpc_server_register_completion_queue_type) GetProcAddress(library, "grpc_server_register_completion_queue");
-  grpc_server_register_non_listening_completion_queue_import = (grpc_server_register_non_listening_completion_queue_type) GetProcAddress(library, "grpc_server_register_non_listening_completion_queue");
-  grpc_server_add_insecure_http2_port_import = (grpc_server_add_insecure_http2_port_type) GetProcAddress(library, "grpc_server_add_insecure_http2_port");
-  grpc_server_start_import = (grpc_server_start_type) GetProcAddress(library, "grpc_server_start");
-  grpc_server_shutdown_and_notify_import = (grpc_server_shutdown_and_notify_type) GetProcAddress(library, "grpc_server_shutdown_and_notify");
-  grpc_server_cancel_all_calls_import = (grpc_server_cancel_all_calls_type) GetProcAddress(library, "grpc_server_cancel_all_calls");
-  grpc_server_destroy_import = (grpc_server_destroy_type) GetProcAddress(library, "grpc_server_destroy");
-  grpc_tracer_set_enabled_import = (grpc_tracer_set_enabled_type) GetProcAddress(library, "grpc_tracer_set_enabled");
-  grpc_header_key_is_legal_import = (grpc_header_key_is_legal_type) GetProcAddress(library, "grpc_header_key_is_legal");
-  grpc_header_nonbin_value_is_legal_import = (grpc_header_nonbin_value_is_legal_type) GetProcAddress(library, "grpc_header_nonbin_value_is_legal");
-  grpc_is_binary_header_import = (grpc_is_binary_header_type) GetProcAddress(library, "grpc_is_binary_header");
-  grpc_call_error_to_string_import = (grpc_call_error_to_string_type) GetProcAddress(library, "grpc_call_error_to_string");
-  grpc_insecure_channel_create_from_fd_import = (grpc_insecure_channel_create_from_fd_type) GetProcAddress(library, "grpc_insecure_channel_create_from_fd");
-  grpc_server_add_insecure_channel_from_fd_import = (grpc_server_add_insecure_channel_from_fd_type) GetProcAddress(library, "grpc_server_add_insecure_channel_from_fd");
-  grpc_auth_property_iterator_next_import = (grpc_auth_property_iterator_next_type) GetProcAddress(library, "grpc_auth_property_iterator_next");
-  grpc_auth_context_property_iterator_import = (grpc_auth_context_property_iterator_type) GetProcAddress(library, "grpc_auth_context_property_iterator");
-  grpc_auth_context_peer_identity_import = (grpc_auth_context_peer_identity_type) GetProcAddress(library, "grpc_auth_context_peer_identity");
-  grpc_auth_context_find_properties_by_name_import = (grpc_auth_context_find_properties_by_name_type) GetProcAddress(library, "grpc_auth_context_find_properties_by_name");
-  grpc_auth_context_peer_identity_property_name_import = (grpc_auth_context_peer_identity_property_name_type) GetProcAddress(library, "grpc_auth_context_peer_identity_property_name");
-  grpc_auth_context_peer_is_authenticated_import = (grpc_auth_context_peer_is_authenticated_type) GetProcAddress(library, "grpc_auth_context_peer_is_authenticated");
-  grpc_call_auth_context_import = (grpc_call_auth_context_type) GetProcAddress(library, "grpc_call_auth_context");
-  grpc_auth_context_release_import = (grpc_auth_context_release_type) GetProcAddress(library, "grpc_auth_context_release");
-  grpc_auth_context_add_property_import = (grpc_auth_context_add_property_type) GetProcAddress(library, "grpc_auth_context_add_property");
-  grpc_auth_context_add_cstring_property_import = (grpc_auth_context_add_cstring_property_type) GetProcAddress(library, "grpc_auth_context_add_cstring_property");
-  grpc_auth_context_set_peer_identity_property_name_import = (grpc_auth_context_set_peer_identity_property_name_type) GetProcAddress(library, "grpc_auth_context_set_peer_identity_property_name");
-  grpc_channel_credentials_release_import = (grpc_channel_credentials_release_type) GetProcAddress(library, "grpc_channel_credentials_release");
-  grpc_google_default_credentials_create_import = (grpc_google_default_credentials_create_type) GetProcAddress(library, "grpc_google_default_credentials_create");
-  grpc_set_ssl_roots_override_callback_import = (grpc_set_ssl_roots_override_callback_type) GetProcAddress(library, "grpc_set_ssl_roots_override_callback");
-  grpc_ssl_credentials_create_import = (grpc_ssl_credentials_create_type) GetProcAddress(library, "grpc_ssl_credentials_create");
-  grpc_call_credentials_release_import = (grpc_call_credentials_release_type) GetProcAddress(library, "grpc_call_credentials_release");
-  grpc_composite_channel_credentials_create_import = (grpc_composite_channel_credentials_create_type) GetProcAddress(library, "grpc_composite_channel_credentials_create");
-  grpc_composite_call_credentials_create_import = (grpc_composite_call_credentials_create_type) GetProcAddress(library, "grpc_composite_call_credentials_create");
-  grpc_google_compute_engine_credentials_create_import = (grpc_google_compute_engine_credentials_create_type) GetProcAddress(library, "grpc_google_compute_engine_credentials_create");
-  grpc_max_auth_token_lifetime_import = (grpc_max_auth_token_lifetime_type) GetProcAddress(library, "grpc_max_auth_token_lifetime");
-  grpc_service_account_jwt_access_credentials_create_import = (grpc_service_account_jwt_access_credentials_create_type) GetProcAddress(library, "grpc_service_account_jwt_access_credentials_create");
-  grpc_google_refresh_token_credentials_create_import = (grpc_google_refresh_token_credentials_create_type) GetProcAddress(library, "grpc_google_refresh_token_credentials_create");
-  grpc_access_token_credentials_create_import = (grpc_access_token_credentials_create_type) GetProcAddress(library, "grpc_access_token_credentials_create");
-  grpc_google_iam_credentials_create_import = (grpc_google_iam_credentials_create_type) GetProcAddress(library, "grpc_google_iam_credentials_create");
-  grpc_metadata_credentials_create_from_plugin_import = (grpc_metadata_credentials_create_from_plugin_type) GetProcAddress(library, "grpc_metadata_credentials_create_from_plugin");
-  grpc_secure_channel_create_import = (grpc_secure_channel_create_type) GetProcAddress(library, "grpc_secure_channel_create");
-  grpc_server_credentials_release_import = (grpc_server_credentials_release_type) GetProcAddress(library, "grpc_server_credentials_release");
-  grpc_ssl_server_credentials_create_import = (grpc_ssl_server_credentials_create_type) GetProcAddress(library, "grpc_ssl_server_credentials_create");
-  grpc_ssl_server_credentials_create_ex_import = (grpc_ssl_server_credentials_create_ex_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_ex");
-  grpc_server_add_secure_http2_port_import = (grpc_server_add_secure_http2_port_type) GetProcAddress(library, "grpc_server_add_secure_http2_port");
-  grpc_call_set_credentials_import = (grpc_call_set_credentials_type) GetProcAddress(library, "grpc_call_set_credentials");
-  grpc_server_credentials_set_auth_metadata_processor_import = (grpc_server_credentials_set_auth_metadata_processor_type) GetProcAddress(library, "grpc_server_credentials_set_auth_metadata_processor");
-  gpr_malloc_import = (gpr_malloc_type) GetProcAddress(library, "gpr_malloc");
-  gpr_free_import = (gpr_free_type) GetProcAddress(library, "gpr_free");
-  gpr_realloc_import = (gpr_realloc_type) GetProcAddress(library, "gpr_realloc");
-  gpr_malloc_aligned_import = (gpr_malloc_aligned_type) GetProcAddress(library, "gpr_malloc_aligned");
-  gpr_free_aligned_import = (gpr_free_aligned_type) GetProcAddress(library, "gpr_free_aligned");
-  gpr_set_allocation_functions_import = (gpr_set_allocation_functions_type) GetProcAddress(library, "gpr_set_allocation_functions");
-  gpr_get_allocation_functions_import = (gpr_get_allocation_functions_type) GetProcAddress(library, "gpr_get_allocation_functions");
-  grpc_raw_byte_buffer_create_import = (grpc_raw_byte_buffer_create_type) GetProcAddress(library, "grpc_raw_byte_buffer_create");
-  grpc_raw_compressed_byte_buffer_create_import = (grpc_raw_compressed_byte_buffer_create_type) GetProcAddress(library, "grpc_raw_compressed_byte_buffer_create");
-  grpc_byte_buffer_copy_import = (grpc_byte_buffer_copy_type) GetProcAddress(library, "grpc_byte_buffer_copy");
-  grpc_byte_buffer_length_import = (grpc_byte_buffer_length_type) GetProcAddress(library, "grpc_byte_buffer_length");
-  grpc_byte_buffer_destroy_import = (grpc_byte_buffer_destroy_type) GetProcAddress(library, "grpc_byte_buffer_destroy");
-  grpc_byte_buffer_reader_init_import = (grpc_byte_buffer_reader_init_type) GetProcAddress(library, "grpc_byte_buffer_reader_init");
-  grpc_byte_buffer_reader_destroy_import = (grpc_byte_buffer_reader_destroy_type) GetProcAddress(library, "grpc_byte_buffer_reader_destroy");
-  grpc_byte_buffer_reader_next_import = (grpc_byte_buffer_reader_next_type) GetProcAddress(library, "grpc_byte_buffer_reader_next");
-  grpc_byte_buffer_reader_readall_import = (grpc_byte_buffer_reader_readall_type) GetProcAddress(library, "grpc_byte_buffer_reader_readall");
-  grpc_raw_byte_buffer_from_reader_import = (grpc_raw_byte_buffer_from_reader_type) GetProcAddress(library, "grpc_raw_byte_buffer_from_reader");
-  gpr_log_import = (gpr_log_type) GetProcAddress(library, "gpr_log");
-  gpr_log_message_import = (gpr_log_message_type) GetProcAddress(library, "gpr_log_message");
-  gpr_set_log_verbosity_import = (gpr_set_log_verbosity_type) GetProcAddress(library, "gpr_set_log_verbosity");
-  gpr_log_verbosity_init_import = (gpr_log_verbosity_init_type) GetProcAddress(library, "gpr_log_verbosity_init");
-  gpr_set_log_function_import = (gpr_set_log_function_type) GetProcAddress(library, "gpr_set_log_function");
-  gpr_slice_ref_import = (gpr_slice_ref_type) GetProcAddress(library, "gpr_slice_ref");
-  gpr_slice_unref_import = (gpr_slice_unref_type) GetProcAddress(library, "gpr_slice_unref");
-  gpr_slice_new_import = (gpr_slice_new_type) GetProcAddress(library, "gpr_slice_new");
-  gpr_slice_new_with_len_import = (gpr_slice_new_with_len_type) GetProcAddress(library, "gpr_slice_new_with_len");
-  gpr_slice_malloc_import = (gpr_slice_malloc_type) GetProcAddress(library, "gpr_slice_malloc");
-  gpr_slice_from_copied_string_import = (gpr_slice_from_copied_string_type) GetProcAddress(library, "gpr_slice_from_copied_string");
-  gpr_slice_from_copied_buffer_import = (gpr_slice_from_copied_buffer_type) GetProcAddress(library, "gpr_slice_from_copied_buffer");
-  gpr_slice_from_static_string_import = (gpr_slice_from_static_string_type) GetProcAddress(library, "gpr_slice_from_static_string");
-  gpr_slice_sub_import = (gpr_slice_sub_type) GetProcAddress(library, "gpr_slice_sub");
-  gpr_slice_sub_no_ref_import = (gpr_slice_sub_no_ref_type) GetProcAddress(library, "gpr_slice_sub_no_ref");
-  gpr_slice_split_tail_import = (gpr_slice_split_tail_type) GetProcAddress(library, "gpr_slice_split_tail");
-  gpr_slice_split_head_import = (gpr_slice_split_head_type) GetProcAddress(library, "gpr_slice_split_head");
-  gpr_empty_slice_import = (gpr_empty_slice_type) GetProcAddress(library, "gpr_empty_slice");
-  gpr_slice_cmp_import = (gpr_slice_cmp_type) GetProcAddress(library, "gpr_slice_cmp");
-  gpr_slice_str_cmp_import = (gpr_slice_str_cmp_type) GetProcAddress(library, "gpr_slice_str_cmp");
-  gpr_slice_buffer_init_import = (gpr_slice_buffer_init_type) GetProcAddress(library, "gpr_slice_buffer_init");
-  gpr_slice_buffer_destroy_import = (gpr_slice_buffer_destroy_type) GetProcAddress(library, "gpr_slice_buffer_destroy");
-  gpr_slice_buffer_add_import = (gpr_slice_buffer_add_type) GetProcAddress(library, "gpr_slice_buffer_add");
-  gpr_slice_buffer_add_indexed_import = (gpr_slice_buffer_add_indexed_type) GetProcAddress(library, "gpr_slice_buffer_add_indexed");
-  gpr_slice_buffer_addn_import = (gpr_slice_buffer_addn_type) GetProcAddress(library, "gpr_slice_buffer_addn");
-  gpr_slice_buffer_tiny_add_import = (gpr_slice_buffer_tiny_add_type) GetProcAddress(library, "gpr_slice_buffer_tiny_add");
-  gpr_slice_buffer_pop_import = (gpr_slice_buffer_pop_type) GetProcAddress(library, "gpr_slice_buffer_pop");
-  gpr_slice_buffer_reset_and_unref_import = (gpr_slice_buffer_reset_and_unref_type) GetProcAddress(library, "gpr_slice_buffer_reset_and_unref");
-  gpr_slice_buffer_swap_import = (gpr_slice_buffer_swap_type) GetProcAddress(library, "gpr_slice_buffer_swap");
-  gpr_slice_buffer_move_into_import = (gpr_slice_buffer_move_into_type) GetProcAddress(library, "gpr_slice_buffer_move_into");
-  gpr_slice_buffer_trim_end_import = (gpr_slice_buffer_trim_end_type) GetProcAddress(library, "gpr_slice_buffer_trim_end");
-  gpr_slice_buffer_move_first_import = (gpr_slice_buffer_move_first_type) GetProcAddress(library, "gpr_slice_buffer_move_first");
-  gpr_slice_buffer_take_first_import = (gpr_slice_buffer_take_first_type) GetProcAddress(library, "gpr_slice_buffer_take_first");
-  gpr_mu_init_import = (gpr_mu_init_type) GetProcAddress(library, "gpr_mu_init");
-  gpr_mu_destroy_import = (gpr_mu_destroy_type) GetProcAddress(library, "gpr_mu_destroy");
-  gpr_mu_lock_import = (gpr_mu_lock_type) GetProcAddress(library, "gpr_mu_lock");
-  gpr_mu_unlock_import = (gpr_mu_unlock_type) GetProcAddress(library, "gpr_mu_unlock");
-  gpr_mu_trylock_import = (gpr_mu_trylock_type) GetProcAddress(library, "gpr_mu_trylock");
-  gpr_cv_init_import = (gpr_cv_init_type) GetProcAddress(library, "gpr_cv_init");
-  gpr_cv_destroy_import = (gpr_cv_destroy_type) GetProcAddress(library, "gpr_cv_destroy");
-  gpr_cv_wait_import = (gpr_cv_wait_type) GetProcAddress(library, "gpr_cv_wait");
-  gpr_cv_signal_import = (gpr_cv_signal_type) GetProcAddress(library, "gpr_cv_signal");
-  gpr_cv_broadcast_import = (gpr_cv_broadcast_type) GetProcAddress(library, "gpr_cv_broadcast");
-  gpr_once_init_import = (gpr_once_init_type) GetProcAddress(library, "gpr_once_init");
-  gpr_event_init_import = (gpr_event_init_type) GetProcAddress(library, "gpr_event_init");
-  gpr_event_set_import = (gpr_event_set_type) GetProcAddress(library, "gpr_event_set");
-  gpr_event_get_import = (gpr_event_get_type) GetProcAddress(library, "gpr_event_get");
-  gpr_event_wait_import = (gpr_event_wait_type) GetProcAddress(library, "gpr_event_wait");
-  gpr_ref_init_import = (gpr_ref_init_type) GetProcAddress(library, "gpr_ref_init");
-  gpr_ref_import = (gpr_ref_type) GetProcAddress(library, "gpr_ref");
-  gpr_ref_non_zero_import = (gpr_ref_non_zero_type) GetProcAddress(library, "gpr_ref_non_zero");
-  gpr_refn_import = (gpr_refn_type) GetProcAddress(library, "gpr_refn");
-  gpr_unref_import = (gpr_unref_type) GetProcAddress(library, "gpr_unref");
-  gpr_stats_init_import = (gpr_stats_init_type) GetProcAddress(library, "gpr_stats_init");
-  gpr_stats_inc_import = (gpr_stats_inc_type) GetProcAddress(library, "gpr_stats_inc");
-  gpr_stats_read_import = (gpr_stats_read_type) GetProcAddress(library, "gpr_stats_read");
-  gpr_time_0_import = (gpr_time_0_type) GetProcAddress(library, "gpr_time_0");
-  gpr_inf_future_import = (gpr_inf_future_type) GetProcAddress(library, "gpr_inf_future");
-  gpr_inf_past_import = (gpr_inf_past_type) GetProcAddress(library, "gpr_inf_past");
-  gpr_time_init_import = (gpr_time_init_type) GetProcAddress(library, "gpr_time_init");
-  gpr_now_import = (gpr_now_type) GetProcAddress(library, "gpr_now");
-  gpr_convert_clock_type_import = (gpr_convert_clock_type_type) GetProcAddress(library, "gpr_convert_clock_type");
-  gpr_time_cmp_import = (gpr_time_cmp_type) GetProcAddress(library, "gpr_time_cmp");
-  gpr_time_max_import = (gpr_time_max_type) GetProcAddress(library, "gpr_time_max");
-  gpr_time_min_import = (gpr_time_min_type) GetProcAddress(library, "gpr_time_min");
-  gpr_time_add_import = (gpr_time_add_type) GetProcAddress(library, "gpr_time_add");
-  gpr_time_sub_import = (gpr_time_sub_type) GetProcAddress(library, "gpr_time_sub");
-  gpr_time_from_micros_import = (gpr_time_from_micros_type) GetProcAddress(library, "gpr_time_from_micros");
-  gpr_time_from_nanos_import = (gpr_time_from_nanos_type) GetProcAddress(library, "gpr_time_from_nanos");
-  gpr_time_from_millis_import = (gpr_time_from_millis_type) GetProcAddress(library, "gpr_time_from_millis");
-  gpr_time_from_seconds_import = (gpr_time_from_seconds_type) GetProcAddress(library, "gpr_time_from_seconds");
-  gpr_time_from_minutes_import = (gpr_time_from_minutes_type) GetProcAddress(library, "gpr_time_from_minutes");
-  gpr_time_from_hours_import = (gpr_time_from_hours_type) GetProcAddress(library, "gpr_time_from_hours");
-  gpr_time_to_millis_import = (gpr_time_to_millis_type) GetProcAddress(library, "gpr_time_to_millis");
-  gpr_time_similar_import = (gpr_time_similar_type) GetProcAddress(library, "gpr_time_similar");
-  gpr_sleep_until_import = (gpr_sleep_until_type) GetProcAddress(library, "gpr_sleep_until");
-  gpr_timespec_to_micros_import = (gpr_timespec_to_micros_type) GetProcAddress(library, "gpr_timespec_to_micros");
-  gpr_avl_create_import = (gpr_avl_create_type) GetProcAddress(library, "gpr_avl_create");
-  gpr_avl_ref_import = (gpr_avl_ref_type) GetProcAddress(library, "gpr_avl_ref");
-  gpr_avl_unref_import = (gpr_avl_unref_type) GetProcAddress(library, "gpr_avl_unref");
-  gpr_avl_add_import = (gpr_avl_add_type) GetProcAddress(library, "gpr_avl_add");
-  gpr_avl_remove_import = (gpr_avl_remove_type) GetProcAddress(library, "gpr_avl_remove");
-  gpr_avl_get_import = (gpr_avl_get_type) GetProcAddress(library, "gpr_avl_get");
-  gpr_avl_maybe_get_import = (gpr_avl_maybe_get_type) GetProcAddress(library, "gpr_avl_maybe_get");
-  gpr_avl_is_empty_import = (gpr_avl_is_empty_type) GetProcAddress(library, "gpr_avl_is_empty");
-  gpr_cmdline_create_import = (gpr_cmdline_create_type) GetProcAddress(library, "gpr_cmdline_create");
-  gpr_cmdline_add_int_import = (gpr_cmdline_add_int_type) GetProcAddress(library, "gpr_cmdline_add_int");
-  gpr_cmdline_add_flag_import = (gpr_cmdline_add_flag_type) GetProcAddress(library, "gpr_cmdline_add_flag");
-  gpr_cmdline_add_string_import = (gpr_cmdline_add_string_type) GetProcAddress(library, "gpr_cmdline_add_string");
-  gpr_cmdline_on_extra_arg_import = (gpr_cmdline_on_extra_arg_type) GetProcAddress(library, "gpr_cmdline_on_extra_arg");
-  gpr_cmdline_set_survive_failure_import = (gpr_cmdline_set_survive_failure_type) GetProcAddress(library, "gpr_cmdline_set_survive_failure");
-  gpr_cmdline_parse_import = (gpr_cmdline_parse_type) GetProcAddress(library, "gpr_cmdline_parse");
-  gpr_cmdline_destroy_import = (gpr_cmdline_destroy_type) GetProcAddress(library, "gpr_cmdline_destroy");
-  gpr_cmdline_usage_string_import = (gpr_cmdline_usage_string_type) GetProcAddress(library, "gpr_cmdline_usage_string");
-  gpr_cpu_num_cores_import = (gpr_cpu_num_cores_type) GetProcAddress(library, "gpr_cpu_num_cores");
-  gpr_cpu_current_cpu_import = (gpr_cpu_current_cpu_type) GetProcAddress(library, "gpr_cpu_current_cpu");
-  gpr_histogram_create_import = (gpr_histogram_create_type) GetProcAddress(library, "gpr_histogram_create");
-  gpr_histogram_destroy_import = (gpr_histogram_destroy_type) GetProcAddress(library, "gpr_histogram_destroy");
-  gpr_histogram_add_import = (gpr_histogram_add_type) GetProcAddress(library, "gpr_histogram_add");
-  gpr_histogram_merge_import = (gpr_histogram_merge_type) GetProcAddress(library, "gpr_histogram_merge");
-  gpr_histogram_percentile_import = (gpr_histogram_percentile_type) GetProcAddress(library, "gpr_histogram_percentile");
-  gpr_histogram_mean_import = (gpr_histogram_mean_type) GetProcAddress(library, "gpr_histogram_mean");
-  gpr_histogram_stddev_import = (gpr_histogram_stddev_type) GetProcAddress(library, "gpr_histogram_stddev");
-  gpr_histogram_variance_import = (gpr_histogram_variance_type) GetProcAddress(library, "gpr_histogram_variance");
-  gpr_histogram_maximum_import = (gpr_histogram_maximum_type) GetProcAddress(library, "gpr_histogram_maximum");
-  gpr_histogram_minimum_import = (gpr_histogram_minimum_type) GetProcAddress(library, "gpr_histogram_minimum");
-  gpr_histogram_count_import = (gpr_histogram_count_type) GetProcAddress(library, "gpr_histogram_count");
-  gpr_histogram_sum_import = (gpr_histogram_sum_type) GetProcAddress(library, "gpr_histogram_sum");
-  gpr_histogram_sum_of_squares_import = (gpr_histogram_sum_of_squares_type) GetProcAddress(library, "gpr_histogram_sum_of_squares");
-  gpr_histogram_get_contents_import = (gpr_histogram_get_contents_type) GetProcAddress(library, "gpr_histogram_get_contents");
-  gpr_histogram_merge_contents_import = (gpr_histogram_merge_contents_type) GetProcAddress(library, "gpr_histogram_merge_contents");
-  gpr_join_host_port_import = (gpr_join_host_port_type) GetProcAddress(library, "gpr_join_host_port");
-  gpr_split_host_port_import = (gpr_split_host_port_type) GetProcAddress(library, "gpr_split_host_port");
-  gpr_format_message_import = (gpr_format_message_type) GetProcAddress(library, "gpr_format_message");
-  gpr_strdup_import = (gpr_strdup_type) GetProcAddress(library, "gpr_strdup");
-  gpr_asprintf_import = (gpr_asprintf_type) GetProcAddress(library, "gpr_asprintf");
-  gpr_subprocess_binary_extension_import = (gpr_subprocess_binary_extension_type) GetProcAddress(library, "gpr_subprocess_binary_extension");
-  gpr_subprocess_create_import = (gpr_subprocess_create_type) GetProcAddress(library, "gpr_subprocess_create");
-  gpr_subprocess_destroy_import = (gpr_subprocess_destroy_type) GetProcAddress(library, "gpr_subprocess_destroy");
-  gpr_subprocess_join_import = (gpr_subprocess_join_type) GetProcAddress(library, "gpr_subprocess_join");
-  gpr_subprocess_interrupt_import = (gpr_subprocess_interrupt_type) GetProcAddress(library, "gpr_subprocess_interrupt");
-  gpr_thd_new_import = (gpr_thd_new_type) GetProcAddress(library, "gpr_thd_new");
-  gpr_thd_options_default_import = (gpr_thd_options_default_type) GetProcAddress(library, "gpr_thd_options_default");
-  gpr_thd_options_set_detached_import = (gpr_thd_options_set_detached_type) GetProcAddress(library, "gpr_thd_options_set_detached");
-  gpr_thd_options_set_joinable_import = (gpr_thd_options_set_joinable_type) GetProcAddress(library, "gpr_thd_options_set_joinable");
-  gpr_thd_options_is_detached_import = (gpr_thd_options_is_detached_type) GetProcAddress(library, "gpr_thd_options_is_detached");
-  gpr_thd_options_is_joinable_import = (gpr_thd_options_is_joinable_type) GetProcAddress(library, "gpr_thd_options_is_joinable");
-  gpr_thd_currentid_import = (gpr_thd_currentid_type) GetProcAddress(library, "gpr_thd_currentid");
-  gpr_thd_join_import = (gpr_thd_join_type) GetProcAddress(library, "gpr_thd_join");
-}
-
-#ifdef __cplusplus
-}
-#endif  /* __cpluslus */
-
-#endif /* !GPR_WINDOWS */
diff --git a/src/python/grpcio/grpc/_cython/imports.generated.h b/src/python/grpcio/grpc/_cython/imports.generated.h
index d52e8591b39873a783c15dbdd849cde2ee015de9..8e5c9a8ce2ba4db408c7d5c9625838b652bfc313 100644
--- a/src/python/grpcio/grpc/_cython/imports.generated.h
+++ b/src/python/grpcio/grpc/_cython/imports.generated.h
@@ -31,857 +31,12 @@
  *
  */
 
+/* TODO(atash) remove cruft */
 #ifndef PYGRPC_CYTHON_WINDOWS_IMPORTS_H_
 #define PYGRPC_CYTHON_WINDOWS_IMPORTS_H_
 
 #include <grpc/support/port_platform.h>
 
-#ifdef GPR_WINDOWS
-
-#include <windows.h>
-
-#include <grpc/census.h>
-#include <grpc/compression.h>
-#include <grpc/grpc.h>
-#include <grpc/grpc_posix.h>
-#include <grpc/grpc_security.h>
-#include <grpc/impl/codegen/alloc.h>
-#include <grpc/impl/codegen/byte_buffer.h>
-#include <grpc/impl/codegen/log.h>
-#include <grpc/impl/codegen/slice.h>
-#include <grpc/impl/codegen/slice_buffer.h>
-#include <grpc/impl/codegen/sync.h>
-#include <grpc/impl/codegen/time.h>
-#include <grpc/support/avl.h>
-#include <grpc/support/cmdline.h>
-#include <grpc/support/cpu.h>
-#include <grpc/support/histogram.h>
-#include <grpc/support/host_port.h>
-#include <grpc/support/log_windows.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/subprocess.h>
-#include <grpc/support/thd.h>
-
-typedef int(*census_initialize_type)(int features);
-extern census_initialize_type census_initialize_import;
-#define census_initialize census_initialize_import
-typedef void(*census_shutdown_type)(void);
-extern census_shutdown_type census_shutdown_import;
-#define census_shutdown census_shutdown_import
-typedef int(*census_supported_type)(void);
-extern census_supported_type census_supported_import;
-#define census_supported census_supported_import
-typedef int(*census_enabled_type)(void);
-extern census_enabled_type census_enabled_import;
-#define census_enabled census_enabled_import
-typedef census_context *(*census_context_create_type)(const census_context *base, const census_tag *tags, int ntags, census_context_status const **status);
-extern census_context_create_type census_context_create_import;
-#define census_context_create census_context_create_import
-typedef void(*census_context_destroy_type)(census_context *context);
-extern census_context_destroy_type census_context_destroy_import;
-#define census_context_destroy census_context_destroy_import
-typedef const census_context_status *(*census_context_get_status_type)(const census_context *context);
-extern census_context_get_status_type census_context_get_status_import;
-#define census_context_get_status census_context_get_status_import
-typedef void(*census_context_initialize_iterator_type)(const census_context *context, census_context_iterator *iterator);
-extern census_context_initialize_iterator_type census_context_initialize_iterator_import;
-#define census_context_initialize_iterator census_context_initialize_iterator_import
-typedef int(*census_context_next_tag_type)(census_context_iterator *iterator, census_tag *tag);
-extern census_context_next_tag_type census_context_next_tag_import;
-#define census_context_next_tag census_context_next_tag_import
-typedef int(*census_context_get_tag_type)(const census_context *context, const char *key, census_tag *tag);
-extern census_context_get_tag_type census_context_get_tag_import;
-#define census_context_get_tag census_context_get_tag_import
-typedef size_t(*census_context_encode_type)(const census_context *context, char *buffer, size_t buf_size);
-extern census_context_encode_type census_context_encode_import;
-#define census_context_encode census_context_encode_import
-typedef census_context *(*census_context_decode_type)(const char *buffer, size_t size);
-extern census_context_decode_type census_context_decode_import;
-#define census_context_decode census_context_decode_import
-typedef int(*census_trace_mask_type)(const census_context *context);
-extern census_trace_mask_type census_trace_mask_import;
-#define census_trace_mask census_trace_mask_import
-typedef void(*census_set_trace_mask_type)(int trace_mask);
-extern census_set_trace_mask_type census_set_trace_mask_import;
-#define census_set_trace_mask census_set_trace_mask_import
-typedef census_timestamp(*census_start_rpc_op_timestamp_type)(void);
-extern census_start_rpc_op_timestamp_type census_start_rpc_op_timestamp_import;
-#define census_start_rpc_op_timestamp census_start_rpc_op_timestamp_import
-typedef census_context *(*census_start_client_rpc_op_type)(const census_context *context, int64_t rpc_name_id, const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, const census_timestamp *start_time);
-extern census_start_client_rpc_op_type census_start_client_rpc_op_import;
-#define census_start_client_rpc_op census_start_client_rpc_op_import
-typedef void(*census_set_rpc_client_peer_type)(census_context *context, const char *peer);
-extern census_set_rpc_client_peer_type census_set_rpc_client_peer_import;
-#define census_set_rpc_client_peer census_set_rpc_client_peer_import
-typedef census_context *(*census_start_server_rpc_op_type)(const char *buffer, int64_t rpc_name_id, const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, census_timestamp *start_time);
-extern census_start_server_rpc_op_type census_start_server_rpc_op_import;
-#define census_start_server_rpc_op census_start_server_rpc_op_import
-typedef census_context *(*census_start_op_type)(census_context *context, const char *family, const char *name, int trace_mask);
-extern census_start_op_type census_start_op_import;
-#define census_start_op census_start_op_import
-typedef void(*census_end_op_type)(census_context *context, int status);
-extern census_end_op_type census_end_op_import;
-#define census_end_op census_end_op_import
-typedef void(*census_trace_print_type)(census_context *context, uint32_t type, const char *buffer, size_t n);
-extern census_trace_print_type census_trace_print_import;
-#define census_trace_print census_trace_print_import
-typedef int(*census_trace_scan_start_type)(int consume);
-extern census_trace_scan_start_type census_trace_scan_start_import;
-#define census_trace_scan_start census_trace_scan_start_import
-typedef int(*census_get_trace_record_type)(census_trace_record *trace_record);
-extern census_get_trace_record_type census_get_trace_record_import;
-#define census_get_trace_record census_get_trace_record_import
-typedef void(*census_trace_scan_end_type)();
-extern census_trace_scan_end_type census_trace_scan_end_import;
-#define census_trace_scan_end census_trace_scan_end_import
-typedef void(*census_record_values_type)(census_context *context, census_value *values, size_t nvalues);
-extern census_record_values_type census_record_values_import;
-#define census_record_values census_record_values_import
-typedef census_view *(*census_view_create_type)(uint32_t metric_id, const census_context *tags, const census_aggregation *aggregations, size_t naggregations);
-extern census_view_create_type census_view_create_import;
-#define census_view_create census_view_create_import
-typedef void(*census_view_delete_type)(census_view *view);
-extern census_view_delete_type census_view_delete_import;
-#define census_view_delete census_view_delete_import
-typedef size_t(*census_view_metric_type)(const census_view *view);
-extern census_view_metric_type census_view_metric_import;
-#define census_view_metric census_view_metric_import
-typedef size_t(*census_view_naggregations_type)(const census_view *view);
-extern census_view_naggregations_type census_view_naggregations_import;
-#define census_view_naggregations census_view_naggregations_import
-typedef const census_context *(*census_view_tags_type)(const census_view *view);
-extern census_view_tags_type census_view_tags_import;
-#define census_view_tags census_view_tags_import
-typedef const census_aggregation *(*census_view_aggregrations_type)(const census_view *view);
-extern census_view_aggregrations_type census_view_aggregrations_import;
-#define census_view_aggregrations census_view_aggregrations_import
-typedef const census_view_data *(*census_view_get_data_type)(const census_view *view);
-extern census_view_get_data_type census_view_get_data_import;
-#define census_view_get_data census_view_get_data_import
-typedef void(*census_view_reset_type)(census_view *view);
-extern census_view_reset_type census_view_reset_import;
-#define census_view_reset census_view_reset_import
-typedef int(*grpc_compression_algorithm_parse_type)(const char *name, size_t name_length, grpc_compression_algorithm *algorithm);
-extern grpc_compression_algorithm_parse_type grpc_compression_algorithm_parse_import;
-#define grpc_compression_algorithm_parse grpc_compression_algorithm_parse_import
-typedef int(*grpc_compression_algorithm_name_type)(grpc_compression_algorithm algorithm, char **name);
-extern grpc_compression_algorithm_name_type grpc_compression_algorithm_name_import;
-#define grpc_compression_algorithm_name grpc_compression_algorithm_name_import
-typedef grpc_compression_algorithm(*grpc_compression_algorithm_for_level_type)(grpc_compression_level level, uint32_t accepted_encodings);
-extern grpc_compression_algorithm_for_level_type grpc_compression_algorithm_for_level_import;
-#define grpc_compression_algorithm_for_level grpc_compression_algorithm_for_level_import
-typedef void(*grpc_compression_options_init_type)(grpc_compression_options *opts);
-extern grpc_compression_options_init_type grpc_compression_options_init_import;
-#define grpc_compression_options_init grpc_compression_options_init_import
-typedef void(*grpc_compression_options_enable_algorithm_type)(grpc_compression_options *opts, grpc_compression_algorithm algorithm);
-extern grpc_compression_options_enable_algorithm_type grpc_compression_options_enable_algorithm_import;
-#define grpc_compression_options_enable_algorithm grpc_compression_options_enable_algorithm_import
-typedef void(*grpc_compression_options_disable_algorithm_type)(grpc_compression_options *opts, grpc_compression_algorithm algorithm);
-extern grpc_compression_options_disable_algorithm_type grpc_compression_options_disable_algorithm_import;
-#define grpc_compression_options_disable_algorithm grpc_compression_options_disable_algorithm_import
-typedef int(*grpc_compression_options_is_algorithm_enabled_type)(const grpc_compression_options *opts, grpc_compression_algorithm algorithm);
-extern grpc_compression_options_is_algorithm_enabled_type grpc_compression_options_is_algorithm_enabled_import;
-#define grpc_compression_options_is_algorithm_enabled grpc_compression_options_is_algorithm_enabled_import
-typedef void(*grpc_metadata_array_init_type)(grpc_metadata_array *array);
-extern grpc_metadata_array_init_type grpc_metadata_array_init_import;
-#define grpc_metadata_array_init grpc_metadata_array_init_import
-typedef void(*grpc_metadata_array_destroy_type)(grpc_metadata_array *array);
-extern grpc_metadata_array_destroy_type grpc_metadata_array_destroy_import;
-#define grpc_metadata_array_destroy grpc_metadata_array_destroy_import
-typedef void(*grpc_call_details_init_type)(grpc_call_details *details);
-extern grpc_call_details_init_type grpc_call_details_init_import;
-#define grpc_call_details_init grpc_call_details_init_import
-typedef void(*grpc_call_details_destroy_type)(grpc_call_details *details);
-extern grpc_call_details_destroy_type grpc_call_details_destroy_import;
-#define grpc_call_details_destroy grpc_call_details_destroy_import
-typedef void(*grpc_register_plugin_type)(void (*init)(void), void (*destroy)(void));
-extern grpc_register_plugin_type grpc_register_plugin_import;
-#define grpc_register_plugin grpc_register_plugin_import
-typedef void(*grpc_init_type)(void);
-extern grpc_init_type grpc_init_import;
-#define grpc_init grpc_init_import
-typedef void(*grpc_shutdown_type)(void);
-extern grpc_shutdown_type grpc_shutdown_import;
-#define grpc_shutdown grpc_shutdown_import
-typedef const char *(*grpc_version_string_type)(void);
-extern grpc_version_string_type grpc_version_string_import;
-#define grpc_version_string grpc_version_string_import
-typedef grpc_completion_queue *(*grpc_completion_queue_create_type)(void *reserved);
-extern grpc_completion_queue_create_type grpc_completion_queue_create_import;
-#define grpc_completion_queue_create grpc_completion_queue_create_import
-typedef grpc_event(*grpc_completion_queue_next_type)(grpc_completion_queue *cq, gpr_timespec deadline, void *reserved);
-extern grpc_completion_queue_next_type grpc_completion_queue_next_import;
-#define grpc_completion_queue_next grpc_completion_queue_next_import
-typedef grpc_event(*grpc_completion_queue_pluck_type)(grpc_completion_queue *cq, void *tag, gpr_timespec deadline, void *reserved);
-extern grpc_completion_queue_pluck_type grpc_completion_queue_pluck_import;
-#define grpc_completion_queue_pluck grpc_completion_queue_pluck_import
-typedef void(*grpc_completion_queue_shutdown_type)(grpc_completion_queue *cq);
-extern grpc_completion_queue_shutdown_type grpc_completion_queue_shutdown_import;
-#define grpc_completion_queue_shutdown grpc_completion_queue_shutdown_import
-typedef void(*grpc_completion_queue_destroy_type)(grpc_completion_queue *cq);
-extern grpc_completion_queue_destroy_type grpc_completion_queue_destroy_import;
-#define grpc_completion_queue_destroy grpc_completion_queue_destroy_import
-typedef grpc_alarm *(*grpc_alarm_create_type)(grpc_completion_queue *cq, gpr_timespec deadline, void *tag);
-extern grpc_alarm_create_type grpc_alarm_create_import;
-#define grpc_alarm_create grpc_alarm_create_import
-typedef void(*grpc_alarm_cancel_type)(grpc_alarm *alarm);
-extern grpc_alarm_cancel_type grpc_alarm_cancel_import;
-#define grpc_alarm_cancel grpc_alarm_cancel_import
-typedef void(*grpc_alarm_destroy_type)(grpc_alarm *alarm);
-extern grpc_alarm_destroy_type grpc_alarm_destroy_import;
-#define grpc_alarm_destroy grpc_alarm_destroy_import
-typedef grpc_connectivity_state(*grpc_channel_check_connectivity_state_type)(grpc_channel *channel, int try_to_connect);
-extern grpc_channel_check_connectivity_state_type grpc_channel_check_connectivity_state_import;
-#define grpc_channel_check_connectivity_state grpc_channel_check_connectivity_state_import
-typedef void(*grpc_channel_watch_connectivity_state_type)(grpc_channel *channel, grpc_connectivity_state last_observed_state, gpr_timespec deadline, grpc_completion_queue *cq, void *tag);
-extern grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import;
-#define grpc_channel_watch_connectivity_state grpc_channel_watch_connectivity_state_import
-typedef grpc_call *(*grpc_channel_create_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, const char *method, const char *host, gpr_timespec deadline, void *reserved);
-extern grpc_channel_create_call_type grpc_channel_create_call_import;
-#define grpc_channel_create_call grpc_channel_create_call_import
-typedef void(*grpc_channel_ping_type)(grpc_channel *channel, grpc_completion_queue *cq, void *tag, void *reserved);
-extern grpc_channel_ping_type grpc_channel_ping_import;
-#define grpc_channel_ping grpc_channel_ping_import
-typedef void *(*grpc_channel_register_call_type)(grpc_channel *channel, const char *method, const char *host, void *reserved);
-extern grpc_channel_register_call_type grpc_channel_register_call_import;
-#define grpc_channel_register_call grpc_channel_register_call_import
-typedef grpc_call *(*grpc_channel_create_registered_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, void *registered_call_handle, gpr_timespec deadline, void *reserved);
-extern grpc_channel_create_registered_call_type grpc_channel_create_registered_call_import;
-#define grpc_channel_create_registered_call grpc_channel_create_registered_call_import
-typedef grpc_call_error(*grpc_call_start_batch_type)(grpc_call *call, const grpc_op *ops, size_t nops, void *tag, void *reserved);
-extern grpc_call_start_batch_type grpc_call_start_batch_import;
-#define grpc_call_start_batch grpc_call_start_batch_import
-typedef char *(*grpc_call_get_peer_type)(grpc_call *call);
-extern grpc_call_get_peer_type grpc_call_get_peer_import;
-#define grpc_call_get_peer grpc_call_get_peer_import
-typedef void(*grpc_census_call_set_context_type)(grpc_call *call, struct census_context *context);
-extern grpc_census_call_set_context_type grpc_census_call_set_context_import;
-#define grpc_census_call_set_context grpc_census_call_set_context_import
-typedef struct census_context *(*grpc_census_call_get_context_type)(grpc_call *call);
-extern grpc_census_call_get_context_type grpc_census_call_get_context_import;
-#define grpc_census_call_get_context grpc_census_call_get_context_import
-typedef char *(*grpc_channel_get_target_type)(grpc_channel *channel);
-extern grpc_channel_get_target_type grpc_channel_get_target_import;
-#define grpc_channel_get_target grpc_channel_get_target_import
-typedef grpc_channel *(*grpc_insecure_channel_create_type)(const char *target, const grpc_channel_args *args, void *reserved);
-extern grpc_insecure_channel_create_type grpc_insecure_channel_create_import;
-#define grpc_insecure_channel_create grpc_insecure_channel_create_import
-typedef grpc_channel *(*grpc_lame_client_channel_create_type)(const char *target, grpc_status_code error_code, const char *error_message);
-extern grpc_lame_client_channel_create_type grpc_lame_client_channel_create_import;
-#define grpc_lame_client_channel_create grpc_lame_client_channel_create_import
-typedef void(*grpc_channel_destroy_type)(grpc_channel *channel);
-extern grpc_channel_destroy_type grpc_channel_destroy_import;
-#define grpc_channel_destroy grpc_channel_destroy_import
-typedef grpc_call_error(*grpc_call_cancel_type)(grpc_call *call, void *reserved);
-extern grpc_call_cancel_type grpc_call_cancel_import;
-#define grpc_call_cancel grpc_call_cancel_import
-typedef grpc_call_error(*grpc_call_cancel_with_status_type)(grpc_call *call, grpc_status_code status, const char *description, void *reserved);
-extern grpc_call_cancel_with_status_type grpc_call_cancel_with_status_import;
-#define grpc_call_cancel_with_status grpc_call_cancel_with_status_import
-typedef void(*grpc_call_destroy_type)(grpc_call *call);
-extern grpc_call_destroy_type grpc_call_destroy_import;
-#define grpc_call_destroy grpc_call_destroy_import
-typedef grpc_call_error(*grpc_server_request_call_type)(grpc_server *server, grpc_call **call, grpc_call_details *details, grpc_metadata_array *request_metadata, grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag_new);
-extern grpc_server_request_call_type grpc_server_request_call_import;
-#define grpc_server_request_call grpc_server_request_call_import
-typedef void *(*grpc_server_register_method_type)(grpc_server *server, const char *method, const char *host, grpc_server_register_method_payload_handling payload_handling, uint32_t flags);
-extern grpc_server_register_method_type grpc_server_register_method_import;
-#define grpc_server_register_method grpc_server_register_method_import
-typedef grpc_call_error(*grpc_server_request_registered_call_type)(grpc_server *server, void *registered_method, grpc_call **call, gpr_timespec *deadline, grpc_metadata_array *request_metadata, grpc_byte_buffer **optional_payload, grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag_new);
-extern grpc_server_request_registered_call_type grpc_server_request_registered_call_import;
-#define grpc_server_request_registered_call grpc_server_request_registered_call_import
-typedef grpc_server *(*grpc_server_create_type)(const grpc_channel_args *args, void *reserved);
-extern grpc_server_create_type grpc_server_create_import;
-#define grpc_server_create grpc_server_create_import
-typedef void(*grpc_server_register_completion_queue_type)(grpc_server *server, grpc_completion_queue *cq, void *reserved);
-extern grpc_server_register_completion_queue_type grpc_server_register_completion_queue_import;
-#define grpc_server_register_completion_queue grpc_server_register_completion_queue_import
-typedef void(*grpc_server_register_non_listening_completion_queue_type)(grpc_server *server, grpc_completion_queue *q, void *reserved);
-extern grpc_server_register_non_listening_completion_queue_type grpc_server_register_non_listening_completion_queue_import;
-#define grpc_server_register_non_listening_completion_queue grpc_server_register_non_listening_completion_queue_import
-typedef int(*grpc_server_add_insecure_http2_port_type)(grpc_server *server, const char *addr);
-extern grpc_server_add_insecure_http2_port_type grpc_server_add_insecure_http2_port_import;
-#define grpc_server_add_insecure_http2_port grpc_server_add_insecure_http2_port_import
-typedef void(*grpc_server_start_type)(grpc_server *server);
-extern grpc_server_start_type grpc_server_start_import;
-#define grpc_server_start grpc_server_start_import
-typedef void(*grpc_server_shutdown_and_notify_type)(grpc_server *server, grpc_completion_queue *cq, void *tag);
-extern grpc_server_shutdown_and_notify_type grpc_server_shutdown_and_notify_import;
-#define grpc_server_shutdown_and_notify grpc_server_shutdown_and_notify_import
-typedef void(*grpc_server_cancel_all_calls_type)(grpc_server *server);
-extern grpc_server_cancel_all_calls_type grpc_server_cancel_all_calls_import;
-#define grpc_server_cancel_all_calls grpc_server_cancel_all_calls_import
-typedef void(*grpc_server_destroy_type)(grpc_server *server);
-extern grpc_server_destroy_type grpc_server_destroy_import;
-#define grpc_server_destroy grpc_server_destroy_import
-typedef int(*grpc_tracer_set_enabled_type)(const char *name, int enabled);
-extern grpc_tracer_set_enabled_type grpc_tracer_set_enabled_import;
-#define grpc_tracer_set_enabled grpc_tracer_set_enabled_import
-typedef int(*grpc_header_key_is_legal_type)(const char *key, size_t length);
-extern grpc_header_key_is_legal_type grpc_header_key_is_legal_import;
-#define grpc_header_key_is_legal grpc_header_key_is_legal_import
-typedef int(*grpc_header_nonbin_value_is_legal_type)(const char *value, size_t length);
-extern grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import;
-#define grpc_header_nonbin_value_is_legal grpc_header_nonbin_value_is_legal_import
-typedef int(*grpc_is_binary_header_type)(const char *key, size_t length);
-extern grpc_is_binary_header_type grpc_is_binary_header_import;
-#define grpc_is_binary_header grpc_is_binary_header_import
-typedef const char *(*grpc_call_error_to_string_type)(grpc_call_error error);
-extern grpc_call_error_to_string_type grpc_call_error_to_string_import;
-#define grpc_call_error_to_string grpc_call_error_to_string_import
-typedef grpc_channel *(*grpc_insecure_channel_create_from_fd_type)(const char *target, int fd, const grpc_channel_args *args);
-extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
-#define grpc_insecure_channel_create_from_fd grpc_insecure_channel_create_from_fd_import
-typedef void(*grpc_server_add_insecure_channel_from_fd_type)(grpc_server *server, grpc_completion_queue *cq, int fd);
-extern grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
-#define grpc_server_add_insecure_channel_from_fd grpc_server_add_insecure_channel_from_fd_import
-typedef const grpc_auth_property *(*grpc_auth_property_iterator_next_type)(grpc_auth_property_iterator *it);
-extern grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import;
-#define grpc_auth_property_iterator_next grpc_auth_property_iterator_next_import
-typedef grpc_auth_property_iterator(*grpc_auth_context_property_iterator_type)(const grpc_auth_context *ctx);
-extern grpc_auth_context_property_iterator_type grpc_auth_context_property_iterator_import;
-#define grpc_auth_context_property_iterator grpc_auth_context_property_iterator_import
-typedef grpc_auth_property_iterator(*grpc_auth_context_peer_identity_type)(const grpc_auth_context *ctx);
-extern grpc_auth_context_peer_identity_type grpc_auth_context_peer_identity_import;
-#define grpc_auth_context_peer_identity grpc_auth_context_peer_identity_import
-typedef grpc_auth_property_iterator(*grpc_auth_context_find_properties_by_name_type)(const grpc_auth_context *ctx, const char *name);
-extern grpc_auth_context_find_properties_by_name_type grpc_auth_context_find_properties_by_name_import;
-#define grpc_auth_context_find_properties_by_name grpc_auth_context_find_properties_by_name_import
-typedef const char *(*grpc_auth_context_peer_identity_property_name_type)(const grpc_auth_context *ctx);
-extern grpc_auth_context_peer_identity_property_name_type grpc_auth_context_peer_identity_property_name_import;
-#define grpc_auth_context_peer_identity_property_name grpc_auth_context_peer_identity_property_name_import
-typedef int(*grpc_auth_context_peer_is_authenticated_type)(const grpc_auth_context *ctx);
-extern grpc_auth_context_peer_is_authenticated_type grpc_auth_context_peer_is_authenticated_import;
-#define grpc_auth_context_peer_is_authenticated grpc_auth_context_peer_is_authenticated_import
-typedef grpc_auth_context *(*grpc_call_auth_context_type)(grpc_call *call);
-extern grpc_call_auth_context_type grpc_call_auth_context_import;
-#define grpc_call_auth_context grpc_call_auth_context_import
-typedef void(*grpc_auth_context_release_type)(grpc_auth_context *context);
-extern grpc_auth_context_release_type grpc_auth_context_release_import;
-#define grpc_auth_context_release grpc_auth_context_release_import
-typedef void(*grpc_auth_context_add_property_type)(grpc_auth_context *ctx, const char *name, const char *value, size_t value_length);
-extern grpc_auth_context_add_property_type grpc_auth_context_add_property_import;
-#define grpc_auth_context_add_property grpc_auth_context_add_property_import
-typedef void(*grpc_auth_context_add_cstring_property_type)(grpc_auth_context *ctx, const char *name, const char *value);
-extern grpc_auth_context_add_cstring_property_type grpc_auth_context_add_cstring_property_import;
-#define grpc_auth_context_add_cstring_property grpc_auth_context_add_cstring_property_import
-typedef int(*grpc_auth_context_set_peer_identity_property_name_type)(grpc_auth_context *ctx, const char *name);
-extern grpc_auth_context_set_peer_identity_property_name_type grpc_auth_context_set_peer_identity_property_name_import;
-#define grpc_auth_context_set_peer_identity_property_name grpc_auth_context_set_peer_identity_property_name_import
-typedef void(*grpc_channel_credentials_release_type)(grpc_channel_credentials *creds);
-extern grpc_channel_credentials_release_type grpc_channel_credentials_release_import;
-#define grpc_channel_credentials_release grpc_channel_credentials_release_import
-typedef grpc_channel_credentials *(*grpc_google_default_credentials_create_type)(void);
-extern grpc_google_default_credentials_create_type grpc_google_default_credentials_create_import;
-#define grpc_google_default_credentials_create grpc_google_default_credentials_create_import
-typedef void(*grpc_set_ssl_roots_override_callback_type)(grpc_ssl_roots_override_callback cb);
-extern grpc_set_ssl_roots_override_callback_type grpc_set_ssl_roots_override_callback_import;
-#define grpc_set_ssl_roots_override_callback grpc_set_ssl_roots_override_callback_import
-typedef grpc_channel_credentials *(*grpc_ssl_credentials_create_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair, void *reserved);
-extern grpc_ssl_credentials_create_type grpc_ssl_credentials_create_import;
-#define grpc_ssl_credentials_create grpc_ssl_credentials_create_import
-typedef void(*grpc_call_credentials_release_type)(grpc_call_credentials *creds);
-extern grpc_call_credentials_release_type grpc_call_credentials_release_import;
-#define grpc_call_credentials_release grpc_call_credentials_release_import
-typedef grpc_channel_credentials *(*grpc_composite_channel_credentials_create_type)(grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds, void *reserved);
-extern grpc_composite_channel_credentials_create_type grpc_composite_channel_credentials_create_import;
-#define grpc_composite_channel_credentials_create grpc_composite_channel_credentials_create_import
-typedef grpc_call_credentials *(*grpc_composite_call_credentials_create_type)(grpc_call_credentials *creds1, grpc_call_credentials *creds2, void *reserved);
-extern grpc_composite_call_credentials_create_type grpc_composite_call_credentials_create_import;
-#define grpc_composite_call_credentials_create grpc_composite_call_credentials_create_import
-typedef grpc_call_credentials *(*grpc_google_compute_engine_credentials_create_type)(void *reserved);
-extern grpc_google_compute_engine_credentials_create_type grpc_google_compute_engine_credentials_create_import;
-#define grpc_google_compute_engine_credentials_create grpc_google_compute_engine_credentials_create_import
-typedef gpr_timespec(*grpc_max_auth_token_lifetime_type)();
-extern grpc_max_auth_token_lifetime_type grpc_max_auth_token_lifetime_import;
-#define grpc_max_auth_token_lifetime grpc_max_auth_token_lifetime_import
-typedef grpc_call_credentials *(*grpc_service_account_jwt_access_credentials_create_type)(const char *json_key, gpr_timespec token_lifetime, void *reserved);
-extern grpc_service_account_jwt_access_credentials_create_type grpc_service_account_jwt_access_credentials_create_import;
-#define grpc_service_account_jwt_access_credentials_create grpc_service_account_jwt_access_credentials_create_import
-typedef grpc_call_credentials *(*grpc_google_refresh_token_credentials_create_type)(const char *json_refresh_token, void *reserved);
-extern grpc_google_refresh_token_credentials_create_type grpc_google_refresh_token_credentials_create_import;
-#define grpc_google_refresh_token_credentials_create grpc_google_refresh_token_credentials_create_import
-typedef grpc_call_credentials *(*grpc_access_token_credentials_create_type)(const char *access_token, void *reserved);
-extern grpc_access_token_credentials_create_type grpc_access_token_credentials_create_import;
-#define grpc_access_token_credentials_create grpc_access_token_credentials_create_import
-typedef grpc_call_credentials *(*grpc_google_iam_credentials_create_type)(const char *authorization_token, const char *authority_selector, void *reserved);
-extern grpc_google_iam_credentials_create_type grpc_google_iam_credentials_create_import;
-#define grpc_google_iam_credentials_create grpc_google_iam_credentials_create_import
-typedef grpc_call_credentials *(*grpc_metadata_credentials_create_from_plugin_type)(grpc_metadata_credentials_plugin plugin, void *reserved);
-extern grpc_metadata_credentials_create_from_plugin_type grpc_metadata_credentials_create_from_plugin_import;
-#define grpc_metadata_credentials_create_from_plugin grpc_metadata_credentials_create_from_plugin_import
-typedef grpc_channel *(*grpc_secure_channel_create_type)(grpc_channel_credentials *creds, const char *target, const grpc_channel_args *args, void *reserved);
-extern grpc_secure_channel_create_type grpc_secure_channel_create_import;
-#define grpc_secure_channel_create grpc_secure_channel_create_import
-typedef void(*grpc_server_credentials_release_type)(grpc_server_credentials *creds);
-extern grpc_server_credentials_release_type grpc_server_credentials_release_import;
-#define grpc_server_credentials_release grpc_server_credentials_release_import
-typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, int force_client_auth, void *reserved);
-extern grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import;
-#define grpc_ssl_server_credentials_create grpc_ssl_server_credentials_create_import
-typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_ex_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, grpc_ssl_client_certificate_request_type client_certificate_request, void *reserved);
-extern grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import;
-#define grpc_ssl_server_credentials_create_ex grpc_ssl_server_credentials_create_ex_import
-typedef int(*grpc_server_add_secure_http2_port_type)(grpc_server *server, const char *addr, grpc_server_credentials *creds);
-extern grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import;
-#define grpc_server_add_secure_http2_port grpc_server_add_secure_http2_port_import
-typedef grpc_call_error(*grpc_call_set_credentials_type)(grpc_call *call, grpc_call_credentials *creds);
-extern grpc_call_set_credentials_type grpc_call_set_credentials_import;
-#define grpc_call_set_credentials grpc_call_set_credentials_import
-typedef void(*grpc_server_credentials_set_auth_metadata_processor_type)(grpc_server_credentials *creds, grpc_auth_metadata_processor processor);
-extern grpc_server_credentials_set_auth_metadata_processor_type grpc_server_credentials_set_auth_metadata_processor_import;
-#define grpc_server_credentials_set_auth_metadata_processor grpc_server_credentials_set_auth_metadata_processor_import
-typedef void *(*gpr_malloc_type)(size_t size);
-extern gpr_malloc_type gpr_malloc_import;
-#define gpr_malloc gpr_malloc_import
-typedef void(*gpr_free_type)(void *ptr);
-extern gpr_free_type gpr_free_import;
-#define gpr_free gpr_free_import
-typedef void *(*gpr_realloc_type)(void *p, size_t size);
-extern gpr_realloc_type gpr_realloc_import;
-#define gpr_realloc gpr_realloc_import
-typedef void *(*gpr_malloc_aligned_type)(size_t size, size_t alignment_log);
-extern gpr_malloc_aligned_type gpr_malloc_aligned_import;
-#define gpr_malloc_aligned gpr_malloc_aligned_import
-typedef void(*gpr_free_aligned_type)(void *ptr);
-extern gpr_free_aligned_type gpr_free_aligned_import;
-#define gpr_free_aligned gpr_free_aligned_import
-typedef void(*gpr_set_allocation_functions_type)(gpr_allocation_functions functions);
-extern gpr_set_allocation_functions_type gpr_set_allocation_functions_import;
-#define gpr_set_allocation_functions gpr_set_allocation_functions_import
-typedef gpr_allocation_functions(*gpr_get_allocation_functions_type)();
-extern gpr_get_allocation_functions_type gpr_get_allocation_functions_import;
-#define gpr_get_allocation_functions gpr_get_allocation_functions_import
-typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_create_type)(gpr_slice *slices, size_t nslices);
-extern grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import;
-#define grpc_raw_byte_buffer_create grpc_raw_byte_buffer_create_import
-typedef grpc_byte_buffer *(*grpc_raw_compressed_byte_buffer_create_type)(gpr_slice *slices, size_t nslices, grpc_compression_algorithm compression);
-extern grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import;
-#define grpc_raw_compressed_byte_buffer_create grpc_raw_compressed_byte_buffer_create_import
-typedef grpc_byte_buffer *(*grpc_byte_buffer_copy_type)(grpc_byte_buffer *bb);
-extern grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import;
-#define grpc_byte_buffer_copy grpc_byte_buffer_copy_import
-typedef size_t(*grpc_byte_buffer_length_type)(grpc_byte_buffer *bb);
-extern grpc_byte_buffer_length_type grpc_byte_buffer_length_import;
-#define grpc_byte_buffer_length grpc_byte_buffer_length_import
-typedef void(*grpc_byte_buffer_destroy_type)(grpc_byte_buffer *byte_buffer);
-extern grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import;
-#define grpc_byte_buffer_destroy grpc_byte_buffer_destroy_import
-typedef void(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer);
-extern grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import;
-#define grpc_byte_buffer_reader_init grpc_byte_buffer_reader_init_import
-typedef void(*grpc_byte_buffer_reader_destroy_type)(grpc_byte_buffer_reader *reader);
-extern grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import;
-#define grpc_byte_buffer_reader_destroy grpc_byte_buffer_reader_destroy_import
-typedef int(*grpc_byte_buffer_reader_next_type)(grpc_byte_buffer_reader *reader, gpr_slice *slice);
-extern grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import;
-#define grpc_byte_buffer_reader_next grpc_byte_buffer_reader_next_import
-typedef gpr_slice(*grpc_byte_buffer_reader_readall_type)(grpc_byte_buffer_reader *reader);
-extern grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import;
-#define grpc_byte_buffer_reader_readall grpc_byte_buffer_reader_readall_import
-typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_from_reader_type)(grpc_byte_buffer_reader *reader);
-extern grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import;
-#define grpc_raw_byte_buffer_from_reader grpc_raw_byte_buffer_from_reader_import
-typedef void(*gpr_log_type)(const char *file, int line, gpr_log_severity severity, const char *format, ...) GPRC_PRINT_FORMAT_CHECK(4, 5);
-extern gpr_log_type gpr_log_import;
-#define gpr_log gpr_log_import
-typedef void(*gpr_log_message_type)(const char *file, int line, gpr_log_severity severity, const char *message);
-extern gpr_log_message_type gpr_log_message_import;
-#define gpr_log_message gpr_log_message_import
-typedef void(*gpr_set_log_verbosity_type)(gpr_log_severity min_severity_to_print);
-extern gpr_set_log_verbosity_type gpr_set_log_verbosity_import;
-#define gpr_set_log_verbosity gpr_set_log_verbosity_import
-typedef void(*gpr_log_verbosity_init_type)();
-extern gpr_log_verbosity_init_type gpr_log_verbosity_init_import;
-#define gpr_log_verbosity_init gpr_log_verbosity_init_import
-typedef void(*gpr_set_log_function_type)(gpr_log_func func);
-extern gpr_set_log_function_type gpr_set_log_function_import;
-#define gpr_set_log_function gpr_set_log_function_import
-typedef gpr_slice(*gpr_slice_ref_type)(gpr_slice s);
-extern gpr_slice_ref_type gpr_slice_ref_import;
-#define gpr_slice_ref gpr_slice_ref_import
-typedef void(*gpr_slice_unref_type)(gpr_slice s);
-extern gpr_slice_unref_type gpr_slice_unref_import;
-#define gpr_slice_unref gpr_slice_unref_import
-typedef gpr_slice(*gpr_slice_new_type)(void *p, size_t len, void (*destroy)(void *));
-extern gpr_slice_new_type gpr_slice_new_import;
-#define gpr_slice_new gpr_slice_new_import
-typedef gpr_slice(*gpr_slice_new_with_len_type)(void *p, size_t len, void (*destroy)(void *, size_t));
-extern gpr_slice_new_with_len_type gpr_slice_new_with_len_import;
-#define gpr_slice_new_with_len gpr_slice_new_with_len_import
-typedef gpr_slice(*gpr_slice_malloc_type)(size_t length);
-extern gpr_slice_malloc_type gpr_slice_malloc_import;
-#define gpr_slice_malloc gpr_slice_malloc_import
-typedef gpr_slice(*gpr_slice_from_copied_string_type)(const char *source);
-extern gpr_slice_from_copied_string_type gpr_slice_from_copied_string_import;
-#define gpr_slice_from_copied_string gpr_slice_from_copied_string_import
-typedef gpr_slice(*gpr_slice_from_copied_buffer_type)(const char *source, size_t len);
-extern gpr_slice_from_copied_buffer_type gpr_slice_from_copied_buffer_import;
-#define gpr_slice_from_copied_buffer gpr_slice_from_copied_buffer_import
-typedef gpr_slice(*gpr_slice_from_static_string_type)(const char *source);
-extern gpr_slice_from_static_string_type gpr_slice_from_static_string_import;
-#define gpr_slice_from_static_string gpr_slice_from_static_string_import
-typedef gpr_slice(*gpr_slice_sub_type)(gpr_slice s, size_t begin, size_t end);
-extern gpr_slice_sub_type gpr_slice_sub_import;
-#define gpr_slice_sub gpr_slice_sub_import
-typedef gpr_slice(*gpr_slice_sub_no_ref_type)(gpr_slice s, size_t begin, size_t end);
-extern gpr_slice_sub_no_ref_type gpr_slice_sub_no_ref_import;
-#define gpr_slice_sub_no_ref gpr_slice_sub_no_ref_import
-typedef gpr_slice(*gpr_slice_split_tail_type)(gpr_slice *s, size_t split);
-extern gpr_slice_split_tail_type gpr_slice_split_tail_import;
-#define gpr_slice_split_tail gpr_slice_split_tail_import
-typedef gpr_slice(*gpr_slice_split_head_type)(gpr_slice *s, size_t split);
-extern gpr_slice_split_head_type gpr_slice_split_head_import;
-#define gpr_slice_split_head gpr_slice_split_head_import
-typedef gpr_slice(*gpr_empty_slice_type)(void);
-extern gpr_empty_slice_type gpr_empty_slice_import;
-#define gpr_empty_slice gpr_empty_slice_import
-typedef int(*gpr_slice_cmp_type)(gpr_slice a, gpr_slice b);
-extern gpr_slice_cmp_type gpr_slice_cmp_import;
-#define gpr_slice_cmp gpr_slice_cmp_import
-typedef int(*gpr_slice_str_cmp_type)(gpr_slice a, const char *b);
-extern gpr_slice_str_cmp_type gpr_slice_str_cmp_import;
-#define gpr_slice_str_cmp gpr_slice_str_cmp_import
-typedef void(*gpr_slice_buffer_init_type)(gpr_slice_buffer *sb);
-extern gpr_slice_buffer_init_type gpr_slice_buffer_init_import;
-#define gpr_slice_buffer_init gpr_slice_buffer_init_import
-typedef void(*gpr_slice_buffer_destroy_type)(gpr_slice_buffer *sb);
-extern gpr_slice_buffer_destroy_type gpr_slice_buffer_destroy_import;
-#define gpr_slice_buffer_destroy gpr_slice_buffer_destroy_import
-typedef void(*gpr_slice_buffer_add_type)(gpr_slice_buffer *sb, gpr_slice slice);
-extern gpr_slice_buffer_add_type gpr_slice_buffer_add_import;
-#define gpr_slice_buffer_add gpr_slice_buffer_add_import
-typedef size_t(*gpr_slice_buffer_add_indexed_type)(gpr_slice_buffer *sb, gpr_slice slice);
-extern gpr_slice_buffer_add_indexed_type gpr_slice_buffer_add_indexed_import;
-#define gpr_slice_buffer_add_indexed gpr_slice_buffer_add_indexed_import
-typedef void(*gpr_slice_buffer_addn_type)(gpr_slice_buffer *sb, gpr_slice *slices, size_t n);
-extern gpr_slice_buffer_addn_type gpr_slice_buffer_addn_import;
-#define gpr_slice_buffer_addn gpr_slice_buffer_addn_import
-typedef uint8_t *(*gpr_slice_buffer_tiny_add_type)(gpr_slice_buffer *sb, size_t len);
-extern gpr_slice_buffer_tiny_add_type gpr_slice_buffer_tiny_add_import;
-#define gpr_slice_buffer_tiny_add gpr_slice_buffer_tiny_add_import
-typedef void(*gpr_slice_buffer_pop_type)(gpr_slice_buffer *sb);
-extern gpr_slice_buffer_pop_type gpr_slice_buffer_pop_import;
-#define gpr_slice_buffer_pop gpr_slice_buffer_pop_import
-typedef void(*gpr_slice_buffer_reset_and_unref_type)(gpr_slice_buffer *sb);
-extern gpr_slice_buffer_reset_and_unref_type gpr_slice_buffer_reset_and_unref_import;
-#define gpr_slice_buffer_reset_and_unref gpr_slice_buffer_reset_and_unref_import
-typedef void(*gpr_slice_buffer_swap_type)(gpr_slice_buffer *a, gpr_slice_buffer *b);
-extern gpr_slice_buffer_swap_type gpr_slice_buffer_swap_import;
-#define gpr_slice_buffer_swap gpr_slice_buffer_swap_import
-typedef void(*gpr_slice_buffer_move_into_type)(gpr_slice_buffer *src, gpr_slice_buffer *dst);
-extern gpr_slice_buffer_move_into_type gpr_slice_buffer_move_into_import;
-#define gpr_slice_buffer_move_into gpr_slice_buffer_move_into_import
-typedef void(*gpr_slice_buffer_trim_end_type)(gpr_slice_buffer *src, size_t n, gpr_slice_buffer *garbage);
-extern gpr_slice_buffer_trim_end_type gpr_slice_buffer_trim_end_import;
-#define gpr_slice_buffer_trim_end gpr_slice_buffer_trim_end_import
-typedef void(*gpr_slice_buffer_move_first_type)(gpr_slice_buffer *src, size_t n, gpr_slice_buffer *dst);
-extern gpr_slice_buffer_move_first_type gpr_slice_buffer_move_first_import;
-#define gpr_slice_buffer_move_first gpr_slice_buffer_move_first_import
-typedef gpr_slice(*gpr_slice_buffer_take_first_type)(gpr_slice_buffer *src);
-extern gpr_slice_buffer_take_first_type gpr_slice_buffer_take_first_import;
-#define gpr_slice_buffer_take_first gpr_slice_buffer_take_first_import
-typedef void(*gpr_mu_init_type)(gpr_mu *mu);
-extern gpr_mu_init_type gpr_mu_init_import;
-#define gpr_mu_init gpr_mu_init_import
-typedef void(*gpr_mu_destroy_type)(gpr_mu *mu);
-extern gpr_mu_destroy_type gpr_mu_destroy_import;
-#define gpr_mu_destroy gpr_mu_destroy_import
-typedef void(*gpr_mu_lock_type)(gpr_mu *mu);
-extern gpr_mu_lock_type gpr_mu_lock_import;
-#define gpr_mu_lock gpr_mu_lock_import
-typedef void(*gpr_mu_unlock_type)(gpr_mu *mu);
-extern gpr_mu_unlock_type gpr_mu_unlock_import;
-#define gpr_mu_unlock gpr_mu_unlock_import
-typedef int(*gpr_mu_trylock_type)(gpr_mu *mu);
-extern gpr_mu_trylock_type gpr_mu_trylock_import;
-#define gpr_mu_trylock gpr_mu_trylock_import
-typedef void(*gpr_cv_init_type)(gpr_cv *cv);
-extern gpr_cv_init_type gpr_cv_init_import;
-#define gpr_cv_init gpr_cv_init_import
-typedef void(*gpr_cv_destroy_type)(gpr_cv *cv);
-extern gpr_cv_destroy_type gpr_cv_destroy_import;
-#define gpr_cv_destroy gpr_cv_destroy_import
-typedef int(*gpr_cv_wait_type)(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline);
-extern gpr_cv_wait_type gpr_cv_wait_import;
-#define gpr_cv_wait gpr_cv_wait_import
-typedef void(*gpr_cv_signal_type)(gpr_cv *cv);
-extern gpr_cv_signal_type gpr_cv_signal_import;
-#define gpr_cv_signal gpr_cv_signal_import
-typedef void(*gpr_cv_broadcast_type)(gpr_cv *cv);
-extern gpr_cv_broadcast_type gpr_cv_broadcast_import;
-#define gpr_cv_broadcast gpr_cv_broadcast_import
-typedef void(*gpr_once_init_type)(gpr_once *once, void (*init_routine)(void));
-extern gpr_once_init_type gpr_once_init_import;
-#define gpr_once_init gpr_once_init_import
-typedef void(*gpr_event_init_type)(gpr_event *ev);
-extern gpr_event_init_type gpr_event_init_import;
-#define gpr_event_init gpr_event_init_import
-typedef void(*gpr_event_set_type)(gpr_event *ev, void *value);
-extern gpr_event_set_type gpr_event_set_import;
-#define gpr_event_set gpr_event_set_import
-typedef void *(*gpr_event_get_type)(gpr_event *ev);
-extern gpr_event_get_type gpr_event_get_import;
-#define gpr_event_get gpr_event_get_import
-typedef void *(*gpr_event_wait_type)(gpr_event *ev, gpr_timespec abs_deadline);
-extern gpr_event_wait_type gpr_event_wait_import;
-#define gpr_event_wait gpr_event_wait_import
-typedef void(*gpr_ref_init_type)(gpr_refcount *r, int n);
-extern gpr_ref_init_type gpr_ref_init_import;
-#define gpr_ref_init gpr_ref_init_import
-typedef void(*gpr_ref_type)(gpr_refcount *r);
-extern gpr_ref_type gpr_ref_import;
-#define gpr_ref gpr_ref_import
-typedef void(*gpr_ref_non_zero_type)(gpr_refcount *r);
-extern gpr_ref_non_zero_type gpr_ref_non_zero_import;
-#define gpr_ref_non_zero gpr_ref_non_zero_import
-typedef void(*gpr_refn_type)(gpr_refcount *r, int n);
-extern gpr_refn_type gpr_refn_import;
-#define gpr_refn gpr_refn_import
-typedef int(*gpr_unref_type)(gpr_refcount *r);
-extern gpr_unref_type gpr_unref_import;
-#define gpr_unref gpr_unref_import
-typedef void(*gpr_stats_init_type)(gpr_stats_counter *c, intptr_t n);
-extern gpr_stats_init_type gpr_stats_init_import;
-#define gpr_stats_init gpr_stats_init_import
-typedef void(*gpr_stats_inc_type)(gpr_stats_counter *c, intptr_t inc);
-extern gpr_stats_inc_type gpr_stats_inc_import;
-#define gpr_stats_inc gpr_stats_inc_import
-typedef intptr_t(*gpr_stats_read_type)(const gpr_stats_counter *c);
-extern gpr_stats_read_type gpr_stats_read_import;
-#define gpr_stats_read gpr_stats_read_import
-typedef gpr_timespec(*gpr_time_0_type)(gpr_clock_type type);
-extern gpr_time_0_type gpr_time_0_import;
-#define gpr_time_0 gpr_time_0_import
-typedef gpr_timespec(*gpr_inf_future_type)(gpr_clock_type type);
-extern gpr_inf_future_type gpr_inf_future_import;
-#define gpr_inf_future gpr_inf_future_import
-typedef gpr_timespec(*gpr_inf_past_type)(gpr_clock_type type);
-extern gpr_inf_past_type gpr_inf_past_import;
-#define gpr_inf_past gpr_inf_past_import
-typedef void(*gpr_time_init_type)(void);
-extern gpr_time_init_type gpr_time_init_import;
-#define gpr_time_init gpr_time_init_import
-typedef gpr_timespec(*gpr_now_type)(gpr_clock_type clock);
-extern gpr_now_type gpr_now_import;
-#define gpr_now gpr_now_import
-typedef gpr_timespec(*gpr_convert_clock_type_type)(gpr_timespec t, gpr_clock_type target_clock);
-extern gpr_convert_clock_type_type gpr_convert_clock_type_import;
-#define gpr_convert_clock_type gpr_convert_clock_type_import
-typedef int(*gpr_time_cmp_type)(gpr_timespec a, gpr_timespec b);
-extern gpr_time_cmp_type gpr_time_cmp_import;
-#define gpr_time_cmp gpr_time_cmp_import
-typedef gpr_timespec(*gpr_time_max_type)(gpr_timespec a, gpr_timespec b);
-extern gpr_time_max_type gpr_time_max_import;
-#define gpr_time_max gpr_time_max_import
-typedef gpr_timespec(*gpr_time_min_type)(gpr_timespec a, gpr_timespec b);
-extern gpr_time_min_type gpr_time_min_import;
-#define gpr_time_min gpr_time_min_import
-typedef gpr_timespec(*gpr_time_add_type)(gpr_timespec a, gpr_timespec b);
-extern gpr_time_add_type gpr_time_add_import;
-#define gpr_time_add gpr_time_add_import
-typedef gpr_timespec(*gpr_time_sub_type)(gpr_timespec a, gpr_timespec b);
-extern gpr_time_sub_type gpr_time_sub_import;
-#define gpr_time_sub gpr_time_sub_import
-typedef gpr_timespec(*gpr_time_from_micros_type)(int64_t x, gpr_clock_type clock_type);
-extern gpr_time_from_micros_type gpr_time_from_micros_import;
-#define gpr_time_from_micros gpr_time_from_micros_import
-typedef gpr_timespec(*gpr_time_from_nanos_type)(int64_t x, gpr_clock_type clock_type);
-extern gpr_time_from_nanos_type gpr_time_from_nanos_import;
-#define gpr_time_from_nanos gpr_time_from_nanos_import
-typedef gpr_timespec(*gpr_time_from_millis_type)(int64_t x, gpr_clock_type clock_type);
-extern gpr_time_from_millis_type gpr_time_from_millis_import;
-#define gpr_time_from_millis gpr_time_from_millis_import
-typedef gpr_timespec(*gpr_time_from_seconds_type)(int64_t x, gpr_clock_type clock_type);
-extern gpr_time_from_seconds_type gpr_time_from_seconds_import;
-#define gpr_time_from_seconds gpr_time_from_seconds_import
-typedef gpr_timespec(*gpr_time_from_minutes_type)(int64_t x, gpr_clock_type clock_type);
-extern gpr_time_from_minutes_type gpr_time_from_minutes_import;
-#define gpr_time_from_minutes gpr_time_from_minutes_import
-typedef gpr_timespec(*gpr_time_from_hours_type)(int64_t x, gpr_clock_type clock_type);
-extern gpr_time_from_hours_type gpr_time_from_hours_import;
-#define gpr_time_from_hours gpr_time_from_hours_import
-typedef int32_t(*gpr_time_to_millis_type)(gpr_timespec timespec);
-extern gpr_time_to_millis_type gpr_time_to_millis_import;
-#define gpr_time_to_millis gpr_time_to_millis_import
-typedef int(*gpr_time_similar_type)(gpr_timespec a, gpr_timespec b, gpr_timespec threshold);
-extern gpr_time_similar_type gpr_time_similar_import;
-#define gpr_time_similar gpr_time_similar_import
-typedef void(*gpr_sleep_until_type)(gpr_timespec until);
-extern gpr_sleep_until_type gpr_sleep_until_import;
-#define gpr_sleep_until gpr_sleep_until_import
-typedef double(*gpr_timespec_to_micros_type)(gpr_timespec t);
-extern gpr_timespec_to_micros_type gpr_timespec_to_micros_import;
-#define gpr_timespec_to_micros gpr_timespec_to_micros_import
-typedef gpr_avl(*gpr_avl_create_type)(const gpr_avl_vtable *vtable);
-extern gpr_avl_create_type gpr_avl_create_import;
-#define gpr_avl_create gpr_avl_create_import
-typedef gpr_avl(*gpr_avl_ref_type)(gpr_avl avl);
-extern gpr_avl_ref_type gpr_avl_ref_import;
-#define gpr_avl_ref gpr_avl_ref_import
-typedef void(*gpr_avl_unref_type)(gpr_avl avl);
-extern gpr_avl_unref_type gpr_avl_unref_import;
-#define gpr_avl_unref gpr_avl_unref_import
-typedef gpr_avl(*gpr_avl_add_type)(gpr_avl avl, void *key, void *value);
-extern gpr_avl_add_type gpr_avl_add_import;
-#define gpr_avl_add gpr_avl_add_import
-typedef gpr_avl(*gpr_avl_remove_type)(gpr_avl avl, void *key);
-extern gpr_avl_remove_type gpr_avl_remove_import;
-#define gpr_avl_remove gpr_avl_remove_import
-typedef void *(*gpr_avl_get_type)(gpr_avl avl, void *key);
-extern gpr_avl_get_type gpr_avl_get_import;
-#define gpr_avl_get gpr_avl_get_import
-typedef int(*gpr_avl_maybe_get_type)(gpr_avl avl, void *key, void **value);
-extern gpr_avl_maybe_get_type gpr_avl_maybe_get_import;
-#define gpr_avl_maybe_get gpr_avl_maybe_get_import
-typedef int(*gpr_avl_is_empty_type)(gpr_avl avl);
-extern gpr_avl_is_empty_type gpr_avl_is_empty_import;
-#define gpr_avl_is_empty gpr_avl_is_empty_import
-typedef gpr_cmdline *(*gpr_cmdline_create_type)(const char *description);
-extern gpr_cmdline_create_type gpr_cmdline_create_import;
-#define gpr_cmdline_create gpr_cmdline_create_import
-typedef void(*gpr_cmdline_add_int_type)(gpr_cmdline *cl, const char *name, const char *help, int *value);
-extern gpr_cmdline_add_int_type gpr_cmdline_add_int_import;
-#define gpr_cmdline_add_int gpr_cmdline_add_int_import
-typedef void(*gpr_cmdline_add_flag_type)(gpr_cmdline *cl, const char *name, const char *help, int *value);
-extern gpr_cmdline_add_flag_type gpr_cmdline_add_flag_import;
-#define gpr_cmdline_add_flag gpr_cmdline_add_flag_import
-typedef void(*gpr_cmdline_add_string_type)(gpr_cmdline *cl, const char *name, const char *help, char **value);
-extern gpr_cmdline_add_string_type gpr_cmdline_add_string_import;
-#define gpr_cmdline_add_string gpr_cmdline_add_string_import
-typedef void(*gpr_cmdline_on_extra_arg_type)(gpr_cmdline *cl, const char *name, const char *help, void (*on_extra_arg)(void *user_data, const char *arg), void *user_data);
-extern gpr_cmdline_on_extra_arg_type gpr_cmdline_on_extra_arg_import;
-#define gpr_cmdline_on_extra_arg gpr_cmdline_on_extra_arg_import
-typedef void(*gpr_cmdline_set_survive_failure_type)(gpr_cmdline *cl);
-extern gpr_cmdline_set_survive_failure_type gpr_cmdline_set_survive_failure_import;
-#define gpr_cmdline_set_survive_failure gpr_cmdline_set_survive_failure_import
-typedef int(*gpr_cmdline_parse_type)(gpr_cmdline *cl, int argc, char **argv);
-extern gpr_cmdline_parse_type gpr_cmdline_parse_import;
-#define gpr_cmdline_parse gpr_cmdline_parse_import
-typedef void(*gpr_cmdline_destroy_type)(gpr_cmdline *cl);
-extern gpr_cmdline_destroy_type gpr_cmdline_destroy_import;
-#define gpr_cmdline_destroy gpr_cmdline_destroy_import
-typedef char *(*gpr_cmdline_usage_string_type)(gpr_cmdline *cl, const char *argv0);
-extern gpr_cmdline_usage_string_type gpr_cmdline_usage_string_import;
-#define gpr_cmdline_usage_string gpr_cmdline_usage_string_import
-typedef unsigned(*gpr_cpu_num_cores_type)(void);
-extern gpr_cpu_num_cores_type gpr_cpu_num_cores_import;
-#define gpr_cpu_num_cores gpr_cpu_num_cores_import
-typedef unsigned(*gpr_cpu_current_cpu_type)(void);
-extern gpr_cpu_current_cpu_type gpr_cpu_current_cpu_import;
-#define gpr_cpu_current_cpu gpr_cpu_current_cpu_import
-typedef gpr_histogram *(*gpr_histogram_create_type)(double resolution, double max_bucket_start);
-extern gpr_histogram_create_type gpr_histogram_create_import;
-#define gpr_histogram_create gpr_histogram_create_import
-typedef void(*gpr_histogram_destroy_type)(gpr_histogram *h);
-extern gpr_histogram_destroy_type gpr_histogram_destroy_import;
-#define gpr_histogram_destroy gpr_histogram_destroy_import
-typedef void(*gpr_histogram_add_type)(gpr_histogram *h, double x);
-extern gpr_histogram_add_type gpr_histogram_add_import;
-#define gpr_histogram_add gpr_histogram_add_import
-typedef int(*gpr_histogram_merge_type)(gpr_histogram *dst, const gpr_histogram *src);
-extern gpr_histogram_merge_type gpr_histogram_merge_import;
-#define gpr_histogram_merge gpr_histogram_merge_import
-typedef double(*gpr_histogram_percentile_type)(gpr_histogram *histogram, double percentile);
-extern gpr_histogram_percentile_type gpr_histogram_percentile_import;
-#define gpr_histogram_percentile gpr_histogram_percentile_import
-typedef double(*gpr_histogram_mean_type)(gpr_histogram *histogram);
-extern gpr_histogram_mean_type gpr_histogram_mean_import;
-#define gpr_histogram_mean gpr_histogram_mean_import
-typedef double(*gpr_histogram_stddev_type)(gpr_histogram *histogram);
-extern gpr_histogram_stddev_type gpr_histogram_stddev_import;
-#define gpr_histogram_stddev gpr_histogram_stddev_import
-typedef double(*gpr_histogram_variance_type)(gpr_histogram *histogram);
-extern gpr_histogram_variance_type gpr_histogram_variance_import;
-#define gpr_histogram_variance gpr_histogram_variance_import
-typedef double(*gpr_histogram_maximum_type)(gpr_histogram *histogram);
-extern gpr_histogram_maximum_type gpr_histogram_maximum_import;
-#define gpr_histogram_maximum gpr_histogram_maximum_import
-typedef double(*gpr_histogram_minimum_type)(gpr_histogram *histogram);
-extern gpr_histogram_minimum_type gpr_histogram_minimum_import;
-#define gpr_histogram_minimum gpr_histogram_minimum_import
-typedef double(*gpr_histogram_count_type)(gpr_histogram *histogram);
-extern gpr_histogram_count_type gpr_histogram_count_import;
-#define gpr_histogram_count gpr_histogram_count_import
-typedef double(*gpr_histogram_sum_type)(gpr_histogram *histogram);
-extern gpr_histogram_sum_type gpr_histogram_sum_import;
-#define gpr_histogram_sum gpr_histogram_sum_import
-typedef double(*gpr_histogram_sum_of_squares_type)(gpr_histogram *histogram);
-extern gpr_histogram_sum_of_squares_type gpr_histogram_sum_of_squares_import;
-#define gpr_histogram_sum_of_squares gpr_histogram_sum_of_squares_import
-typedef const uint32_t *(*gpr_histogram_get_contents_type)(gpr_histogram *histogram, size_t *count);
-extern gpr_histogram_get_contents_type gpr_histogram_get_contents_import;
-#define gpr_histogram_get_contents gpr_histogram_get_contents_import
-typedef void(*gpr_histogram_merge_contents_type)(gpr_histogram *histogram, const uint32_t *data, size_t data_count, double min_seen, double max_seen, double sum, double sum_of_squares, double count);
-extern gpr_histogram_merge_contents_type gpr_histogram_merge_contents_import;
-#define gpr_histogram_merge_contents gpr_histogram_merge_contents_import
-typedef int(*gpr_join_host_port_type)(char **out, const char *host, int port);
-extern gpr_join_host_port_type gpr_join_host_port_import;
-#define gpr_join_host_port gpr_join_host_port_import
-typedef int(*gpr_split_host_port_type)(const char *name, char **host, char **port);
-extern gpr_split_host_port_type gpr_split_host_port_import;
-#define gpr_split_host_port gpr_split_host_port_import
-typedef char *(*gpr_format_message_type)(int messageid);
-extern gpr_format_message_type gpr_format_message_import;
-#define gpr_format_message gpr_format_message_import
-typedef char *(*gpr_strdup_type)(const char *src);
-extern gpr_strdup_type gpr_strdup_import;
-#define gpr_strdup gpr_strdup_import
-typedef int(*gpr_asprintf_type)(char **strp, const char *format, ...) GPRC_PRINT_FORMAT_CHECK(2, 3);
-extern gpr_asprintf_type gpr_asprintf_import;
-#define gpr_asprintf gpr_asprintf_import
-typedef const char *(*gpr_subprocess_binary_extension_type)();
-extern gpr_subprocess_binary_extension_type gpr_subprocess_binary_extension_import;
-#define gpr_subprocess_binary_extension gpr_subprocess_binary_extension_import
-typedef gpr_subprocess *(*gpr_subprocess_create_type)(int argc, const char **argv);
-extern gpr_subprocess_create_type gpr_subprocess_create_import;
-#define gpr_subprocess_create gpr_subprocess_create_import
-typedef void(*gpr_subprocess_destroy_type)(gpr_subprocess *p);
-extern gpr_subprocess_destroy_type gpr_subprocess_destroy_import;
-#define gpr_subprocess_destroy gpr_subprocess_destroy_import
-typedef int(*gpr_subprocess_join_type)(gpr_subprocess *p);
-extern gpr_subprocess_join_type gpr_subprocess_join_import;
-#define gpr_subprocess_join gpr_subprocess_join_import
-typedef void(*gpr_subprocess_interrupt_type)(gpr_subprocess *p);
-extern gpr_subprocess_interrupt_type gpr_subprocess_interrupt_import;
-#define gpr_subprocess_interrupt gpr_subprocess_interrupt_import
-typedef int(*gpr_thd_new_type)(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg, const gpr_thd_options *options);
-extern gpr_thd_new_type gpr_thd_new_import;
-#define gpr_thd_new gpr_thd_new_import
-typedef gpr_thd_options(*gpr_thd_options_default_type)(void);
-extern gpr_thd_options_default_type gpr_thd_options_default_import;
-#define gpr_thd_options_default gpr_thd_options_default_import
-typedef void(*gpr_thd_options_set_detached_type)(gpr_thd_options *options);
-extern gpr_thd_options_set_detached_type gpr_thd_options_set_detached_import;
-#define gpr_thd_options_set_detached gpr_thd_options_set_detached_import
-typedef void(*gpr_thd_options_set_joinable_type)(gpr_thd_options *options);
-extern gpr_thd_options_set_joinable_type gpr_thd_options_set_joinable_import;
-#define gpr_thd_options_set_joinable gpr_thd_options_set_joinable_import
-typedef int(*gpr_thd_options_is_detached_type)(const gpr_thd_options *options);
-extern gpr_thd_options_is_detached_type gpr_thd_options_is_detached_import;
-#define gpr_thd_options_is_detached gpr_thd_options_is_detached_import
-typedef int(*gpr_thd_options_is_joinable_type)(const gpr_thd_options *options);
-extern gpr_thd_options_is_joinable_type gpr_thd_options_is_joinable_import;
-#define gpr_thd_options_is_joinable gpr_thd_options_is_joinable_import
-typedef gpr_thd_id(*gpr_thd_currentid_type)(void);
-extern gpr_thd_currentid_type gpr_thd_currentid_import;
-#define gpr_thd_currentid gpr_thd_currentid_import
-typedef void(*gpr_thd_join_type)(gpr_thd_id t);
-extern gpr_thd_join_type gpr_thd_join_import;
-#define gpr_thd_join gpr_thd_join_import
-
-#ifdef __cplusplus
-extern "C" {
-#endif  /* __cpluslus */
-
-void pygrpc_load_imports(HMODULE library);
-
-#ifdef __cplusplus
-}
-#endif  /* __cpluslus */
-
-#else /* !GPR_WINDOWS */
-
 #include <grpc/byte_buffer.h>
 #include <grpc/byte_buffer_reader.h>
 #include <grpc/compression.h>
@@ -892,6 +47,4 @@ void pygrpc_load_imports(HMODULE library);
 #include <grpc/support/time.h>
 #include <grpc/status.h>
 
-#endif /* !GPR_WINDOWS */
-
 #endif
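
Every one of the deleted entries above follows the same mechanical pattern, so a minimal sketch of that pattern for a single symbol may help; grpc_init is chosen purely as an illustration, and the loader body is an assumed reconstruction of what the matching generated .c file does, not a verbatim copy:

#ifdef GPR_WINDOWS
#include <windows.h>

/* 1) A typedef matching the real function's signature. */
typedef void (*grpc_init_type)(void);
/* 2) A function-pointer variable that the loader fills in at runtime. */
extern grpc_init_type grpc_init_import;
/* 3) A macro so calling code keeps using the ordinary symbol name. */
#define grpc_init grpc_init_import

/* Loader side: one GetProcAddress() assignment per exported symbol, using
 * the HMODULE that pygrpc_load_core() obtained via LoadLibraryA(). */
grpc_init_type grpc_init_import = NULL;

void pygrpc_load_imports(HMODULE library) {
  grpc_init_import = (grpc_init_type)GetProcAddress(library, "grpc_init");
}
#endif /* GPR_WINDOWS */

Removing the shim leaves only the direct #include lines above, which is consistent with the deletion of the Windows branch from loader.c in the next file.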
diff --git a/src/python/grpcio/grpc/_cython/loader.c b/src/python/grpcio/grpc/_cython/loader.c
index b909ad594edb359d520a85ae477ff29400da0f84..34bd8975495221dd83fe70280f860600427388fa 100644
--- a/src/python/grpcio/grpc/_cython/loader.c
+++ b/src/python/grpcio/grpc/_cython/loader.c
@@ -31,36 +31,20 @@
  *
  */
 
+#include <Python.h>
 #include "loader.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif  /* __cpluslus  */
 
-#if GPR_WINDOWS
-
-int pygrpc_load_core(char *path) {
-  HMODULE grpc_c;
-#ifdef GPR_ARCH_32
-  /* Close your eyes for a moment, it'll all be over soon. */
-  char *six = strrchr(path, '6');
-  *six++ = '3';
-  *six = '2';
-#endif
-  grpc_c = LoadLibraryA(path);
-  if (grpc_c) {
-    pygrpc_load_imports(grpc_c);
-    return 1;
-  }
-
-  return 0;
-}
-
-#else
-
 int pygrpc_load_core(char *path) { return 1; }
 
-#endif  /* !GPR_WINDOWS */
+/* Cython doesn't have Py_AtExit bindings, so we call the C API directly. */
+int pygrpc_initialize_core(void) {
+  grpc_init();
+  return Py_AtExit(grpc_shutdown) < 0 ? 0 : 1;
+}
 
 #ifdef __cplusplus
 }
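
For context on the pygrpc_initialize_core() added above: Py_AtExit() accepts a void(void) cleanup function to run during interpreter finalization, allows at most 32 registrations, and returns -1 on failure, which the `< 0 ? 0 : 1` expression converts to the 0/1 convention used elsewhere in this loader. A hedged caller sketch follows; ensure_core_initialized() and its error message are hypothetical, while pygrpc_initialize_core(), grpc_shutdown, and the CPython calls come from the sources:

#include <Python.h>

#include "loader.h"

/* Hypothetical helper that a Cython-generated module could call once at
 * import time. */
static int ensure_core_initialized(void) {
  /* pygrpc_initialize_core() runs grpc_init() and hands grpc_shutdown to
   * Py_AtExit(); registration fails once 32 exit functions are already
   * registered, and the wrapper reports that as 0. */
  if (!pygrpc_initialize_core()) {
    PyErr_SetString(PyExc_RuntimeError,
                    "failed to register grpc_shutdown at interpreter exit");
    return 0;
  }
  return 1;
}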
diff --git a/src/python/grpcio/grpc/_cython/loader.h b/src/python/grpcio/grpc/_cython/loader.h
index 3b8796d39f7d3ba0b880f5f4405d16501d50e2b7..62fd22520472733fcc821b3e1b5a26f0ad2b6410 100644
--- a/src/python/grpcio/grpc/_cython/loader.h
+++ b/src/python/grpcio/grpc/_cython/loader.h
@@ -39,6 +39,8 @@
 /* Additional inclusions not covered by "imports.generated.h" */
 #include <grpc/byte_buffer_reader.h>
 
+/* TODO(atash) remove cruft */
+
 #ifdef __cplusplus
 extern "C" {
 #endif  /* __cpluslus */
@@ -46,6 +48,11 @@ extern "C" {
 /* Attempts to load the core if necessary, and returns non-zero upon success. */
 int pygrpc_load_core(char *path);
 
+/* Initializes grpc and registers grpc_shutdown() to be called right before
+ * interpreter exit.  Returns non-zero upon success.
+ */
+int pygrpc_initialize_core(void);
+
 #ifdef __cplusplus
 }
 #endif  /* __cpluslus */
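
Taken together with loader.c, the header now describes a two-step bootstrap: pygrpc_load_core(), which after this change unconditionally returns 1, followed by pygrpc_initialize_core() (its Py_AtExit() behaviour is covered in the sketch after the loader.c diff above). A minimal sketch of that order, with the wrapper name and error reporting purely illustrative:

#include <stdio.h>

#include "loader.h"

/* Illustrative bootstrap following the contracts stated in loader.h:
 * both functions return non-zero on success. */
static int bootstrap_grpc_core(char *core_library_path) {
  if (!pygrpc_load_core(core_library_path)) {
    fprintf(stderr, "could not load the gRPC core from %s\n",
            core_library_path);
    return 0;
  }
  return pygrpc_initialize_core();
}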
diff --git a/src/python/grpcio/grpc/_links/invocation.py b/src/python/grpcio/grpc/_links/invocation.py
deleted file mode 100644
index 003653e1c847434b59cec3a596d8b69ca70d4b98..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/_links/invocation.py
+++ /dev/null
@@ -1,453 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""The RPC-invocation-side bridge between RPC Framework and GRPC-on-the-wire."""
-
-import abc
-import enum
-import logging
-import threading
-import time
-
-import six
-
-from grpc._adapter import _intermediary_low
-from grpc._links import _constants
-from grpc.beta import interfaces as beta_interfaces
-from grpc.framework.foundation import activated
-from grpc.framework.foundation import logging_pool
-from grpc.framework.foundation import relay
-from grpc.framework.interfaces.links import links
-
-_IDENTITY = lambda x: x
-
-_STOP = _intermediary_low.Event.Kind.STOP
-_WRITE = _intermediary_low.Event.Kind.WRITE_ACCEPTED
-_COMPLETE = _intermediary_low.Event.Kind.COMPLETE_ACCEPTED
-_READ = _intermediary_low.Event.Kind.READ_ACCEPTED
-_METADATA = _intermediary_low.Event.Kind.METADATA_ACCEPTED
-_FINISH = _intermediary_low.Event.Kind.FINISH
-
-
-@enum.unique
-class _Read(enum.Enum):
-  AWAITING_METADATA = 'awaiting metadata'
-  READING = 'reading'
-  AWAITING_ALLOWANCE = 'awaiting allowance'
-  CLOSED = 'closed'
-
-
-@enum.unique
-class _HighWrite(enum.Enum):
-  OPEN = 'open'
-  CLOSED = 'closed'
-
-
-@enum.unique
-class _LowWrite(enum.Enum):
-  OPEN = 'OPEN'
-  ACTIVE = 'ACTIVE'
-  CLOSED = 'CLOSED'
-
-
-class _Context(beta_interfaces.GRPCInvocationContext):
-
-  def __init__(self):
-    self._lock = threading.Lock()
-    self._disable_next_compression = False
-
-  def disable_next_request_compression(self):
-    with self._lock:
-      self._disable_next_compression = True
-
-  def next_compression_disabled(self):
-    with self._lock:
-      disabled = self._disable_next_compression
-      self._disable_next_compression = False
-      return disabled
-
-
-class _RPCState(object):
-
-  def __init__(
-      self, call, request_serializer, response_deserializer, sequence_number,
-      read, allowance, high_write, low_write, due, context):
-    self.call = call
-    self.request_serializer = request_serializer
-    self.response_deserializer = response_deserializer
-    self.sequence_number = sequence_number
-    self.read = read
-    self.allowance = allowance
-    self.high_write = high_write
-    self.low_write = low_write
-    self.due = due
-    self.context = context
-
-
-def _no_longer_due(kind, rpc_state, key, rpc_states):
-  rpc_state.due.remove(kind)
-  if not rpc_state.due:
-    del rpc_states[key]
-
-
-class _Kernel(object):
-
-  def __init__(
-      self, channel, host, metadata_transformer, request_serializers,
-      response_deserializers, ticket_relay):
-    self._lock = threading.Lock()
-    self._channel = channel
-    self._host = host
-    self._metadata_transformer = metadata_transformer
-    self._request_serializers = request_serializers
-    self._response_deserializers = response_deserializers
-    self._relay = ticket_relay
-
-    self._completion_queue = None
-    self._rpc_states = {}
-    self._pool = None
-
-  def _on_write_event(self, operation_id, unused_event, rpc_state):
-    if rpc_state.high_write is _HighWrite.CLOSED:
-      rpc_state.call.complete(operation_id)
-      rpc_state.due.add(_COMPLETE)
-      rpc_state.due.remove(_WRITE)
-      rpc_state.low_write = _LowWrite.CLOSED
-    else:
-      ticket = links.Ticket(
-          operation_id, rpc_state.sequence_number, None, None, None, None, 1,
-          None, None, None, None, None, None, None)
-      rpc_state.sequence_number += 1
-      self._relay.add_value(ticket)
-      rpc_state.low_write = _LowWrite.OPEN
-      _no_longer_due(_WRITE, rpc_state, operation_id, self._rpc_states)
-
-  def _on_read_event(self, operation_id, event, rpc_state):
-    if event.bytes is None or _FINISH not in rpc_state.due:
-      rpc_state.read = _Read.CLOSED
-      _no_longer_due(_READ, rpc_state, operation_id, self._rpc_states)
-    else:
-      if 0 < rpc_state.allowance:
-        rpc_state.allowance -= 1
-        rpc_state.call.read(operation_id)
-      else:
-        rpc_state.read = _Read.AWAITING_ALLOWANCE
-        _no_longer_due(_READ, rpc_state, operation_id, self._rpc_states)
-      ticket = links.Ticket(
-          operation_id, rpc_state.sequence_number, None, None, None, None, None,
-          None, rpc_state.response_deserializer(event.bytes), None, None, None,
-          None, None)
-      rpc_state.sequence_number += 1
-      self._relay.add_value(ticket)
-
-  def _on_metadata_event(self, operation_id, event, rpc_state):
-    if _FINISH in rpc_state.due:
-      rpc_state.allowance -= 1
-      rpc_state.call.read(operation_id)
-      rpc_state.read = _Read.READING
-      rpc_state.due.add(_READ)
-      rpc_state.due.remove(_METADATA)
-      ticket = links.Ticket(
-          operation_id, rpc_state.sequence_number, None, None,
-          links.Ticket.Subscription.FULL, None, None, event.metadata, None,
-          None, None, None, None, None)
-      rpc_state.sequence_number += 1
-      self._relay.add_value(ticket)
-    else:
-      _no_longer_due(_METADATA, rpc_state, operation_id, self._rpc_states)
-
-  def _on_finish_event(self, operation_id, event, rpc_state):
-    _no_longer_due(_FINISH, rpc_state, operation_id, self._rpc_states)
-    if event.status.code == _intermediary_low.Code.OK:
-      termination = links.Ticket.Termination.COMPLETION
-    elif event.status.code == _intermediary_low.Code.CANCELLED:
-      termination = links.Ticket.Termination.CANCELLATION
-    elif event.status.code == _intermediary_low.Code.DEADLINE_EXCEEDED:
-      termination = links.Ticket.Termination.EXPIRATION
-    elif event.status.code == _intermediary_low.Code.UNIMPLEMENTED:
-      termination = links.Ticket.Termination.REMOTE_FAILURE
-    elif event.status.code == _intermediary_low.Code.UNKNOWN:
-      termination = links.Ticket.Termination.LOCAL_FAILURE
-    else:
-      termination = links.Ticket.Termination.TRANSMISSION_FAILURE
-    code = _constants.LOW_STATUS_CODE_TO_HIGH_STATUS_CODE[event.status.code]
-    ticket = links.Ticket(
-        operation_id, rpc_state.sequence_number, None, None, None, None, None,
-        None, None, event.metadata, code, event.status.details, termination,
-        None)
-    rpc_state.sequence_number += 1
-    self._relay.add_value(ticket)
-
-  def _spin(self, completion_queue):
-    while True:
-      event = completion_queue.get(None)
-      with self._lock:
-        rpc_state = self._rpc_states.get(event.tag, None)
-        if event.kind is _STOP:
-          pass
-        elif event.kind is _WRITE:
-          self._on_write_event(event.tag, event, rpc_state)
-        elif event.kind is _METADATA:
-          self._on_metadata_event(event.tag, event, rpc_state)
-        elif event.kind is _READ:
-          self._on_read_event(event.tag, event, rpc_state)
-        elif event.kind is _FINISH:
-          self._on_finish_event(event.tag, event, rpc_state)
-        elif event.kind is _COMPLETE:
-          _no_longer_due(_COMPLETE, rpc_state, event.tag, self._rpc_states)
-        else:
-          logging.error('Illegal RPC event! %s', (event,))
-
-        if self._completion_queue is None and not self._rpc_states:
-          completion_queue.stop()
-          return
-
-  def _invoke(
-      self, operation_id, group, method, initial_metadata, payload, termination,
-      timeout, allowance, options):
-    """Invoke an RPC.
-
-    Args:
-      operation_id: Any object to be used as an operation ID for the RPC.
-      group: The group to which the RPC method belongs.
-      method: The RPC method name.
-      initial_metadata: The initial metadata object for the RPC.
-      payload: A payload object for the RPC or None if no payload was given at
-        invocation-time.
-      termination: A links.Ticket.Termination value or None indicated whether or
-        not more writes will follow from this side of the RPC.
-      timeout: A duration of time in seconds to allow for the RPC.
-      allowance: The number of payloads (beyond the free first one) that the
-        local ticket exchange mate has granted permission to be read.
-      options: A beta_interfaces.GRPCCallOptions value or None.
-    """
-    if termination is links.Ticket.Termination.COMPLETION:
-      high_write = _HighWrite.CLOSED
-    elif termination is None:
-      high_write = _HighWrite.OPEN
-    else:
-      return
-
-    transformed_initial_metadata = self._metadata_transformer(initial_metadata)
-    request_serializer = self._request_serializers.get(
-        (group, method), _IDENTITY)
-    response_deserializer = self._response_deserializers.get(
-        (group, method), _IDENTITY)
-
-    call = _intermediary_low.Call(
-        self._channel, self._completion_queue, '/%s/%s' % (group, method),
-        self._host, time.time() + timeout)
-    if options is not None and options.credentials is not None:
-      call.set_credentials(options.credentials._low_credentials)
-    if transformed_initial_metadata is not None:
-      for metadata_key, metadata_value in transformed_initial_metadata:
-        call.add_metadata(metadata_key, metadata_value)
-    call.invoke(self._completion_queue, operation_id, operation_id)
-    if payload is None:
-      if high_write is _HighWrite.CLOSED:
-        call.complete(operation_id)
-        low_write = _LowWrite.CLOSED
-        due = set((_METADATA, _COMPLETE, _FINISH,))
-      else:
-        low_write = _LowWrite.OPEN
-        due = set((_METADATA, _FINISH,))
-    else:
-      if options is not None and options.disable_compression:
-        flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS
-      else:
-        flags = 0
-      call.write(request_serializer(payload), operation_id, flags)
-      low_write = _LowWrite.ACTIVE
-      due = set((_WRITE, _METADATA, _FINISH,))
-    context = _Context()
-    self._rpc_states[operation_id] = _RPCState(
-        call, request_serializer, response_deserializer, 1,
-        _Read.AWAITING_METADATA, 1 if allowance is None else (1 + allowance),
-        high_write, low_write, due, context)
-    protocol = links.Protocol(links.Protocol.Kind.INVOCATION_CONTEXT, context)
-    ticket = links.Ticket(
-        operation_id, 0, None, None, None, None, None, None, None, None, None,
-        None, None, protocol)
-    self._relay.add_value(ticket)
-
-  def _advance(self, operation_id, rpc_state, payload, termination, allowance):
-    if payload is not None:
-      disable_compression = rpc_state.context.next_compression_disabled()
-      if disable_compression:
-        flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS
-      else:
-        flags = 0
-      rpc_state.call.write(
-          rpc_state.request_serializer(payload), operation_id, flags)
-      rpc_state.low_write = _LowWrite.ACTIVE
-      rpc_state.due.add(_WRITE)
-
-    if allowance is not None:
-      if rpc_state.read is _Read.AWAITING_ALLOWANCE:
-        rpc_state.allowance += allowance - 1
-        rpc_state.call.read(operation_id)
-        rpc_state.read = _Read.READING
-        rpc_state.due.add(_READ)
-      else:
-        rpc_state.allowance += allowance
-
-    if termination is links.Ticket.Termination.COMPLETION:
-      rpc_state.high_write = _HighWrite.CLOSED
-      if rpc_state.low_write is _LowWrite.OPEN:
-        rpc_state.call.complete(operation_id)
-        rpc_state.due.add(_COMPLETE)
-        rpc_state.low_write = _LowWrite.CLOSED
-    elif termination is not None:
-      rpc_state.call.cancel()
-
-  def add_ticket(self, ticket):
-    with self._lock:
-      if ticket.sequence_number == 0:
-        if self._completion_queue is None:
-          logging.error('Received invocation ticket %s after stop!', ticket)
-        else:
-          if (ticket.protocol is not None and
-              ticket.protocol.kind is links.Protocol.Kind.CALL_OPTION):
-            grpc_call_options = ticket.protocol.value
-          else:
-            grpc_call_options = None
-          self._invoke(
-              ticket.operation_id, ticket.group, ticket.method,
-              ticket.initial_metadata, ticket.payload, ticket.termination,
-              ticket.timeout, ticket.allowance, grpc_call_options)
-      else:
-        rpc_state = self._rpc_states.get(ticket.operation_id)
-        if rpc_state is not None:
-          self._advance(
-              ticket.operation_id, rpc_state, ticket.payload,
-              ticket.termination, ticket.allowance)
-
-  def start(self):
-    """Starts this object.
-
-    This method must be called before attempting to exchange tickets with this
-    object.
-    """
-    with self._lock:
-      self._completion_queue = _intermediary_low.CompletionQueue()
-      self._pool = logging_pool.pool(1)
-      self._pool.submit(self._spin, self._completion_queue)
-
-  def stop(self):
-    """Stops this object.
-
-    This method must be called for proper termination of this object, and no
-    attempts to exchange tickets with this object may be made after this method
-    has been called.
-    """
-    with self._lock:
-      if not self._rpc_states:
-        self._completion_queue.stop()
-      self._completion_queue = None
-      pool = self._pool
-    pool.shutdown(wait=True)
-
-
-class InvocationLink(six.with_metaclass(abc.ABCMeta, links.Link, activated.Activated)):
-  """A links.Link for use on the invocation-side of a gRPC connection.
-
-  Implementations of this interface are only valid for use when activated.
-  """
-
-
-class _InvocationLink(InvocationLink):
-
-  def __init__(
-      self, channel, host, metadata_transformer, request_serializers,
-      response_deserializers):
-    self._relay = relay.relay(None)
-    self._kernel = _Kernel(
-        channel, host,
-        _IDENTITY if metadata_transformer is None else metadata_transformer,
-        {} if request_serializers is None else request_serializers,
-        {} if response_deserializers is None else response_deserializers,
-        self._relay)
-
-  def _start(self):
-    self._relay.start()
-    self._kernel.start()
-    return self
-
-  def _stop(self):
-    self._kernel.stop()
-    self._relay.stop()
-
-  def accept_ticket(self, ticket):
-    """See links.Link.accept_ticket for specification."""
-    self._kernel.add_ticket(ticket)
-
-  def join_link(self, link):
-    """See links.Link.join_link for specification."""
-    self._relay.set_behavior(link.accept_ticket)
-
-  def __enter__(self):
-    """See activated.Activated.__enter__ for specification."""
-    return self._start()
-
-  def __exit__(self, exc_type, exc_val, exc_tb):
-    """See activated.Activated.__exit__ for specification."""
-    self._stop()
-    return False
-
-  def start(self):
-    """See activated.Activated.start for specification."""
-    return self._start()
-
-  def stop(self):
-    """See activated.Activated.stop for specification."""
-    self._stop()
-
-
-def invocation_link(
-    channel, host, metadata_transformer, request_serializers,
-    response_deserializers):
-  """Creates an InvocationLink.
-
-  Args:
-    channel: An _intermediary_low.Channel for use by the link.
-    host: The host to specify when invoking RPCs.
-    metadata_transformer: A callable that takes an invocation-side initial
-      metadata value and returns another metadata value to send in its place.
-      May be None.
-    request_serializers: A dict from group-method pair to request object
-      serialization behavior.
-    response_deserializers: A dict from group-method pair to response object
-      deserialization behavior.
-
-  Returns:
-    An InvocationLink.
-  """
-  return _InvocationLink(
-      channel, host, metadata_transformer, request_serializers,
-      response_deserializers)
diff --git a/src/python/grpcio/grpc/_links/service.py b/src/python/grpcio/grpc/_links/service.py
deleted file mode 100644
index 5fc4994ca0e0b26534b7de860bd70fcaf8d61ab3..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/_links/service.py
+++ /dev/null
@@ -1,509 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""The RPC-service-side bridge between RPC Framework and GRPC-on-the-wire."""
-
-import abc
-import enum
-import logging
-import threading
-import six
-import time
-
-from grpc._adapter import _intermediary_low
-from grpc._links import _constants
-from grpc.beta import interfaces as beta_interfaces
-from grpc.framework.foundation import logging_pool
-from grpc.framework.foundation import relay
-from grpc.framework.interfaces.links import links
-
-_IDENTITY = lambda x: x
-
-_TERMINATION_KIND_TO_CODE = {
-    links.Ticket.Termination.COMPLETION: _intermediary_low.Code.OK,
-    links.Ticket.Termination.CANCELLATION: _intermediary_low.Code.CANCELLED,
-    links.Ticket.Termination.EXPIRATION:
-        _intermediary_low.Code.DEADLINE_EXCEEDED,
-    links.Ticket.Termination.SHUTDOWN: _intermediary_low.Code.UNAVAILABLE,
-    links.Ticket.Termination.RECEPTION_FAILURE: _intermediary_low.Code.INTERNAL,
-    links.Ticket.Termination.TRANSMISSION_FAILURE:
-        _intermediary_low.Code.INTERNAL,
-    links.Ticket.Termination.LOCAL_FAILURE: _intermediary_low.Code.UNKNOWN,
-    links.Ticket.Termination.REMOTE_FAILURE: _intermediary_low.Code.UNKNOWN,
-}
-
-_STOP = _intermediary_low.Event.Kind.STOP
-_WRITE = _intermediary_low.Event.Kind.WRITE_ACCEPTED
-_COMPLETE = _intermediary_low.Event.Kind.COMPLETE_ACCEPTED
-_SERVICE = _intermediary_low.Event.Kind.SERVICE_ACCEPTED
-_READ = _intermediary_low.Event.Kind.READ_ACCEPTED
-_FINISH = _intermediary_low.Event.Kind.FINISH
-
-
-@enum.unique
-class _Read(enum.Enum):
-  READING = 'reading'
-  # TODO(issue 2916): This state will again be necessary after eliminating the
-  # "early_read" field of _RPCState and going back to only reading when granted
-  # allowance to read.
-  # AWAITING_ALLOWANCE = 'awaiting allowance'
-  CLOSED = 'closed'
-
-
-@enum.unique
-class _HighWrite(enum.Enum):
-  OPEN = 'open'
-  CLOSED = 'closed'
-
-
-@enum.unique
-class _LowWrite(enum.Enum):
-  """The possible categories of low-level write state."""
-
-  OPEN = 'OPEN'
-  ACTIVE = 'ACTIVE'
-  CLOSED = 'CLOSED'
-
-
-class _Context(beta_interfaces.GRPCServicerContext):
-
-  def __init__(self, call):
-    self._lock = threading.Lock()
-    self._call = call
-    self._disable_next_compression = False
-
-  def peer(self):
-    with self._lock:
-      return self._call.peer()
-
-  def disable_next_response_compression(self):
-    with self._lock:
-      self._disable_next_compression = True
-
-  def next_compression_disabled(self):
-    with self._lock:
-      disabled = self._disable_next_compression
-      self._disable_next_compression = False
-      return disabled
-
-
-class _RPCState(object):
-
-  def __init__(
-      self, request_deserializer, response_serializer, sequence_number, read,
-      early_read, allowance, high_write, low_write, premetadataed,
-      terminal_metadata, code, message, due, context):
-    self.request_deserializer = request_deserializer
-    self.response_serializer = response_serializer
-    self.sequence_number = sequence_number
-    self.read = read
-    # TODO(issue 2916): Eliminate this by eliminating the necessity of calling
-    # call.read just to advance the RPC.
-    self.early_read = early_read  # A raw (not deserialized) read.
-    self.allowance = allowance
-    self.high_write = high_write
-    self.low_write = low_write
-    self.premetadataed = premetadataed
-    self.terminal_metadata = terminal_metadata
-    self.code = code
-    self.message = message
-    self.due = due
-    self.context = context
-
-
-def _no_longer_due(kind, rpc_state, key, rpc_states):
-  rpc_state.due.remove(kind)
-  if not rpc_state.due:
-    del rpc_states[key]
-
-
-def _metadatafy(call, metadata):
-  for metadata_key, metadata_value in metadata:
-    call.add_metadata(metadata_key, metadata_value)
-
-
-def _status(termination_kind, high_code, details):
-  low_details = b'' if details is None else details
-  if high_code is None:
-    low_code = _TERMINATION_KIND_TO_CODE[termination_kind]
-  else:
-    low_code = _constants.HIGH_STATUS_CODE_TO_LOW_STATUS_CODE[high_code]
-  return _intermediary_low.Status(low_code, low_details)
-
-
-class _Kernel(object):
-
-  def __init__(self, request_deserializers, response_serializers, ticket_relay):
-    self._lock = threading.Lock()
-    self._request_deserializers = request_deserializers
-    self._response_serializers = response_serializers
-    self._relay = ticket_relay
-
-    self._completion_queue = None
-    self._due = set()
-    self._server = None
-    self._rpc_states = {}
-    self._pool = None
-
-  def _on_service_acceptance_event(self, event, server):
-    server.service(None)
-
-    service_acceptance = event.service_acceptance
-    call = service_acceptance.call
-    call.accept(self._completion_queue, call)
-    try:
-      service_method = service_acceptance.method
-      if six.PY3:
-          service_method = service_method.decode('latin1')
-      group, method = service_method.split('/')[1:3]
-    except ValueError:
-      logging.info('Illegal path "%s"!', service_acceptance.method)
-      return
-    request_deserializer = self._request_deserializers.get(
-        (group, method), _IDENTITY)
-    response_serializer = self._response_serializers.get(
-        (group, method), _IDENTITY)
-
-    call.read(call)
-    context = _Context(call)
-    self._rpc_states[call] = _RPCState(
-        request_deserializer, response_serializer, 1, _Read.READING, None, 1,
-        _HighWrite.OPEN, _LowWrite.OPEN, False, None, None, None,
-        set((_READ, _FINISH,)), context)
-    protocol = links.Protocol(links.Protocol.Kind.SERVICER_CONTEXT, context)
-    ticket = links.Ticket(
-        call, 0, group, method, links.Ticket.Subscription.FULL,
-        service_acceptance.deadline - time.time(), None, event.metadata, None,
-        None, None, None, None, protocol)
-    self._relay.add_value(ticket)
-
-  def _on_read_event(self, event):
-    call = event.tag
-    rpc_state = self._rpc_states[call]
-
-    if event.bytes is None:
-      rpc_state.read = _Read.CLOSED
-      payload = None
-      termination = links.Ticket.Termination.COMPLETION
-      _no_longer_due(_READ, rpc_state, call, self._rpc_states)
-    else:
-      if 0 < rpc_state.allowance:
-        payload = rpc_state.request_deserializer(event.bytes)
-        termination = None
-        rpc_state.allowance -= 1
-        call.read(call)
-      else:
-        rpc_state.early_read = event.bytes
-        _no_longer_due(_READ, rpc_state, call, self._rpc_states)
-        return
-        # TODO(issue 2916): Instead of returning:
-        # rpc_state.read = _Read.AWAITING_ALLOWANCE
-    ticket = links.Ticket(
-        call, rpc_state.sequence_number, None, None, None, None, None, None,
-        payload, None, None, None, termination, None)
-    rpc_state.sequence_number += 1
-    self._relay.add_value(ticket)
-
-  def _on_write_event(self, event):
-    call = event.tag
-    rpc_state = self._rpc_states[call]
-
-    if rpc_state.high_write is _HighWrite.CLOSED:
-      if rpc_state.terminal_metadata is not None:
-        _metadatafy(call, rpc_state.terminal_metadata)
-      status = _status(
-          links.Ticket.Termination.COMPLETION, rpc_state.code,
-          rpc_state.message)
-      call.status(status, call)
-      rpc_state.low_write = _LowWrite.CLOSED
-      rpc_state.due.add(_COMPLETE)
-      rpc_state.due.remove(_WRITE)
-    else:
-      ticket = links.Ticket(
-          call, rpc_state.sequence_number, None, None, None, None, 1, None,
-          None, None, None, None, None, None)
-      rpc_state.sequence_number += 1
-      self._relay.add_value(ticket)
-      rpc_state.low_write = _LowWrite.OPEN
-      _no_longer_due(_WRITE, rpc_state, call, self._rpc_states)
-
-  def _on_finish_event(self, event):
-    call = event.tag
-    rpc_state = self._rpc_states[call]
-    _no_longer_due(_FINISH, rpc_state, call, self._rpc_states)
-    code = event.status.code
-    if code == _intermediary_low.Code.OK:
-      return
-
-    if code == _intermediary_low.Code.CANCELLED:
-      termination = links.Ticket.Termination.CANCELLATION
-    elif code == _intermediary_low.Code.DEADLINE_EXCEEDED:
-      termination = links.Ticket.Termination.EXPIRATION
-    else:
-      termination = links.Ticket.Termination.TRANSMISSION_FAILURE
-    ticket = links.Ticket(
-        call, rpc_state.sequence_number, None, None, None, None, None, None,
-        None, None, None, None, termination, None)
-    rpc_state.sequence_number += 1
-    self._relay.add_value(ticket)
-
-  def _spin(self, completion_queue, server):
-    while True:
-      event = completion_queue.get(None)
-      with self._lock:
-        if event.kind is _STOP:
-          self._due.remove(_STOP)
-        elif event.kind is _READ:
-          self._on_read_event(event)
-        elif event.kind is _WRITE:
-          self._on_write_event(event)
-        elif event.kind is _COMPLETE:
-          _no_longer_due(
-              _COMPLETE, self._rpc_states.get(event.tag), event.tag,
-              self._rpc_states)
-        elif event.kind is _intermediary_low.Event.Kind.FINISH:
-          self._on_finish_event(event)
-        elif event.kind is _SERVICE:
-          if self._server is None:
-            self._due.remove(_SERVICE)
-          else:
-            self._on_service_acceptance_event(event, server)
-        else:
-          logging.error('Illegal event! %s', (event,))
-
-        if not self._due and not self._rpc_states:
-          completion_queue.stop()
-          return
-
-  def add_ticket(self, ticket):
-    with self._lock:
-      call = ticket.operation_id
-      rpc_state = self._rpc_states.get(call)
-      if rpc_state is None:
-        return
-
-      if ticket.initial_metadata is not None:
-        _metadatafy(call, ticket.initial_metadata)
-        call.premetadata()
-        rpc_state.premetadataed = True
-      elif not rpc_state.premetadataed:
-        if (ticket.terminal_metadata is not None or
-            ticket.payload is not None or
-            ticket.termination is not None or
-            ticket.code is not None or
-            ticket.message is not None):
-          call.premetadata()
-          rpc_state.premetadataed = True
-
-      if ticket.allowance is not None:
-        if rpc_state.early_read is None:
-          rpc_state.allowance += ticket.allowance
-        else:
-          payload = rpc_state.request_deserializer(rpc_state.early_read)
-          rpc_state.allowance += ticket.allowance - 1
-          rpc_state.early_read = None
-          if rpc_state.read is _Read.READING:
-            call.read(call)
-            rpc_state.due.add(_READ)
-            termination = None
-          else:
-            termination = links.Ticket.Termination.COMPLETION
-          early_read_ticket = links.Ticket(
-              call, rpc_state.sequence_number, None, None, None, None, None,
-              None, payload, None, None, None, termination, None)
-          rpc_state.sequence_number += 1
-          self._relay.add_value(early_read_ticket)
-
-      if ticket.payload is not None:
-        disable_compression = rpc_state.context.next_compression_disabled()
-        if disable_compression:
-          flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS
-        else:
-          flags = 0
-        call.write(rpc_state.response_serializer(ticket.payload), call, flags)
-        rpc_state.due.add(_WRITE)
-        rpc_state.low_write = _LowWrite.ACTIVE
-
-      if ticket.terminal_metadata is not None:
-        rpc_state.terminal_metadata = ticket.terminal_metadata
-      if ticket.code is not None:
-        rpc_state.code = ticket.code
-      if ticket.message is not None:
-        rpc_state.message = ticket.message
-
-      if ticket.termination is links.Ticket.Termination.COMPLETION:
-        rpc_state.high_write = _HighWrite.CLOSED
-        if rpc_state.low_write is _LowWrite.OPEN:
-          if rpc_state.terminal_metadata is not None:
-            _metadatafy(call, rpc_state.terminal_metadata)
-          status = _status(
-              links.Ticket.Termination.COMPLETION, rpc_state.code,
-              rpc_state.message)
-          call.status(status, call)
-          rpc_state.due.add(_COMPLETE)
-          rpc_state.low_write = _LowWrite.CLOSED
-      elif ticket.termination is not None:
-        if rpc_state.terminal_metadata is not None:
-          _metadatafy(call, rpc_state.terminal_metadata)
-        status = _status(
-            ticket.termination, rpc_state.code, rpc_state.message)
-        call.status(status, call)
-        rpc_state.due.add(_COMPLETE)
-
-  def add_port(self, address, server_credentials):
-    with self._lock:
-      if self._server is None:
-        self._completion_queue = _intermediary_low.CompletionQueue()
-        self._server = _intermediary_low.Server(self._completion_queue)
-      if server_credentials is None:
-        return self._server.add_http2_addr(address)
-      else:
-        return self._server.add_secure_http2_addr(address, server_credentials)
-
-  def start(self):
-    with self._lock:
-      if self._server is None:
-        self._completion_queue = _intermediary_low.CompletionQueue()
-        self._server = _intermediary_low.Server(self._completion_queue)
-      self._pool = logging_pool.pool(1)
-      self._pool.submit(self._spin, self._completion_queue, self._server)
-      self._server.start()
-      self._server.service(None)
-      self._due.add(_SERVICE)
-
-  def begin_stop(self):
-    with self._lock:
-      self._server.stop()
-      self._due.add(_STOP)
-      self._server = None
-
-  def end_stop(self):
-    with self._lock:
-      pool = self._pool
-    pool.shutdown(wait=True)
-
-
-class ServiceLink(links.Link):
-  """A links.Link for use on the service-side of a gRPC connection.
-
-  Implementations of this interface are only valid for use between calls to
-  their start method and one of their stop methods.
-  """
-
-  @abc.abstractmethod
-  def add_port(self, address, server_credentials):
-    """Adds a port on which to service RPCs after this link has been started.
-
-    Args:
-      address: The address on which to service RPCs with a port number of zero
-        requesting that a port number be automatically selected and used.
-      server_credentials: An _intermediary_low.ServerCredentials object, or
-        None for insecure service.
-
-    Returns:
-      An integer port on which RPCs will be serviced after this link has been
-        started. This is typically the same number as the port number contained
-        in the passed address, but will likely be different if the port number
-        contained in the passed address was zero.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def start(self):
-    """Starts this object.
-
-    This method must be called before attempting to use this Link in ticket
-    exchange.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def begin_stop(self):
-    """Indicate imminent link stop and immediate rejection of new RPCs.
-
-    New RPCs will be rejected as soon as this method is called, but ongoing RPCs
-    will be allowed to continue until they terminate. This method does not
-    block.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def end_stop(self):
-    """Finishes stopping this link.
-
-    begin_stop must have been called exactly once before calling this method.
-
-    All in-progress RPCs will be terminated immediately.
-    """
-    raise NotImplementedError()
-
-
-class _ServiceLink(ServiceLink):
-
-  def __init__(self, request_deserializers, response_serializers):
-    self._relay = relay.relay(None)
-    self._kernel = _Kernel(
-        {} if request_deserializers is None else request_deserializers,
-        {} if response_serializers is None else response_serializers,
-        self._relay)
-
-  def accept_ticket(self, ticket):
-    self._kernel.add_ticket(ticket)
-
-  def join_link(self, link):
-    self._relay.set_behavior(link.accept_ticket)
-
-  def add_port(self, address, server_credentials):
-    return self._kernel.add_port(address, server_credentials)
-
-  def start(self):
-    self._relay.start()
-    return self._kernel.start()
-
-  def begin_stop(self):
-    self._kernel.begin_stop()
-
-  def end_stop(self):
-    self._kernel.end_stop()
-    self._relay.stop()
-
-
-def service_link(request_deserializers, response_serializers):
-  """Creates a ServiceLink.
-
-  Args:
-    request_deserializers: A dict from group-method pair to request object
-      deserialization behavior.
-    response_serializers: A dict from group-method pair to response object
-      serialization behavior.
-
-  Returns:
-    A ServiceLink.
-  """
-  return _ServiceLink(request_deserializers, response_serializers)
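
Note: the deleted module above was the links/ticket bridge that fed servicer-side RPCs from the legacy RPC Framework into the C core; the rest of this patch moves the server path onto the GA `grpc.server` surface instead. Below is a minimal sketch, not part of this patch, of serving an RPC through that surface using the thread-pool-first argument order this patch establishes in `grpc/_server.py`; the service and method names are illustrative.

```python
# Minimal sketch of the GA server surface that supersedes the deleted
# links-based bridge. Service/method names here are illustrative only.
from concurrent import futures

import grpc


def _echo(request, servicer_context):
  # Unary-unary behavior; identity (de)serialization keeps the sketch short.
  return request


def serve():
  handler = grpc.method_handlers_generic_handler(
      'example.Echo',
      {'Echo': grpc.unary_unary_rpc_method_handler(_echo)})
  # Thread pool first, matching this patch's Server.__init__ and the
  # grpc.server(...) call sites changed elsewhere in the patch.
  server = grpc.server(
      futures.ThreadPoolExecutor(max_workers=8), handlers=(handler,))
  port = server.add_insecure_port('[::]:0')
  server.start()
  return server, port
```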
diff --git a/src/python/grpcio/grpc/_plugin_wrapping.py b/src/python/grpcio/grpc/_plugin_wrapping.py
index 4e9cfe710cd17c948e8261dac46d6d2cf7d7f4c2..7cb5218c22bd8bb8895f2febdc758605c6484cb4 100644
--- a/src/python/grpcio/grpc/_plugin_wrapping.py
+++ b/src/python/grpcio/grpc/_plugin_wrapping.py
@@ -31,6 +31,7 @@ import collections
 import threading
 
 import grpc
+from grpc import _common
 from grpc._cython import cygrpc
 
 
@@ -62,17 +63,16 @@ class _WrappedCygrpcCallback(object):
     # TODO(atash) translate different Exception superclasses into different
     # status codes.
     self.cygrpc_callback(
-        cygrpc.Metadata([]), cygrpc.StatusCode.internal, error.message)
+        _common.EMPTY_METADATA, cygrpc.StatusCode.internal,
+        _common.encode(str(error)))
 
   def _invoke_success(self, metadata):
     try:
-      cygrpc_metadata = cygrpc.Metadata(
-          cygrpc.Metadatum(key, value)
-          for key, value in metadata)
+      cygrpc_metadata = _common.cygrpc_metadata(metadata)
     except Exception as error:
       self._invoke_failure(error)
       return
-    self.cygrpc_callback(cygrpc_metadata, cygrpc.StatusCode.ok, '')
+    self.cygrpc_callback(cygrpc_metadata, cygrpc.StatusCode.ok, b'')
 
   def __call__(self, metadata, error):
     with self.is_called_lock:
@@ -101,7 +101,7 @@ class _WrappedPlugin(object):
   def __call__(self, context, cygrpc_callback):
     wrapped_cygrpc_callback = _WrappedCygrpcCallback(cygrpc_callback)
     wrapped_context = AuthMetadataContext(
-        context.service_url, context.method_name)
+        _common.decode(context.service_url), _common.decode(context.method_name))
     try:
       self.plugin(
           wrapped_context, AuthMetadataPluginCallback(wrapped_cygrpc_callback))
@@ -120,4 +120,4 @@ def call_credentials_metadata_plugin(plugin, name):
       plugin's invocation must be non-blocking.
   """
   return cygrpc.call_credentials_metadata_plugin(
-      cygrpc.CredentialsMetadataPlugin(_WrappedPlugin(plugin), name))
+      cygrpc.CredentialsMetadataPlugin(_WrappedPlugin(plugin), _common.encode(name)))
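
Note: the plugin-wrapping changes above route every string that crosses the cygrpc boundary through `_common.encode`/`_common.decode`. The helpers below are hypothetical and only illustrate the str/bytes convention those calls appear to enforce; they are not the actual `grpc._common` implementations, which may differ in detail.

```python
# Hypothetical helpers illustrating the convention assumed above: text going
# down into cygrpc is UTF-8 bytes, text surfaced to applications is str.
# Not the real grpc._common implementations.
def encode(s):
  if isinstance(s, bytes):
    return s
  return s.encode('utf-8')


def decode(b):
  if isinstance(b, bytes):
    return b.decode('utf-8')
  return b
```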
diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py
index bf20f15f7291fd66f555b61d2b5d4babb1129e0d..94a13bfb2fbf225c403e59ea9c1adc31de3bd391 100644
--- a/src/python/grpcio/grpc/_server.py
+++ b/src/python/grpcio/grpc/_server.py
@@ -87,7 +87,7 @@ def _abortion_code(state, code):
 
 
 def _details(state):
-  return '' if state.details is None else state.details
+  return b'' if state.details is None else state.details
 
 
 class _HandlerCallDetails(
@@ -146,18 +146,18 @@ def _abort(state, call, code, details):
           cygrpc.operation_send_initial_metadata(
               _EMPTY_METADATA, _EMPTY_FLAGS),
           cygrpc.operation_send_status_from_server(
-              _common.metadata(state.trailing_metadata), effective_code,
+              _common.cygrpc_metadata(state.trailing_metadata), effective_code,
               effective_details, _EMPTY_FLAGS),
       )
       token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
     else:
       operations = (
           cygrpc.operation_send_status_from_server(
-              _common.metadata(state.trailing_metadata), effective_code,
+              _common.cygrpc_metadata(state.trailing_metadata), effective_code,
               effective_details, _EMPTY_FLAGS),
       )
       token = _SEND_STATUS_FROM_SERVER_TOKEN
-    call.start_batch(
+    call.start_server_batch(
         cygrpc.Operations(operations),
         _send_status_from_server(state, token))
     state.statused = True
@@ -191,7 +191,7 @@ def _receive_message(state, call, request_deserializer):
         if request is None:
           _abort(
               state, call, cygrpc.StatusCode.internal,
-              'Exception deserializing request!')
+              b'Exception deserializing request!')
         else:
           state.request = request
         state.condition.notify_all()
@@ -244,10 +244,10 @@ class _Context(grpc.ServicerContext):
       self._state.disable_next_compression = True
 
   def invocation_metadata(self):
-    return self._rpc_event.request_metadata
+    return _common.application_metadata(self._rpc_event.request_metadata)
 
   def peer(self):
-    return self._rpc_event.operation_call.peer()
+    return _common.decode(self._rpc_event.operation_call.peer())
 
   def send_initial_metadata(self, initial_metadata):
     with self._state.condition:
@@ -256,8 +256,8 @@ class _Context(grpc.ServicerContext):
       else:
         if self._state.initial_metadata_allowed:
           operation = cygrpc.operation_send_initial_metadata(
-              _common.metadata(initial_metadata), _EMPTY_FLAGS)
-          self._rpc_event.operation_call.start_batch(
+              _common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS)
+          self._rpc_event.operation_call.start_server_batch(
               cygrpc.Operations((operation,)),
               _send_initial_metadata(self._state))
           self._state.initial_metadata_allowed = False
@@ -267,7 +267,8 @@ class _Context(grpc.ServicerContext):
 
   def set_trailing_metadata(self, trailing_metadata):
     with self._state.condition:
-      self._state.trailing_metadata = trailing_metadata
+      self._state.trailing_metadata = _common.cygrpc_metadata(
+          trailing_metadata)
 
   def set_code(self, code):
     with self._state.condition:
@@ -275,7 +276,7 @@ class _Context(grpc.ServicerContext):
 
   def set_details(self, details):
     with self._state.condition:
-      self._state.details = details
+      self._state.details = _common.encode(details)
 
 
 class _RequestIterator(object):
@@ -291,7 +292,7 @@ class _RequestIterator(object):
     elif self._state.client is _CLOSED or self._state.statused:
       raise StopIteration()
     else:
-      self._call.start_batch(
+      self._call.start_server_batch(
           cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
           _receive_message(self._state, self._call, self._request_deserializer))
       self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
@@ -332,7 +333,7 @@ def _unary_request(rpc_event, state, request_deserializer):
       if state.client is _CANCELLED or state.statused:
         return None
       else:
-        start_batch_result = rpc_event.operation_call.start_batch(
+        start_server_batch_result = rpc_event.operation_call.start_server_batch(
             cygrpc.Operations(
                 (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
             _receive_message(
@@ -346,7 +347,7 @@ def _unary_request(rpc_event, state, request_deserializer):
                   rpc_event.request_call_details.method)
               _abort(
                   state, rpc_event.operation_call,
-                  cygrpc.StatusCode.unimplemented, details)
+                  cygrpc.StatusCode.unimplemented, _common.encode(details))
               return None
             elif state.client is _CANCELLED:
               return None
@@ -366,8 +367,8 @@ def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
       if e not in state.rpc_errors:
         details = 'Exception calling application: {}'.format(e)
         logging.exception(details)
-        _abort(
-            state, rpc_event.operation_call, cygrpc.StatusCode.unknown, details)
+        _abort(state, rpc_event.operation_call,
+               cygrpc.StatusCode.unknown, _common.encode(details))
     return None, False
 
 
@@ -381,8 +382,8 @@ def _take_response_from_response_iterator(rpc_event, state, response_iterator):
       if e not in state.rpc_errors:
         details = 'Exception iterating responses: {}'.format(e)
         logging.exception(details)
-        _abort(
-            state, rpc_event.operation_call, cygrpc.StatusCode.unknown, details)
+        _abort(state, rpc_event.operation_call,
+               cygrpc.StatusCode.unknown, _common.encode(details))
     return None, False
 
 
@@ -392,7 +393,7 @@ def _serialize_response(rpc_event, state, response, response_serializer):
     with state.condition:
       _abort(
           state, rpc_event.operation_call, cygrpc.StatusCode.internal,
-          'Failed to serialize response!')
+          b'Failed to serialize response!')
     return None
   else:
     return serialized_response
@@ -416,7 +417,7 @@ def _send_response(rpc_event, state, serialized_response):
             cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
         )
         token = _SEND_MESSAGE_TOKEN
-      rpc_event.operation_call.start_batch(
+      rpc_event.operation_call.start_server_batch(
           cygrpc.Operations(operations), _send_message(state, token))
       state.due.add(token)
       while True:
@@ -428,7 +429,7 @@ def _send_response(rpc_event, state, serialized_response):
 def _status(rpc_event, state, serialized_response):
   with state.condition:
     if state.client is not _CANCELLED:
-      trailing_metadata = _common.metadata(state.trailing_metadata)
+      trailing_metadata = _common.cygrpc_metadata(state.trailing_metadata)
       code = _completion_code(state)
       details = _details(state)
       operations = [
@@ -442,7 +443,7 @@ def _status(rpc_event, state, serialized_response):
       if serialized_response is not None:
         operations.append(cygrpc.operation_send_message(
             serialized_response, _EMPTY_FLAGS))
-      rpc_event.operation_call.start_batch(
+      rpc_event.operation_call.start_server_batch(
           cygrpc.Operations(operations),
           _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
       state.statused = True
@@ -532,7 +533,8 @@ def _find_method_handler(rpc_event, generic_handlers):
   for generic_handler in generic_handlers:
     method_handler = generic_handler.service(
         _HandlerCallDetails(
-            rpc_event.request_call_details.method, rpc_event.request_metadata))
+            _common.decode(rpc_event.request_call_details.method),
+            rpc_event.request_metadata))
     if method_handler is not None:
       return method_handler
   else:
@@ -545,10 +547,10 @@ def _handle_unrecognized_method(rpc_event):
       cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
       cygrpc.operation_send_status_from_server(
           _EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
-          'Method not found!', _EMPTY_FLAGS),
+          b'Method not found!', _EMPTY_FLAGS),
   )
   rpc_state = _RPCState()
-  rpc_event.operation_call.start_batch(
+  rpc_event.operation_call.start_server_batch(
       operations, lambda ignored_event: (rpc_state, (),))
   return rpc_state
 
@@ -556,7 +558,7 @@ def _handle_unrecognized_method(rpc_event):
 def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
   state = _RPCState()
   with state.condition:
-    rpc_event.operation_call.start_batch(
+    rpc_event.operation_call.start_server_batch(
         cygrpc.Operations(
             (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
         _receive_close_on_server(state))
@@ -729,7 +731,7 @@ def _start(state):
 
 class Server(grpc.Server):
 
-  def __init__(self, generic_handlers, thread_pool):
+  def __init__(self, thread_pool, generic_handlers):
     completion_queue = cygrpc.CompletionQueue()
     server = cygrpc.Server()
     server.register_completion_queue(completion_queue)
@@ -740,10 +742,10 @@ class Server(grpc.Server):
     _add_generic_handlers(self._state, generic_rpc_handlers)
 
   def add_insecure_port(self, address):
-    return _add_insecure_port(self._state, address)
+    return _add_insecure_port(self._state, _common.encode(address))
 
   def add_secure_port(self, address, server_credentials):
-    return _add_secure_port(self._state, address, server_credentials)
+    return _add_secure_port(self._state, _common.encode(address), server_credentials)
 
   def start(self):
     _start(self._state)
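
Note: besides the bytes handling and the `start_batch` to `start_server_batch` rename, this hunk swaps `Server.__init__` to take the thread pool before the generic handlers and now encodes addresses inside `add_insecure_port`/`add_secure_port`. A short sketch of the resulting call pattern follows; the handler tuple is a placeholder, not part of this patch.

```python
# Call-site shape implied by the constructor swap above; the empty generic
# handler is a placeholder.
from concurrent import futures

import grpc
from grpc import _server

pool = futures.ThreadPoolExecutor(max_workers=4)
handlers = (grpc.method_handlers_generic_handler('example.Service', {}),)

# Previously Server(generic_handlers, thread_pool); now the pool comes first.
server = _server.Server(pool, handlers)
# Addresses may be passed as str; add_insecure_port encodes them to bytes.
port = server.add_insecure_port('[::]:0')
```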
diff --git a/src/python/grpcio/grpc/beta/_client_adaptations.py b/src/python/grpcio/grpc/beta/_client_adaptations.py
index 024808c5404168657c5a5656b44a928d9d1aa36f..73415e0be7f3465cae9459fbef6f425697ff5373 100644
--- a/src/python/grpcio/grpc/beta/_client_adaptations.py
+++ b/src/python/grpcio/grpc/beta/_client_adaptations.py
@@ -117,7 +117,10 @@ class _Rendezvous(future.Future, face.Call):
   def exception(self, timeout=None):
     try:
       rpc_error_call = self._future.exception(timeout=timeout)
-      return _abortion_error(rpc_error_call)
+      if rpc_error_call is None:
+        return None
+      else:
+        return _abortion_error(rpc_error_call)
     except grpc.FutureTimeoutError:
       raise future.TimeoutError()
     except grpc.FutureCancelledError:
@@ -186,9 +189,9 @@ def _blocking_unary_unary(
         response_deserializer=response_deserializer)
     effective_metadata = _effective_metadata(metadata, metadata_transformer)
     if with_call:
-      response, call = multi_callable(
+      response, call = multi_callable.with_call(
           request, timeout=timeout, metadata=effective_metadata,
-          credentials=_credentials(protocol_options), with_call=True)
+          credentials=_credentials(protocol_options))
       return response, _Rendezvous(None, None, call)
     else:
       return multi_callable(
@@ -237,9 +240,9 @@ def _blocking_stream_unary(
         response_deserializer=response_deserializer)
     effective_metadata = _effective_metadata(metadata, metadata_transformer)
     if with_call:
-      response, call = multi_callable(
+      response, call = multi_callable.with_call(
           request_iterator, timeout=timeout, metadata=effective_metadata,
-          credentials=_credentials(protocol_options), with_call=True)
+          credentials=_credentials(protocol_options))
       return response, _Rendezvous(None, None, call)
     else:
       return multi_callable(
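
Note: the beta client adaptation above stops passing `with_call=True` to the multi-callable and instead uses its `with_call` method. The sketch below shows the same change at a plain GA call site; the channel target and method path are illustrative.

```python
# Sketch of the GA calling convention this hunk adapts to; the target and
# method path are illustrative.
import grpc

channel = grpc.insecure_channel('localhost:50051')
echo = channel.unary_unary('/example.Echo/Echo')

try:
  # Before: response, call = echo(b'ping', timeout=1.0, with_call=True)
  # After:  the response-plus-call form is a separate method.
  response, call = echo.with_call(b'ping', timeout=1.0)
except grpc.RpcError:
  pass  # no server is running in this sketch; a real call returns both values
```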
diff --git a/src/python/grpcio/grpc/beta/_server.py b/src/python/grpcio/grpc/beta/_server.py
deleted file mode 100644
index eb0aadb42f975a894ae1633f6d215910ce220967..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/beta/_server.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Beta API server implementation."""
-
-import threading
-
-from grpc._links import service
-from grpc.beta import interfaces
-from grpc.framework.core import implementations as _core_implementations
-from grpc.framework.crust import implementations as _crust_implementations
-from grpc.framework.foundation import logging_pool
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.links import utilities
-
-_DEFAULT_POOL_SIZE = 8
-_DEFAULT_TIMEOUT = 300
-_MAXIMUM_TIMEOUT = 24 * 60 * 60
-
-
-def _set_event():
-  event = threading.Event()
-  event.set()
-  return event
-
-
-class _GRPCServicer(base.Servicer):
-
-  def __init__(self, delegate):
-    self._delegate = delegate
-
-  def service(self, group, method, context, output_operator):
-    try:
-      return self._delegate.service(group, method, context, output_operator)
-    except base.NoSuchMethodError as e:
-      if e.code is None and e.details is None:
-        raise base.NoSuchMethodError(
-            interfaces.StatusCode.UNIMPLEMENTED,
-            'Method "%s" of service "%s" not implemented!' % (method, group))
-      else:
-        raise
-
-
-class _Server(interfaces.Server):
-
-  def __init__(
-      self, implementations, multi_implementation, pool, pool_size,
-      default_timeout, maximum_timeout, grpc_link):
-    self._lock = threading.Lock()
-    self._implementations = implementations
-    self._multi_implementation = multi_implementation
-    self._customer_pool = pool
-    self._pool_size = pool_size
-    self._default_timeout = default_timeout
-    self._maximum_timeout = maximum_timeout
-    self._grpc_link = grpc_link
-
-    self._end_link = None
-    self._stop_events = None
-    self._pool = None
-
-  def _start(self):
-    with self._lock:
-      if self._end_link is not None:
-        raise ValueError('Cannot start already-started server!')
-
-      if self._customer_pool is None:
-        self._pool = logging_pool.pool(self._pool_size)
-        assembly_pool = self._pool
-      else:
-        assembly_pool = self._customer_pool
-
-      servicer = _GRPCServicer(
-          _crust_implementations.servicer(
-              self._implementations, self._multi_implementation, assembly_pool))
-
-      self._end_link = _core_implementations.service_end_link(
-          servicer, self._default_timeout, self._maximum_timeout)
-
-      self._grpc_link.join_link(self._end_link)
-      self._end_link.join_link(self._grpc_link)
-      self._grpc_link.start()
-      self._end_link.start()
-
-  def _dissociate_links_and_shut_down_pool(self):
-    self._grpc_link.end_stop()
-    self._grpc_link.join_link(utilities.NULL_LINK)
-    self._end_link.join_link(utilities.NULL_LINK)
-    self._end_link = None
-    if self._pool is not None:
-      self._pool.shutdown(wait=True)
-    self._pool = None
-
-  def _stop_stopping(self):
-    self._dissociate_links_and_shut_down_pool()
-    for stop_event in self._stop_events:
-      stop_event.set()
-    self._stop_events = None
-
-  def _stop_started(self):
-    self._grpc_link.begin_stop()
-    self._end_link.stop(0).wait()
-    self._dissociate_links_and_shut_down_pool()
-
-  def _foreign_thread_stop(self, end_stop_event, stop_events):
-    end_stop_event.wait()
-    with self._lock:
-      if self._stop_events is stop_events:
-        self._stop_stopping()
-
-  def _schedule_stop(self, grace):
-    with self._lock:
-      if self._end_link is None:
-        return _set_event()
-      server_stop_event = threading.Event()
-      if self._stop_events is None:
-        self._stop_events = [server_stop_event]
-        self._grpc_link.begin_stop()
-      else:
-        self._stop_events.append(server_stop_event)
-      end_stop_event = self._end_link.stop(grace)
-      end_stop_thread = threading.Thread(
-          target=self._foreign_thread_stop,
-          args=(end_stop_event, self._stop_events))
-      end_stop_thread.start()
-      return server_stop_event
-
-  def _stop_now(self):
-    with self._lock:
-      if self._end_link is not None:
-        if self._stop_events is None:
-          self._stop_started()
-        else:
-          self._stop_stopping()
-
-  def add_insecure_port(self, address):
-    with self._lock:
-      if self._end_link is None:
-        return self._grpc_link.add_port(address, None)
-      else:
-        raise ValueError('Can\'t add port to serving server!')
-
-  def add_secure_port(self, address, server_credentials):
-    with self._lock:
-      if self._end_link is None:
-        return self._grpc_link.add_port(
-            address, server_credentials._low_credentials)  # pylint: disable=protected-access
-      else:
-        raise ValueError('Can\'t add port to serving server!')
-
-  def start(self):
-    self._start()
-
-  def stop(self, grace):
-    if 0 < grace:
-      return self._schedule_stop(grace)
-    else:
-      self._stop_now()
-      return _set_event()
-
-  def __enter__(self):
-    self._start()
-    return self
-
-  def __exit__(self, exc_type, exc_val, exc_tb):
-    self._stop_now()
-    return False
-
-  def __del__(self):
-    self._stop_now()
-
-
-def server(
-    implementations, multi_implementation, request_deserializers,
-    response_serializers, thread_pool, thread_pool_size, default_timeout,
-    maximum_timeout):
-  grpc_link = service.service_link(request_deserializers, response_serializers)
-  return _Server(
-      implementations, multi_implementation, thread_pool,
-      _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size,
-      _DEFAULT_TIMEOUT if default_timeout is None else default_timeout,
-      _MAXIMUM_TIMEOUT if maximum_timeout is None else maximum_timeout,
-      grpc_link)
diff --git a/src/python/grpcio/grpc/beta/_server_adaptations.py b/src/python/grpcio/grpc/beta/_server_adaptations.py
index 79e6ca87eb5223216b2db9ceaf7ef14911fc5d4c..cca4a1797a1ab8425e033de2860a2457b9ce2b23 100644
--- a/src/python/grpcio/grpc/beta/_server_adaptations.py
+++ b/src/python/grpcio/grpc/beta/_server_adaptations.py
@@ -79,7 +79,8 @@ class _FaceServicerContext(face.ServicerContext):
     return _ServerProtocolContext(self._servicer_context)
 
   def invocation_metadata(self):
-    return self._servicer_context.invocation_metadata()
+    return _common.cygrpc_metadata(
+        self._servicer_context.invocation_metadata())
 
   def initial_metadata(self, initial_metadata):
     self._servicer_context.send_initial_metadata(initial_metadata)
@@ -161,14 +162,24 @@ class _Callback(stream.Consumer):
           self._condition.wait()
 
 
-def _pipe_requests(request_iterator, request_consumer, servicer_context):
-  for request in request_iterator:
-    if not servicer_context.is_active():
-      return
-    request_consumer.consume(request)
-    if not servicer_context.is_active():
-      return
-  request_consumer.terminate()
+def _run_request_pipe_thread(request_iterator, request_consumer,
+                             servicer_context):
+  thread_joined = threading.Event()
+  def pipe_requests():
+    for request in request_iterator:
+      if not servicer_context.is_active() or thread_joined.is_set():
+        return
+      request_consumer.consume(request)
+      if not servicer_context.is_active() or thread_joined.is_set():
+        return
+    request_consumer.terminate()
+
+  def stop_request_pipe(timeout):
+    thread_joined.set()
+
+  request_pipe_thread = _common.CleanupThread(
+      stop_request_pipe, target=pipe_requests)
+  request_pipe_thread.start()
 
 
 def _adapt_unary_unary_event(unary_unary_event):
@@ -206,10 +217,8 @@ def _adapt_stream_unary_event(stream_unary_event):
       raise abandonment.Abandoned()
     request_consumer = stream_unary_event(
         callback.consume_and_terminate, _FaceServicerContext(servicer_context))
-    request_pipe_thread = threading.Thread(
-        target=_pipe_requests,
-        args=(request_iterator, request_consumer, servicer_context,))
-    request_pipe_thread.start()
+    _run_request_pipe_thread(
+        request_iterator, request_consumer, servicer_context)
     return callback.draw_all_values()[0]
   return adaptation
 
@@ -221,10 +230,8 @@ def _adapt_stream_stream_event(stream_stream_event):
       raise abandonment.Abandoned()
     request_consumer = stream_stream_event(
         callback, _FaceServicerContext(servicer_context))
-    request_pipe_thread = threading.Thread(
-        target=_pipe_requests,
-        args=(request_iterator, request_consumer, servicer_context,))
-    request_pipe_thread.start()
+    _run_request_pipe_thread(
+        request_iterator, request_consumer, servicer_context)
     while True:
       response = callback.draw_one_value()
       if response is None:
@@ -364,4 +371,5 @@ def server(
         _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size)
   else:
     effective_thread_pool = thread_pool
-  return _Server(grpc.server((generic_rpc_handler,), effective_thread_pool))
+  return _Server(
+      grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,)))
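
Note: `_run_request_pipe_thread` above runs the request-piping loop on `grpc._common.CleanupThread`, which as used here takes a stop behavior plus the usual `threading.Thread` keyword arguments so the pipe can be signalled to exit during shutdown. The class below is only a hypothetical stand-in with the shape that usage implies; the real `CleanupThread` may differ.

```python
# Hypothetical stand-in for the thread wrapper used above: a Thread whose
# extra cleanup behavior runs when join() is called, so the request pipe can
# be told to exit during server shutdown. Not the real implementation.
import threading


class CleanupThread(threading.Thread):

  def __init__(self, behavior, *args, **kwargs):
    super(CleanupThread, self).__init__(*args, **kwargs)
    self._behavior = behavior  # e.g. stop_request_pipe(timeout)

  def join(self, timeout=None):
    self._behavior(timeout)
    super(CleanupThread, self).join(timeout)
```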
diff --git a/src/python/grpcio/grpc/beta/_stub.py b/src/python/grpcio/grpc/beta/_stub.py
deleted file mode 100644
index 2af019309afc412077089c21986f3397e690218c..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/beta/_stub.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Beta API stub implementation."""
-
-import threading
-
-from grpc._links import invocation
-from grpc.framework.core import implementations as _core_implementations
-from grpc.framework.crust import implementations as _crust_implementations
-from grpc.framework.foundation import logging_pool
-from grpc.framework.interfaces.links import utilities
-
-_DEFAULT_POOL_SIZE = 6
-
-
-class _AutoIntermediary(object):
-
-  def __init__(self, up, down, delegate):
-    self._lock = threading.Lock()
-    self._up = up
-    self._down = down
-    self._in_context = False
-    self._delegate = delegate
-
-  def __getattr__(self, attr):
-    with self._lock:
-      if self._delegate is None:
-        raise AttributeError('No useful attributes out of context!')
-      else:
-        return getattr(self._delegate, attr)
-
-  def __enter__(self):
-    with self._lock:
-      if self._in_context:
-        raise ValueError('Already in context!')
-      elif self._delegate is None:
-        self._delegate = self._up()
-      self._in_context = True
-      return self
-
-  def __exit__(self, exc_type, exc_val, exc_tb):
-    with self._lock:
-      if not self._in_context:
-        raise ValueError('Not in context!')
-      self._down()
-      self._in_context = False
-      self._delegate = None
-      return False
-
-  def __del__(self):
-    with self._lock:
-      if self._delegate is not None:
-        self._down()
-        self._delegate = None
-
-
-class _StubAssemblyManager(object):
-
-  def __init__(
-      self, thread_pool, thread_pool_size, end_link, grpc_link, stub_creator):
-    self._thread_pool = thread_pool
-    self._pool_size = thread_pool_size
-    self._end_link = end_link
-    self._grpc_link = grpc_link
-    self._stub_creator = stub_creator
-    self._own_pool = None
-
-  def up(self):
-    if self._thread_pool is None:
-      self._own_pool = logging_pool.pool(
-          _DEFAULT_POOL_SIZE if self._pool_size is None else self._pool_size)
-      assembly_pool = self._own_pool
-    else:
-      assembly_pool = self._thread_pool
-    self._end_link.join_link(self._grpc_link)
-    self._grpc_link.join_link(self._end_link)
-    self._end_link.start()
-    self._grpc_link.start()
-    return self._stub_creator(self._end_link, assembly_pool)
-
-  def down(self):
-    self._end_link.stop(0).wait()
-    self._grpc_link.stop()
-    self._end_link.join_link(utilities.NULL_LINK)
-    self._grpc_link.join_link(utilities.NULL_LINK)
-    if self._own_pool is not None:
-      self._own_pool.shutdown(wait=True)
-      self._own_pool = None
-
-
-def _assemble(
-    channel, host, metadata_transformer, request_serializers,
-    response_deserializers, thread_pool, thread_pool_size, stub_creator):
-  end_link = _core_implementations.invocation_end_link()
-  grpc_link = invocation.invocation_link(
-      channel, host, metadata_transformer, request_serializers,
-      response_deserializers)
-  stub_assembly_manager = _StubAssemblyManager(
-      thread_pool, thread_pool_size, end_link, grpc_link, stub_creator)
-  stub = stub_assembly_manager.up()
-  return _AutoIntermediary(
-      stub_assembly_manager.up, stub_assembly_manager.down, stub)
-
-
-def _dynamic_stub_creator(service, cardinalities):
-  def create_dynamic_stub(end_link, invocation_pool):
-    return _crust_implementations.dynamic_stub(
-        end_link, service, cardinalities, invocation_pool)
-  return create_dynamic_stub
-
-
-def generic_stub(
-    channel, host, metadata_transformer, request_serializers,
-    response_deserializers, thread_pool, thread_pool_size):
-  return _assemble(
-      channel, host, metadata_transformer, request_serializers,
-      response_deserializers, thread_pool, thread_pool_size,
-      _crust_implementations.generic_stub)
-
-
-def dynamic_stub(
-    channel, host, service, cardinalities, metadata_transformer,
-    request_serializers, response_deserializers, thread_pool,
-    thread_pool_size):
-  return _assemble(
-      channel, host, metadata_transformer, request_serializers,
-      response_deserializers, thread_pool, thread_pool_size,
-      _dynamic_stub_creator(service, cardinalities))
diff --git a/src/python/grpcio/grpc/beta/implementations.py b/src/python/grpcio/grpc/beta/implementations.py
index 4ae6e7d6751ead630a3d79cbb1bdcfc9afab8f60..ab25fd5eec79ce78b01cbae04f8c94ca7107c2f9 100644
--- a/src/python/grpcio/grpc/beta/implementations.py
+++ b/src/python/grpcio/grpc/beta/implementations.py
@@ -37,7 +37,6 @@ import threading  # pylint: disable=unused-import
 # cardinality and face are referenced from specification in this module.
 import grpc
 from grpc import _auth
-from grpc._adapter import _types
 from grpc.beta import _client_adaptations
 from grpc.beta import _server_adaptations
 from grpc.beta import interfaces
diff --git a/src/python/grpcio/grpc/framework/core/_constants.py b/src/python/grpcio/grpc/framework/core/_constants.py
deleted file mode 100644
index 0f47cb48e0365e3e10230ab39227a0027005ef3e..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_constants.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Private constants for the package."""
-
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.links import links
-
-TICKET_SUBSCRIPTION_FOR_BASE_SUBSCRIPTION_KIND = {
-    base.Subscription.Kind.NONE: links.Ticket.Subscription.NONE,
-    base.Subscription.Kind.TERMINATION_ONLY:
-        links.Ticket.Subscription.TERMINATION,
-    base.Subscription.Kind.FULL: links.Ticket.Subscription.FULL,
-    }
-
-# Mapping from abortive operation outcome to ticket termination to be
-# sent to the other side of the operation, or None to indicate that no
-# ticket should be sent to the other side in the event of such an
-# outcome.
-ABORTION_OUTCOME_TO_TICKET_TERMINATION = {
-    base.Outcome.Kind.CANCELLED: links.Ticket.Termination.CANCELLATION,
-    base.Outcome.Kind.EXPIRED: links.Ticket.Termination.EXPIRATION,
-    base.Outcome.Kind.LOCAL_SHUTDOWN: links.Ticket.Termination.SHUTDOWN,
-    base.Outcome.Kind.REMOTE_SHUTDOWN: None,
-    base.Outcome.Kind.RECEPTION_FAILURE:
-        links.Ticket.Termination.RECEPTION_FAILURE,
-    base.Outcome.Kind.TRANSMISSION_FAILURE: None,
-    base.Outcome.Kind.LOCAL_FAILURE: links.Ticket.Termination.LOCAL_FAILURE,
-    base.Outcome.Kind.REMOTE_FAILURE: links.Ticket.Termination.REMOTE_FAILURE,
-}
-
-INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Core) internal error! )-:'
-TERMINATION_CALLBACK_EXCEPTION_LOG_MESSAGE = (
-    'Exception calling termination callback!')
diff --git a/src/python/grpcio/grpc/framework/core/_context.py b/src/python/grpcio/grpc/framework/core/_context.py
deleted file mode 100644
index a346e9d478b8bbfde59aa3bca7ae3f3c721fb4da..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_context.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for operation context."""
-
-import time
-
-# _interfaces is referenced from specification in this module.
-from grpc.framework.core import _interfaces  # pylint: disable=unused-import
-from grpc.framework.core import _utilities
-from grpc.framework.interfaces.base import base
-
-
-class OperationContext(base.OperationContext):
-  """An implementation of interfaces.OperationContext."""
-
-  def __init__(
-      self, lock, termination_manager, transmission_manager,
-      expiration_manager):
-    """Constructor.
-
-    Args:
-      lock: The operation-wide lock.
-      termination_manager: The _interfaces.TerminationManager for the operation.
-      transmission_manager: The _interfaces.TransmissionManager for the
-        operation.
-      expiration_manager: The _interfaces.ExpirationManager for the operation.
-    """
-    self._lock = lock
-    self._termination_manager = termination_manager
-    self._transmission_manager = transmission_manager
-    self._expiration_manager = expiration_manager
-
-  def _abort(self, outcome_kind):
-    with self._lock:
-      if self._termination_manager.outcome is None:
-        outcome = _utilities.Outcome(outcome_kind, None, None)
-        self._termination_manager.abort(outcome)
-        self._transmission_manager.abort(outcome)
-        self._expiration_manager.terminate()
-
-  def outcome(self):
-    """See base.OperationContext.outcome for specification."""
-    with self._lock:
-      return self._termination_manager.outcome
-
-  def add_termination_callback(self, callback):
-    """See base.OperationContext.add_termination_callback."""
-    with self._lock:
-      if self._termination_manager.outcome is None:
-        self._termination_manager.add_callback(callback)
-        return None
-      else:
-        return self._termination_manager.outcome
-
-  def time_remaining(self):
-    """See base.OperationContext.time_remaining for specification."""
-    with self._lock:
-      deadline = self._expiration_manager.deadline()
-    return max(0.0, deadline - time.time())
-
-  def cancel(self):
-    """See base.OperationContext.cancel for specification."""
-    self._abort(base.Outcome.Kind.CANCELLED)
-
-  def fail(self, exception):
-    """See base.OperationContext.fail for specification."""
-    self._abort(base.Outcome.Kind.LOCAL_FAILURE)
diff --git a/src/python/grpcio/grpc/framework/core/_emission.py b/src/python/grpcio/grpc/framework/core/_emission.py
deleted file mode 100644
index 8ab59dc3e585346a6f333560890053191c7228a1..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_emission.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for handling emitted values."""
-
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.interfaces.base import base
-
-
-class EmissionManager(_interfaces.EmissionManager):
-  """An EmissionManager implementation."""
-
-  def __init__(
-      self, lock, termination_manager, transmission_manager,
-      expiration_manager):
-    """Constructor.
-
-    Args:
-      lock: The operation-wide lock.
-      termination_manager: The _interfaces.TerminationManager for the operation.
-      transmission_manager: The _interfaces.TransmissionManager for the
-        operation.
-      expiration_manager: The _interfaces.ExpirationManager for the operation.
-    """
-    self._lock = lock
-    self._termination_manager = termination_manager
-    self._transmission_manager = transmission_manager
-    self._expiration_manager = expiration_manager
-    self._ingestion_manager = None
-
-    self._initial_metadata_seen = False
-    self._payload_seen = False
-    self._completion_seen = False
-
-  def set_ingestion_manager(self, ingestion_manager):
-    """Sets the ingestion manager with which this manager will cooperate.
-
-    Args:
-      ingestion_manager: The _interfaces.IngestionManager for the operation.
-    """
-    self._ingestion_manager = ingestion_manager
-
-  def advance(
-      self, initial_metadata=None, payload=None, completion=None,
-      allowance=None):
-    initial_metadata_present = initial_metadata is not None
-    payload_present = payload is not None
-    completion_present = completion is not None
-    allowance_present = allowance is not None
-    with self._lock:
-      if self._termination_manager.outcome is None:
-        if (initial_metadata_present and (
-                self._initial_metadata_seen or self._payload_seen or
-                self._completion_seen) or
-            payload_present and self._completion_seen or
-            completion_present and self._completion_seen or
-            allowance_present and allowance <= 0):
-          outcome = _utilities.Outcome(
-              base.Outcome.Kind.LOCAL_FAILURE, None, None)
-          self._termination_manager.abort(outcome)
-          self._transmission_manager.abort(outcome)
-          self._expiration_manager.terminate()
-        else:
-          self._initial_metadata_seen |= initial_metadata_present
-          self._payload_seen |= payload_present
-          self._completion_seen |= completion_present
-          if completion_present:
-            self._termination_manager.emission_complete()
-            self._ingestion_manager.local_emissions_done()
-          self._transmission_manager.advance(
-              initial_metadata, payload, completion, allowance)
-          if allowance_present:
-            self._ingestion_manager.add_local_allowance(allowance)
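
The ordering rule enforced by EmissionManager.advance above is easiest to see in isolation: initial metadata may appear at most once and only first, nothing may follow a completion, and allowances must be positive. The class below is a hypothetical standalone sketch of that seen-flag bookkeeping only; it is not part of grpc.framework and omits the lock, termination, and transmission machinery.

class _EmissionOrderSketch(object):
  """Tracks which kinds of values have been emitted so far."""

  def __init__(self):
    self._initial_metadata_seen = False
    self._payload_seen = False
    self._completion_seen = False

  def check(self, initial_metadata=None, payload=None, completion=None,
            allowance=None):
    """Returns True if the emission is legal, False if it would abort."""
    if initial_metadata is not None and (
        self._initial_metadata_seen or self._payload_seen or
        self._completion_seen):
      return False  # Initial metadata must come first and at most once.
    if (payload is not None or completion is not None) and self._completion_seen:
      return False  # Nothing may follow a completion.
    if allowance is not None and allowance <= 0:
      return False  # Allowances must be positive.
    self._initial_metadata_seen |= initial_metadata is not None
    self._payload_seen |= payload is not None
    self._completion_seen |= completion is not None
    return True


checker = _EmissionOrderSketch()
assert checker.check(initial_metadata={'key': 'value'})
assert checker.check(payload=b'data')
assert checker.check(completion=object())
assert not checker.check(payload=b'too late')  # Nothing is legal after completion.
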
diff --git a/src/python/grpcio/grpc/framework/core/_end.py b/src/python/grpcio/grpc/framework/core/_end.py
deleted file mode 100644
index 009d27c915c4409479562e98fe4d5ecac78b2c47..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_end.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Implementation of base.End."""
-
-import abc
-import threading
-import uuid
-
-import six
-
-from grpc.framework.core import _operation
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import callable_util
-from grpc.framework.foundation import later
-from grpc.framework.foundation import logging_pool
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.links import links
-from grpc.framework.interfaces.links import utilities
-
-_IDLE_ACTION_EXCEPTION_LOG_MESSAGE = 'Exception calling idle action!'
-
-
-class End(six.with_metaclass(abc.ABCMeta, base.End, links.Link)):
-  """A bridge between base.End and links.Link.
-
-  Implementations of this interface translate arriving tickets into
-  calls on application objects implementing base interfaces and
-  translate calls from application objects implementing base interfaces
-  into tickets sent to a joined link.
-  """
-
-
-class _Cycle(object):
-  """State for a single start-stop End lifecycle."""
-
-  def __init__(self, pool):
-    self.pool = pool
-    self.grace = False
-    self.futures = []
-    self.operations = {}
-    self.idle_actions = []
-
-
-def _abort(operations):
-  for operation in operations:
-    operation.abort(base.Outcome.Kind.LOCAL_SHUTDOWN)
-
-
-def _cancel_futures(futures):
-  for future in futures:
-    future.cancel()
-
-
-def _future_shutdown(lock, cycle, event):
-  def in_future():
-    with lock:
-      _abort(cycle.operations.values())
-      _cancel_futures(cycle.futures)
-  return in_future
-
-
-class _End(End):
-  """An End implementation."""
-
-  def __init__(self, servicer_package):
-    """Constructor.
-
-    Args:
-      servicer_package: A _ServicerPackage for servicing operations or None if
-        this end will not be used to service operations.
-    """
-    self._lock = threading.Condition()
-    self._servicer_package = servicer_package
-
-    self._stats = {outcome_kind: 0 for outcome_kind in base.Outcome.Kind}
-
-    self._mate = None
-
-    self._cycle = None
-
-  def _termination_action(self, operation_id):
-    """Constructs the termination action for a single operation.
-
-    Args:
-      operation_id: The operation ID for the termination action.
-
-    Returns:
-      A callable that takes an operation outcome kind as its sole parameter and
-        that should be used as the termination action for the operation
-        associated with the given operation ID.
-    """
-    def termination_action(outcome_kind):
-      with self._lock:
-        self._stats[outcome_kind] += 1
-        self._cycle.operations.pop(operation_id, None)
-        if not self._cycle.operations:
-          for action in self._cycle.idle_actions:
-            self._cycle.pool.submit(action)
-          self._cycle.idle_actions = []
-          if self._cycle.grace:
-            _cancel_futures(self._cycle.futures)
-            self._cycle.pool.shutdown(wait=False)
-            self._cycle = None
-    return termination_action
-
-  def start(self):
-    """See base.End.start for specification."""
-    with self._lock:
-      if self._cycle is not None:
-        raise ValueError('Tried to start a not-stopped End!')
-      else:
-        self._cycle = _Cycle(logging_pool.pool(1))
-
-  def stop(self, grace):
-    """See base.End.stop for specification."""
-    with self._lock:
-      if self._cycle is None:
-        event = threading.Event()
-        event.set()
-        return event
-      elif not self._cycle.operations:
-        event = threading.Event()
-        self._cycle.pool.submit(event.set)
-        self._cycle.pool.shutdown(wait=False)
-        self._cycle = None
-        return event
-      else:
-        self._cycle.grace = True
-        event = threading.Event()
-        self._cycle.idle_actions.append(event.set)
-        if 0 < grace:
-          future = later.later(
-              grace, _future_shutdown(self._lock, self._cycle, event))
-          self._cycle.futures.append(future)
-        else:
-          _abort(self._cycle.operations.values())
-        return event
-
-  def operate(
-      self, group, method, subscription, timeout, initial_metadata=None,
-      payload=None, completion=None, protocol_options=None):
-    """See base.End.operate for specification."""
-    operation_id = uuid.uuid4()
-    with self._lock:
-      if self._cycle is None or self._cycle.grace:
-        raise ValueError('Can\'t operate on stopped or stopping End!')
-      termination_action = self._termination_action(operation_id)
-      operation = _operation.invocation_operate(
-          operation_id, group, method, subscription, timeout, protocol_options,
-          initial_metadata, payload, completion, self._mate.accept_ticket,
-          termination_action, self._cycle.pool)
-      self._cycle.operations[operation_id] = operation
-      return operation.context, operation.operator
-
-  def operation_stats(self):
-    """See base.End.operation_stats for specification."""
-    with self._lock:
-      return dict(self._stats)
-
-  def add_idle_action(self, action):
-    """See base.End.add_idle_action for specification."""
-    with self._lock:
-      if self._cycle is None:
-        raise ValueError('Can\'t add idle action to stopped End!')
-      action_with_exceptions_logged = callable_util.with_exceptions_logged(
-          action, _IDLE_ACTION_EXCEPTION_LOG_MESSAGE)
-      if self._cycle.operations:
-        self._cycle.idle_actions.append(action_with_exceptions_logged)
-      else:
-        self._cycle.pool.submit(action_with_exceptions_logged)
-
-  def accept_ticket(self, ticket):
-    """See links.Link.accept_ticket for specification."""
-    with self._lock:
-      if self._cycle is not None:
-        operation = self._cycle.operations.get(ticket.operation_id)
-        if operation is not None:
-          operation.handle_ticket(ticket)
-        elif self._servicer_package is not None and not self._cycle.grace:
-          termination_action = self._termination_action(ticket.operation_id)
-          operation = _operation.service_operate(
-              self._servicer_package, ticket, self._mate.accept_ticket,
-              termination_action, self._cycle.pool)
-          if operation is not None:
-            self._cycle.operations[ticket.operation_id] = operation
-
-  def join_link(self, link):
-    """See links.Link.join_link for specification."""
-    with self._lock:
-      self._mate = utilities.NULL_LINK if link is None else link
-
-
-def serviceless_end_link():
-  """Constructs an End usable only for invoking operations.
-
-  Returns:
-    An End usable for translating operations into ticket exchange.
-  """
-  return _End(None)
-
-
-def serviceful_end_link(servicer, default_timeout, maximum_timeout):
-  """Constructs an End capable of servicing operations.
-
-  Args:
-    servicer: An interfaces.Servicer for servicing operations.
-    default_timeout: A length of time in seconds to be used as the default
-      time allotted for a single operation.
-    maximum_timeout: A length of time in seconds to be used as the maximum
-      time allotted for a single operation.
-
-  Returns:
-    An End capable of servicing the operations requested of it through ticket
-      exchange.
-  """
-  return _End(
-      _utilities.ServicerPackage(servicer, default_timeout, maximum_timeout))
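
The stop(grace) semantics of _End above reduce to a small pattern: if no operations are live the returned event is set immediately; otherwise the event is set when the last operation drains or, failing that, when the grace period elapses. The sketch below uses hypothetical names and a plain timer in place of the real cycle/pool machinery; it only models that behavior.

import threading


class _GracefulStopSketch(object):
  """Models the grace-period shutdown of _End.stop without the real pool/cycle."""

  def __init__(self):
    self._lock = threading.Lock()
    self._live_operations = set()
    self._stopped_event = None
    self._grace_timer = None

  def operation_finished(self, operation_id):
    with self._lock:
      self._live_operations.discard(operation_id)
      if not self._live_operations and self._stopped_event is not None:
        if self._grace_timer is not None:
          self._grace_timer.cancel()
        self._stopped_event.set()

  def stop(self, grace):
    with self._lock:
      event = threading.Event()
      if not self._live_operations:
        event.set()  # Nothing in flight: stopping completes immediately.
        return event
      self._stopped_event = event
      if 0 < grace:
        # Even if operations never drain, force completion after the grace period.
        self._grace_timer = threading.Timer(grace, event.set)
        self._grace_timer.start()
      return event


sketch = _GracefulStopSketch()
sketch._live_operations.add('operation-1')
stopped = sketch.stop(grace=5.0)
assert not stopped.is_set()            # Still waiting on the live operation.
sketch.operation_finished('operation-1')
assert stopped.wait(timeout=1.0)       # Draining the last operation completes the stop.
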
diff --git a/src/python/grpcio/grpc/framework/core/_expiration.py b/src/python/grpcio/grpc/framework/core/_expiration.py
deleted file mode 100644
index ded0ab6bcef366539eb005f37c937888714176f6..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_expiration.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for operation expiration."""
-
-import time
-
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import later
-from grpc.framework.interfaces.base import base
-
-
-class _ExpirationManager(_interfaces.ExpirationManager):
-  """An implementation of _interfaces.ExpirationManager."""
-
-  def __init__(
-      self, commencement, timeout, maximum_timeout, lock, termination_manager,
-      transmission_manager):
-    """Constructor.
-
-    Args:
-      commencement: The time in seconds since the epoch at which the operation
-        began.
-      timeout: A length of time in seconds to allow for the operation to run.
-      maximum_timeout: The maximum length of time in seconds to allow for the
-        operation to run despite what is requested via this object's
-        change_timeout method.
-      lock: The operation-wide lock.
-      termination_manager: The _interfaces.TerminationManager for the operation.
-      transmission_manager: The _interfaces.TransmissionManager for the
-        operation.
-    """
-    self._lock = lock
-    self._termination_manager = termination_manager
-    self._transmission_manager = transmission_manager
-    self._commencement = commencement
-    self._maximum_timeout = maximum_timeout
-
-    self._timeout = timeout
-    self._deadline = commencement + timeout
-    self._index = None
-    self._future = None
-
-  def _expire(self, index):
-    def expire():
-      with self._lock:
-        if self._future is not None and index == self._index:
-          self._future = None
-          self._termination_manager.expire()
-          self._transmission_manager.abort(
-              _utilities.Outcome(base.Outcome.Kind.EXPIRED, None, None))
-    return expire
-
-  def start(self):
-    self._index = 0
-    self._future = later.later(self._timeout, self._expire(0))
-
-  def change_timeout(self, timeout):
-    if self._future is not None and timeout != self._timeout:
-      self._future.cancel()
-      new_timeout = min(timeout, self._maximum_timeout)
-      new_index = self._index + 1
-      self._timeout = new_timeout
-      self._deadline = self._commencement + new_timeout
-      self._index = new_index
-      delay = self._deadline - time.time()
-      self._future = later.later(delay, self._expire(new_index))
-      if new_timeout != timeout:
-        self._transmission_manager.timeout(new_timeout)
-
-  def deadline(self):
-    return self._deadline
-
-  def terminate(self):
-    if self._future:
-      self._future.cancel()
-      self._future = None
-    self._index = None
-
-
-def invocation_expiration_manager(
-    timeout, lock, termination_manager, transmission_manager):
-  """Creates an _interfaces.ExpirationManager appropriate for front-side use.
-
-  Args:
-    timeout: A length of time in seconds to allow for the operation to run.
-    lock: The operation-wide lock.
-    termination_manager: The _interfaces.TerminationManager for the operation.
-    transmission_manager: The _interfaces.TransmissionManager for the
-      operation.
-
-  Returns:
-    An _interfaces.ExpirationManager appropriate for invocation-side use.
-  """
-  expiration_manager = _ExpirationManager(
-      time.time(), timeout, timeout, lock, termination_manager,
-      transmission_manager)
-  expiration_manager.start()
-  return expiration_manager
-
-
-def service_expiration_manager(
-    timeout, default_timeout, maximum_timeout, lock, termination_manager,
-    transmission_manager):
-  """Creates an _interfaces.ExpirationManager appropriate for back-side use.
-
-  Args:
-    timeout: A length of time in seconds to allow for the operation to run. May
-      be None in which case default_timeout will be used.
-    default_timeout: The default length of time in seconds to allow for the
-      operation to run if the front-side customer has not specified such a value
-      (or if the value they specified is not yet known).
-    maximum_timeout: The maximum length of time in seconds to allow for the
-      operation to run.
-    lock: The operation-wide lock.
-    termination_manager: The _interfaces.TerminationManager for the operation.
-    transmission_manager: The _interfaces.TransmissionManager for the
-      operation.
-
-  Returns:
-    An _interfaces.ExpirationManager appropriate for service-side use.
-  """
-  expiration_manager = _ExpirationManager(
-      time.time(), default_timeout if timeout is None else timeout,
-      maximum_timeout, lock, termination_manager, transmission_manager)
-  expiration_manager.start()
-  return expiration_manager
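
The deadline arithmetic used by _ExpirationManager above is worth spelling out: the deadline is always measured from the operation's commencement, and a requested timeout is clamped to the configured maximum. A minimal standalone helper, for illustration only:

import time


def compute_deadline(commencement, requested_timeout, maximum_timeout):
  """Returns (effective_timeout, deadline) for a timeout change.

  The deadline is measured from commencement, not from the time of this call,
  and the requested timeout is clamped to maximum_timeout.
  """
  effective_timeout = min(requested_timeout, maximum_timeout)
  return effective_timeout, commencement + effective_timeout


commencement = time.time()
timeout, deadline = compute_deadline(commencement, requested_timeout=120,
                                     maximum_timeout=30)
assert timeout == 30                  # Clamped to the maximum.
assert deadline == commencement + 30  # Measured from commencement.
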
diff --git a/src/python/grpcio/grpc/framework/core/_ingestion.py b/src/python/grpcio/grpc/framework/core/_ingestion.py
deleted file mode 100644
index f2767c981b2e3d4c62903794d581e800dd5171f6..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_ingestion.py
+++ /dev/null
@@ -1,439 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for ingestion during an operation."""
-
-import abc
-import collections
-import enum
-
-import six
-
-from grpc.framework.core import _constants
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import abandonment
-from grpc.framework.foundation import callable_util
-from grpc.framework.interfaces.base import base
-
-_CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE = 'Exception initializing ingestion!'
-_INGESTION_EXCEPTION_LOG_MESSAGE = 'Exception during ingestion!'
-
-
-class _SubscriptionCreation(
-    collections.namedtuple(
-        '_SubscriptionCreation',
-        ('kind', 'subscription', 'code', 'details',))):
-  """A sum type for the outcome of ingestion initialization.
-
-  Attributes:
-    kind: A Kind value coarsely indicating how subscription creation completed.
-    subscription: The created subscription. Only present if kind is
-      Kind.SUBSCRIPTION.
-    code: A code value to be sent to the other side of the operation along with
-      an indication that the operation is being aborted due to an error on the
-      remote side of the operation. Only present if kind is Kind.REMOTE_ERROR.
-    details: A details value to be sent to the other side of the operation
-      along with an indication that the operation is being aborted due to an
-      error on the remote side of the operation. Only present if kind is
-      Kind.REMOTE_ERROR.
-  """
-
-  @enum.unique
-  class Kind(enum.Enum):
-    SUBSCRIPTION = 'subscription'
-    REMOTE_ERROR = 'remote error'
-    ABANDONED = 'abandoned'
-
-
-class _SubscriptionCreator(six.with_metaclass(abc.ABCMeta)):
-  """Common specification of subscription-creating behavior."""
-
-  @abc.abstractmethod
-  def create(self, group, method):
-    """Creates the base.Subscription of the local customer.
-
-    Any exceptions raised by this method should be attributed to and treated as
-    defects in the customer code called by this method.
-
-    Args:
-      group: The group identifier of the operation.
-      method: The method identifier of the operation.
-
-    Returns:
-      A _SubscriptionCreation describing the result of subscription creation.
-    """
-    raise NotImplementedError()
-
-
-class _ServiceSubscriptionCreator(_SubscriptionCreator):
-  """A _SubscriptionCreator appropriate for service-side use."""
-
-  def __init__(self, servicer, operation_context, output_operator):
-    """Constructor.
-
-    Args:
-      servicer: The base.Servicer that will service the operation.
-      operation_context: A base.OperationContext for the operation to be passed
-        to the customer.
-      output_operator: A base.Operator for the operation to be passed to the
-        customer and to be called by the customer to accept operation data
-        emitted by the customer.
-    """
-    self._servicer = servicer
-    self._operation_context = operation_context
-    self._output_operator = output_operator
-
-  def create(self, group, method):
-    try:
-      subscription = self._servicer.service(
-          group, method, self._operation_context, self._output_operator)
-    except base.NoSuchMethodError as e:
-      return _SubscriptionCreation(
-          _SubscriptionCreation.Kind.REMOTE_ERROR, None, e.code, e.details)
-    except abandonment.Abandoned:
-      return _SubscriptionCreation(
-          _SubscriptionCreation.Kind.ABANDONED, None, None, None)
-    else:
-      return _SubscriptionCreation(
-          _SubscriptionCreation.Kind.SUBSCRIPTION, subscription, None, None)
-
-
-def _wrap(behavior):
-  def wrapped(*args, **kwargs):
-    try:
-      behavior(*args, **kwargs)
-    except abandonment.Abandoned:
-      return False
-    else:
-      return True
-  return wrapped
-
-
-class _IngestionManager(_interfaces.IngestionManager):
-  """An implementation of _interfaces.IngestionManager."""
-
-  def __init__(
-      self, lock, pool, subscription, subscription_creator, termination_manager,
-      transmission_manager, expiration_manager, protocol_manager):
-    """Constructor.
-
-    Args:
-      lock: The operation-wide lock.
-      pool: A thread pool in which to execute customer code.
-      subscription: A base.Subscription describing the customer's interest in
-        operation values from the other side. May be None if
-        subscription_creator is not None.
-      subscription_creator: A _SubscriptionCreator wrapping the portion of
-        customer code that when called returns the base.Subscription describing
-        the customer's interest in operation values from the other side. May be
-        None if subscription is not None.
-      termination_manager: The _interfaces.TerminationManager for the operation.
-      transmission_manager: The _interfaces.TransmissionManager for the
-        operation.
-      expiration_manager: The _interfaces.ExpirationManager for the operation.
-      protocol_manager: The _interfaces.ProtocolManager for the operation.
-    """
-    self._lock = lock
-    self._pool = pool
-    self._termination_manager = termination_manager
-    self._transmission_manager = transmission_manager
-    self._expiration_manager = expiration_manager
-    self._protocol_manager = protocol_manager
-
-    if subscription is None:
-      self._subscription_creator = subscription_creator
-      self._wrapped_operator = None
-    elif subscription.kind is base.Subscription.Kind.FULL:
-      self._subscription_creator = None
-      self._wrapped_operator = _wrap(subscription.operator.advance)
-    else:
-      # TODO(nathaniel): Support other subscriptions.
-      raise ValueError('Unsupported subscription "%s"!' % subscription.kind)
-    self._pending_initial_metadata = None
-    self._pending_payloads = []
-    self._pending_completion = None
-    self._local_allowance = 1
-    # A nonnegative integer or None, with None indicating that the local
-    # customer is done emitting anyway so there's no need to bother it by
-    # informing it that the remote customer has granted it further permission to
-    # emit.
-    self._remote_allowance = 0
-    self._processing = False
-
-  def _abort_internal_only(self):
-    self._subscription_creator = None
-    self._wrapped_operator = None
-    self._pending_initial_metadata = None
-    self._pending_payloads = None
-    self._pending_completion = None
-
-  def _abort_and_notify(self, outcome_kind, code, details):
-    self._abort_internal_only()
-    if self._termination_manager.outcome is None:
-      outcome = _utilities.Outcome(outcome_kind, code, details)
-      self._termination_manager.abort(outcome)
-      self._transmission_manager.abort(outcome)
-      self._expiration_manager.terminate()
-
-  def _operator_next(self):
-    """Computes the next step for full-subscription ingestion.
-
-    Returns:
-      An initial_metadata, payload, completion, allowance, continue quintet
-        indicating what operation values (if any) are available to pass into
-        customer code and whether or not customer code should immediately be
-        called with them.
-    """
-    if self._wrapped_operator is None:
-      return None, None, None, None, False
-    else:
-      initial_metadata, payload, completion, allowance, action = [None] * 5
-      if self._pending_initial_metadata is not None:
-        initial_metadata = self._pending_initial_metadata
-        self._pending_initial_metadata = None
-        action = True
-      if self._pending_payloads and 0 < self._local_allowance:
-        payload = self._pending_payloads.pop(0)
-        self._local_allowance -= 1
-        action = True
-      if not self._pending_payloads and self._pending_completion is not None:
-        completion = self._pending_completion
-        self._pending_completion = None
-        action = True
-      if self._remote_allowance is not None and 0 < self._remote_allowance:
-        allowance = self._remote_allowance
-        self._remote_allowance = 0
-        action = True
-      return initial_metadata, payload, completion, allowance, bool(action)
-
-  def _operator_process(
-      self, wrapped_operator, initial_metadata, payload,
-      completion, allowance):
-    while True:
-      advance_outcome = callable_util.call_logging_exceptions(
-          wrapped_operator, _INGESTION_EXCEPTION_LOG_MESSAGE,
-          initial_metadata=initial_metadata, payload=payload,
-          completion=completion, allowance=allowance)
-      if advance_outcome.exception is None:
-        if advance_outcome.return_value:
-          with self._lock:
-            if self._termination_manager.outcome is not None:
-              return
-            if completion is not None:
-              self._termination_manager.ingestion_complete()
-            initial_metadata, payload, completion, allowance, moar = (
-                self._operator_next())
-            if not moar:
-              self._processing = False
-              return
-        else:
-          with self._lock:
-            if self._termination_manager.outcome is None:
-              self._abort_and_notify(
-                  base.Outcome.Kind.LOCAL_FAILURE, None, None)
-            return
-      else:
-        with self._lock:
-          if self._termination_manager.outcome is None:
-            self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None)
-          return
-
-  def _operator_post_create(self, subscription):
-    wrapped_operator = _wrap(subscription.operator.advance)
-    with self._lock:
-      if self._termination_manager.outcome is not None:
-        return
-      self._wrapped_operator = wrapped_operator
-      self._subscription_creator = None
-      metadata, payload, completion, allowance, moar = self._operator_next()
-      if not moar:
-        self._processing = False
-        return
-    self._operator_process(
-        wrapped_operator, metadata, payload, completion, allowance)
-
-  def _create(self, subscription_creator, group, name):
-    outcome = callable_util.call_logging_exceptions(
-        subscription_creator.create,
-        _CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE, group, name)
-    if outcome.return_value is None:
-      with self._lock:
-        if self._termination_manager.outcome is None:
-          self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None)
-    elif outcome.return_value.kind is _SubscriptionCreation.Kind.ABANDONED:
-      with self._lock:
-        if self._termination_manager.outcome is None:
-          self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None)
-    elif outcome.return_value.kind is _SubscriptionCreation.Kind.REMOTE_ERROR:
-      code = outcome.return_value.code
-      details = outcome.return_value.details
-      with self._lock:
-        if self._termination_manager.outcome is None:
-          self._abort_and_notify(
-              base.Outcome.Kind.REMOTE_FAILURE, code, details)
-    elif outcome.return_value.subscription.kind is base.Subscription.Kind.FULL:
-      self._protocol_manager.set_protocol_receiver(
-          outcome.return_value.subscription.protocol_receiver)
-      self._operator_post_create(outcome.return_value.subscription)
-    else:
-      # TODO(nathaniel): Support other subscriptions.
-      raise ValueError(
-          'Unsupported "%s"!' % outcome.return_value.subscription.kind)
-
-  def _store_advance(self, initial_metadata, payload, completion, allowance):
-    if initial_metadata is not None:
-      self._pending_initial_metadata = initial_metadata
-    if payload is not None:
-      self._pending_payloads.append(payload)
-    if completion is not None:
-      self._pending_completion = completion
-    if allowance is not None and self._remote_allowance is not None:
-      self._remote_allowance += allowance
-
-  def _operator_advance(self, initial_metadata, payload, completion, allowance):
-    if self._processing:
-      self._store_advance(initial_metadata, payload, completion, allowance)
-    else:
-      action = False
-      if initial_metadata is not None:
-        action = True
-      if payload is not None:
-        if 0 < self._local_allowance:
-          self._local_allowance -= 1
-          action = True
-        else:
-          self._pending_payloads.append(payload)
-          payload = None
-      if completion is not None:
-        if self._pending_payloads:
-          self._pending_completion = completion
-        else:
-          action = True
-      if allowance is not None and self._remote_allowance is not None:
-        allowance += self._remote_allowance
-        self._remote_allowance = 0
-        action = True
-      if action:
-        self._pool.submit(
-            callable_util.with_exceptions_logged(
-                self._operator_process, _constants.INTERNAL_ERROR_LOG_MESSAGE),
-            self._wrapped_operator, initial_metadata, payload, completion,
-            allowance)
-
-  def set_group_and_method(self, group, method):
-    """See _interfaces.IngestionManager.set_group_and_method for spec."""
-    if self._subscription_creator is not None and not self._processing:
-      self._pool.submit(
-          callable_util.with_exceptions_logged(
-              self._create, _constants.INTERNAL_ERROR_LOG_MESSAGE),
-          self._subscription_creator, group, method)
-      self._processing = True
-
-  def add_local_allowance(self, allowance):
-    """See _interfaces.IngestionManager.add_local_allowance for spec."""
-    if any((self._subscription_creator, self._wrapped_operator,)):
-      self._local_allowance += allowance
-      if not self._processing:
-        initial_metadata, payload, completion, allowance, moar = (
-            self._operator_next())
-        if moar:
-          self._pool.submit(
-              callable_util.with_exceptions_logged(
-                  self._operator_process,
-                  _constants.INTERNAL_ERROR_LOG_MESSAGE),
-              self._wrapped_operator, initial_metadata, payload, completion,
-              allowance)
-
-  def local_emissions_done(self):
-    self._remote_allowance = None
-
-  def advance(self, initial_metadata, payload, completion, allowance):
-    """See _interfaces.IngestionManager.advance for specification."""
-    if self._subscription_creator is not None:
-      self._store_advance(initial_metadata, payload, completion, allowance)
-    elif self._wrapped_operator is not None:
-      self._operator_advance(initial_metadata, payload, completion, allowance)
-
-
-def invocation_ingestion_manager(
-    subscription, lock, pool, termination_manager, transmission_manager,
-    expiration_manager, protocol_manager):
-  """Creates an IngestionManager appropriate for invocation-side use.
-
-  Args:
-    subscription: A base.Subscription indicating the customer's interest in the
-      data and results from the service-side of the operation.
-    lock: The operation-wide lock.
-    pool: A thread pool in which to execute customer code.
-    termination_manager: The _interfaces.TerminationManager for the operation.
-    transmission_manager: The _interfaces.TransmissionManager for the
-      operation.
-    expiration_manager: The _interfaces.ExpirationManager for the operation.
-    protocol_manager: The _interfaces.ProtocolManager for the operation.
-
-  Returns:
-    An IngestionManager appropriate for invocation-side use.
-  """
-  return _IngestionManager(
-      lock, pool, subscription, None, termination_manager, transmission_manager,
-      expiration_manager, protocol_manager)
-
-
-def service_ingestion_manager(
-    servicer, operation_context, output_operator, lock, pool,
-    termination_manager, transmission_manager, expiration_manager,
-    protocol_manager):
-  """Creates an IngestionManager appropriate for service-side use.
-
-  The returned IngestionManager will require its set_group_and_method method to be
-  called before its advance method may be called.
-
-  Args:
-    servicer: A base.Servicer for servicing the operation.
-    operation_context: A base.OperationContext for the operation to be passed to
-      the customer.
-    output_operator: A base.Operator for the operation to be passed to the
-      customer and to be called by the customer to accept operation data output
-      by the customer.
-    lock: The operation-wide lock.
-    pool: A thread pool in which to execute customer code.
-    termination_manager: The _interfaces.TerminationManager for the operation.
-    transmission_manager: The _interfaces.TransmissionManager for the
-      operation.
-    expiration_manager: The _interfaces.ExpirationManager for the operation.
-    protocol_manager: The _interfaces.ProtocolManager for the operation.
-
-  Returns:
-    An IngestionManager appropriate for service-side use.
-  """
-  subscription_creator = _ServiceSubscriptionCreator(
-      servicer, operation_context, output_operator)
-  return _IngestionManager(
-      lock, pool, None, subscription_creator, termination_manager,
-      transmission_manager, expiration_manager, protocol_manager)
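
The flow-control bookkeeping in _IngestionManager._operator_next above can be modeled without the surrounding locking and thread-pool machinery: queued payloads are released one at a time while local allowance remains, and the completion is released only once the payload queue has drained. The class below is a hypothetical standalone model, not grpc code.

class _FlowControlSketch(object):
  """Releases queued payloads as allowance permits, then the completion."""

  def __init__(self, local_allowance=1):
    self.pending_payloads = []
    self.pending_completion = None
    self.local_allowance = local_allowance

  def next_step(self):
    """Returns (payload, completion) to hand to customer code; either may be None."""
    payload = None
    completion = None
    if self.pending_payloads and 0 < self.local_allowance:
      payload = self.pending_payloads.pop(0)
      self.local_allowance -= 1
    if not self.pending_payloads and self.pending_completion is not None:
      completion = self.pending_completion
      self.pending_completion = None
    return payload, completion


sketch = _FlowControlSketch(local_allowance=1)
sketch.pending_payloads.extend([b'first', b'second'])
sketch.pending_completion = object()
assert sketch.next_step() == (b'first', None)  # Allowance spent on the first payload.
assert sketch.next_step() == (None, None)      # Blocked until more allowance arrives.
sketch.local_allowance += 1
payload, completion = sketch.next_step()
assert payload == b'second' and completion is not None  # Queue drained, completion out.
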
diff --git a/src/python/grpcio/grpc/framework/core/_interfaces.py b/src/python/grpcio/grpc/framework/core/_interfaces.py
deleted file mode 100644
index 63ac82f80e7b3acef78225a4ae1b67def8b639b1..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_interfaces.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Package-internal interfaces."""
-
-import abc
-
-import six
-
-from grpc.framework.interfaces.base import base
-
-
-class TerminationManager(six.with_metaclass(abc.ABCMeta)):
-  """An object responsible for handling the termination of an operation.
-
-  Attributes:
-    outcome: None if the operation is active or a base.Outcome value if it has
-      terminated.
-  """
-
-  @abc.abstractmethod
-  def add_callback(self, callback):
-    """Registers a callback to be called on operation termination.
-
-    If the operation has already terminated the callback will not be called.
-
-    Args:
-      callback: A callable that will be passed a base.Outcome value.
-
-    Returns:
-      None if the operation has not yet terminated and the passed callback will
-        be called when it does, or a base.Outcome value describing the
-        operation termination if the operation has terminated and the callback
-        will not be called as a result of this method call.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def emission_complete(self):
-    """Indicates that emissions from customer code have completed."""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def transmission_complete(self):
-    """Indicates that transmissions to the remote end are complete.
-
-    Returns:
-      True if the operation has terminated or False if the operation remains
-        ongoing.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def reception_complete(self, code, details):
-    """Indicates that reception from the other side is complete.
-
-    Args:
-      code: An application-specific code value.
-      details: An application-specific details value.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def ingestion_complete(self):
-    """Indicates that customer code ingestion of received values is complete."""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def expire(self):
-    """Indicates that the operation must abort because it has taken too long."""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def abort(self, outcome):
-    """Indicates that the operation must abort for the indicated reason.
-
-    Args:
-      outcome: A base.Outcome indicating operation abortion.
-    """
-    raise NotImplementedError()
-
-
-class TransmissionManager(six.with_metaclass(abc.ABCMeta)):
-  """A manager responsible for transmitting to the other end of an operation."""
-
-  @abc.abstractmethod
-  def kick_off(
-      self, group, method, timeout, protocol_options, initial_metadata,
-      payload, completion, allowance):
-    """Transmits the values associated with operation invocation."""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def advance(self, initial_metadata, payload, completion, allowance):
-    """Accepts values for transmission to the other end of the operation.
-
-    Args:
-      initial_metadata: An initial metadata value to be transmitted to the other
-        side of the operation. May only ever be non-None once.
-      payload: A payload value.
-      completion: A base.Completion value. May only ever be non-None in the last
-        transmission to be made to the other side.
-      allowance: A positive integer communicating the number of additional
-        payloads allowed to be transmitted from the other side to this side of
-        the operation, or None if no additional allowance is being granted in
-        this call.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def timeout(self, timeout):
-    """Accepts for transmission to the other side a new timeout value.
-
-    Args:
-      timeout: A positive float used as the new timeout value for the operation
-        to be transmitted to the other side.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def allowance(self, allowance):
-    """Indicates to this manager that the remote customer is allowing payloads.
-
-    Args:
-      allowance: A positive integer indicating the number of additional payloads
-        the remote customer is allowing to be transmitted from this side of the
-        operation.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def remote_complete(self):
-    """Indicates to this manager that data from the remote side is complete."""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def abort(self, outcome):
-    """Indicates that the operation has aborted.
-
-    Args:
-      outcome: A base.Outcome for the operation. If None, indicates that the
-        operation abortion should not be communicated to the other side of the
-        operation.
-    """
-    raise NotImplementedError()
-
-
-class ExpirationManager(six.with_metaclass(abc.ABCMeta)):
-  """A manager responsible for aborting the operation if it runs out of time."""
-
-  @abc.abstractmethod
-  def change_timeout(self, timeout):
-    """Changes the timeout allotted for the operation.
-
-    Operation duration is always measured from the beginning of the operation;
-    calling this method changes the operation's allotted time to timeout total
-    seconds, not timeout seconds from the time of this method call.
-
-    Args:
-      timeout: A length of time in seconds to allow for the operation.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def deadline(self):
-    """Returns the time until which the operation is allowed to run.
-
-    Returns:
-      The time (seconds since the epoch) at which the operation will expire.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def terminate(self):
-    """Indicates to this manager that the operation has terminated."""
-    raise NotImplementedError()
-
-
-class ProtocolManager(six.with_metaclass(abc.ABCMeta)):
-  """A manager of protocol-specific values passing through an operation."""
-
-  @abc.abstractmethod
-  def set_protocol_receiver(self, protocol_receiver):
-    """Registers the customer object that will receive protocol objects.
-
-    Args:
-      protocol_receiver: A base.ProtocolReceiver to which protocol objects for
-        the operation should be passed.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def accept_protocol_context(self, protocol_context):
-    """Accepts the protocol context object for the operation.
-
-    Args:
-      protocol_context: An object designated for use as the protocol context
-        of the operation, with further semantics implementation-determined.
-    """
-    raise NotImplementedError()
-
-
-class EmissionManager(six.with_metaclass(abc.ABCMeta, base.Operator)):
-  """A manager of values emitted by customer code."""
-
-  @abc.abstractmethod
-  def advance(
-      self, initial_metadata=None, payload=None, completion=None,
-      allowance=None):
-    """Accepts a value emitted by customer code.
-
-    This method should only be called by customer code.
-
-    Args:
-      initial_metadata: An initial metadata value emitted by the local customer
-        to be sent to the other side of the operation.
-      payload: A payload value emitted by the local customer to be sent to the
-        other side of the operation.
-      completion: A Completion value emitted by the local customer to be sent to
-        the other side of the operation.
-      allowance: A positive integer indicating an additional number of payloads
-        that the local customer is willing to accept from the other side of the
-        operation.
-    """
-    raise NotImplementedError()
-
-
-class IngestionManager(six.with_metaclass(abc.ABCMeta)):
-  """A manager responsible for executing customer code.
-
-  The name of this manager comes from its responsibility to pass successive
-  values from the other side of the operation into the code of the local
-  customer.
-  """
-
-  @abc.abstractmethod
-  def set_group_and_method(self, group, method):
-    """Communicates to this IngestionManager the operation group and method.
-
-    Args:
-      group: The group identifier of the operation.
-      method: The method identifier of the operation.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def add_local_allowance(self, allowance):
-    """Communicates to this IngestionManager that more payloads may be ingested.
-
-    Args:
-      allowance: A positive integer indicating an additional number of payloads
-        that the local customer is willing to ingest.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def local_emissions_done(self):
-    """Indicates to this manager that local emissions are done."""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def advance(self, initial_metadata, payload, completion, allowance):
-    """Advances the operation by passing values to the local customer."""
-    raise NotImplementedError()
-
-
-class ReceptionManager(six.with_metaclass(abc.ABCMeta)):
-  """A manager responsible for receiving tickets from the other end."""
-
-  @abc.abstractmethod
-  def receive_ticket(self, ticket):
-    """Handle a ticket from the other side of the operation.
-
-    Args:
-      ticket: A links.Ticket for the operation.
-    """
-    raise NotImplementedError()
-
-
-class Operation(six.with_metaclass(abc.ABCMeta)):
-  """An ongoing operation.
-
-  Attributes:
-    context: A base.OperationContext object for the operation.
-    operator: A base.Operator object for the operation for use by the customer
-      of the operation.
-  """
-
-  @abc.abstractmethod
-  def handle_ticket(self, ticket):
-    """Handle a ticket from the other side of the operation.
-
-    Args:
-      ticket: A links.Ticket from the other side of the operation.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def abort(self, outcome_kind):
-    """Aborts the operation.
-
-    Args:
-      outcome_kind: A base.Outcome.Kind value indicating operation abortion.
-    """
-    raise NotImplementedError()
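
Of the contracts above, the one most easily misread is TerminationManager.add_callback: it returns None when the callback was registered and will eventually fire, or the terminal outcome when the operation already ended and the callback will never be called. The sketch below is a hypothetical standalone model of that contract (a plain string stands in for a base.Outcome); it is not a real TerminationManager and has no locking.

class _CallbackRegistrySketch(object):
  """Tracks callbacks and an eventual outcome; not a real TerminationManager."""

  def __init__(self):
    self.outcome = None  # None while the (modeled) operation is still active.
    self._callbacks = []

  def add_callback(self, callback):
    """Registers callback, or returns the outcome if termination already happened."""
    if self.outcome is None:
      self._callbacks.append(callback)
      return None
    return self.outcome

  def terminate(self, outcome):
    """Records the outcome and fires every previously registered callback once."""
    self.outcome = outcome
    callbacks, self._callbacks = self._callbacks, []
    for callback in callbacks:
      callback(outcome)


seen = []
registry = _CallbackRegistrySketch()
assert registry.add_callback(seen.append) is None  # Registered; called on termination.
registry.terminate('EXPIRED')
assert seen == ['EXPIRED']
late = registry.add_callback(seen.append)
assert late == 'EXPIRED' and seen == ['EXPIRED']   # Late callback is never invoked.
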
diff --git a/src/python/grpcio/grpc/framework/core/_operation.py b/src/python/grpcio/grpc/framework/core/_operation.py
deleted file mode 100644
index 020c0c9ed9addae3c27fbe311954a2b02d2d1c56..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_operation.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Implementation of operations."""
-
-import threading
-
-from grpc.framework.core import _context
-from grpc.framework.core import _emission
-from grpc.framework.core import _expiration
-from grpc.framework.core import _ingestion
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _protocol
-from grpc.framework.core import _reception
-from grpc.framework.core import _termination
-from grpc.framework.core import _transmission
-from grpc.framework.core import _utilities
-
-
-class _EasyOperation(_interfaces.Operation):
-  """A trivial implementation of interfaces.Operation."""
-
-  def __init__(
-      self, lock, termination_manager, transmission_manager, expiration_manager,
-      context, operator, reception_manager):
-    """Constructor.
-
-    Args:
-      lock: The operation-wide lock.
-      termination_manager: The _interfaces.TerminationManager for the operation.
-      transmission_manager: The _interfaces.TransmissionManager for the
-        operation.
-      expiration_manager: The _interfaces.ExpirationManager for the operation.
-      context: A base.OperationContext for use by the customer during the
-        operation.
-      operator: A base.Operator for use by the customer during the operation.
-      reception_manager: The _interfaces.ReceptionManager for the operation.
-    """
-    self._lock = lock
-    self._termination_manager = termination_manager
-    self._transmission_manager = transmission_manager
-    self._expiration_manager = expiration_manager
-    self._reception_manager = reception_manager
-
-    self.context = context
-    self.operator = operator
-
-  def handle_ticket(self, ticket):
-    with self._lock:
-      self._reception_manager.receive_ticket(ticket)
-
-  def abort(self, outcome_kind):
-    with self._lock:
-      if self._termination_manager.outcome is None:
-        outcome = _utilities.Outcome(outcome_kind, None, None)
-        self._termination_manager.abort(outcome)
-        self._transmission_manager.abort(outcome)
-        self._expiration_manager.terminate()
-
-
-def invocation_operate(
-    operation_id, group, method, subscription, timeout, protocol_options,
-    initial_metadata, payload, completion, ticket_sink, termination_action,
-    pool):
-  """Constructs objects necessary for front-side operation management.
-
-  Args:
-    operation_id: An object identifying the operation.
-    group: The group identifier of the operation.
-    method: The method identifier of the operation.
-    subscription: A base.Subscription describing the customer's interest in the
-      results of the operation.
-    timeout: A length of time in seconds to allow for the operation.
-    protocol_options: A transport-specific, application-specific, and/or
-      protocol-specific value relating to the invocation. May be None.
-    initial_metadata: An initial metadata value to be sent to the other side of
-      the operation. May be None if the initial metadata will be passed later or
-      if there will be no initial metadata passed at all.
-    payload: The first payload value to be transmitted to the other side. May be
-      None if there is no such value or if the customer chose not to pass it at
-      operation invocation.
-    completion: A base.Completion value indicating the end of values passed to
-      the other side of the operation.
-    ticket_sink: A callable that accepts links.Tickets and delivers them to the
-      other side of the operation.
-    termination_action: A callable that accepts the outcome of the operation as
-      a base.Outcome value to be called on operation completion.
-    pool: A thread pool with which to do the work of the operation.
-
-  Returns:
-    An _interfaces.Operation for the operation.
-  """
-  lock = threading.Lock()
-  with lock:
-    termination_manager = _termination.invocation_termination_manager(
-        termination_action, pool)
-    transmission_manager = _transmission.TransmissionManager(
-        operation_id, ticket_sink, lock, pool, termination_manager)
-    expiration_manager = _expiration.invocation_expiration_manager(
-        timeout, lock, termination_manager, transmission_manager)
-    protocol_manager = _protocol.invocation_protocol_manager(
-        subscription, lock, pool, termination_manager, transmission_manager,
-        expiration_manager)
-    operation_context = _context.OperationContext(
-        lock, termination_manager, transmission_manager, expiration_manager)
-    emission_manager = _emission.EmissionManager(
-        lock, termination_manager, transmission_manager, expiration_manager)
-    ingestion_manager = _ingestion.invocation_ingestion_manager(
-        subscription, lock, pool, termination_manager, transmission_manager,
-        expiration_manager, protocol_manager)
-    reception_manager = _reception.ReceptionManager(
-        termination_manager, transmission_manager, expiration_manager,
-        protocol_manager, ingestion_manager)
-
-    termination_manager.set_expiration_manager(expiration_manager)
-    transmission_manager.set_expiration_manager(expiration_manager)
-    emission_manager.set_ingestion_manager(ingestion_manager)
-
-    transmission_manager.kick_off(
-        group, method, timeout, protocol_options, initial_metadata, payload,
-        completion, None)
-
-  return _EasyOperation(
-      lock, termination_manager, transmission_manager, expiration_manager,
-      operation_context, emission_manager, reception_manager)
-
-
-def service_operate(
-    servicer_package, ticket, ticket_sink, termination_action, pool):
-  """Constructs an Operation for service of an operation.
-
-  Args:
-    servicer_package: A _utilities.ServicerPackage to be used servicing the
-      operation.
-    ticket: The first links.Ticket received for the operation.
-    ticket_sink: A callable that accepts links.Tickets and delivers them to the
-      other side of the operation.
-    termination_action: A callable that accepts the outcome of the operation as
-      a base.Outcome value to be called on operation completion.
-    pool: A thread pool with which to do the work of the operation.
-
-  Returns:
-    An _interfaces.Operation for the operation.
-  """
-  lock = threading.Lock()
-  with lock:
-    termination_manager = _termination.service_termination_manager(
-        termination_action, pool)
-    transmission_manager = _transmission.TransmissionManager(
-        ticket.operation_id, ticket_sink, lock, pool, termination_manager)
-    expiration_manager = _expiration.service_expiration_manager(
-        ticket.timeout, servicer_package.default_timeout,
-        servicer_package.maximum_timeout, lock, termination_manager,
-        transmission_manager)
-    protocol_manager = _protocol.service_protocol_manager(
-        lock, pool, termination_manager, transmission_manager,
-        expiration_manager)
-    operation_context = _context.OperationContext(
-        lock, termination_manager, transmission_manager, expiration_manager)
-    emission_manager = _emission.EmissionManager(
-        lock, termination_manager, transmission_manager, expiration_manager)
-    ingestion_manager = _ingestion.service_ingestion_manager(
-        servicer_package.servicer, operation_context, emission_manager, lock,
-        pool, termination_manager, transmission_manager, expiration_manager,
-        protocol_manager)
-    reception_manager = _reception.ReceptionManager(
-        termination_manager, transmission_manager, expiration_manager,
-        protocol_manager, ingestion_manager)
-
-    termination_manager.set_expiration_manager(expiration_manager)
-    transmission_manager.set_expiration_manager(expiration_manager)
-    emission_manager.set_ingestion_manager(ingestion_manager)
-
-    reception_manager.receive_ticket(ticket)
-
-  return _EasyOperation(
-      lock, termination_manager, transmission_manager, expiration_manager,
-      operation_context, emission_manager, reception_manager)
diff --git a/src/python/grpcio/grpc/framework/core/_protocol.py b/src/python/grpcio/grpc/framework/core/_protocol.py
deleted file mode 100644
index 3177b5e302a1e4fdc2fa2a2f10c4eb3c0ef3f035..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_protocol.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for passing protocol objects in an operation."""
-
-import collections
-import enum
-
-from grpc.framework.core import _constants
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import callable_util
-from grpc.framework.interfaces.base import base
-
-_EXCEPTION_LOG_MESSAGE = 'Exception delivering protocol object!'
-
-_LOCAL_FAILURE_OUTCOME = _utilities.Outcome(
-    base.Outcome.Kind.LOCAL_FAILURE, None, None)
-
-
-class _Awaited(
-    collections.namedtuple('_Awaited', ('kind', 'value',))):
-
-  @enum.unique
-  class Kind(enum.Enum):
-    NOT_YET_ARRIVED = 'not yet arrived'
-    ARRIVED = 'arrived'
-
-_NOT_YET_ARRIVED = _Awaited(_Awaited.Kind.NOT_YET_ARRIVED, None)
-_ARRIVED_AND_NONE = _Awaited(_Awaited.Kind.ARRIVED, None)
-
-
-class _Transitory(
-    collections.namedtuple('_Transitory', ('kind', 'value',))):
-
-  @enum.unique
-  class Kind(enum.Enum):
-    NOT_YET_SEEN = 'not yet seen'
-    PRESENT = 'present'
-    GONE = 'gone'
-
-_NOT_YET_SEEN = _Transitory(_Transitory.Kind.NOT_YET_SEEN, None)
-_GONE = _Transitory(_Transitory.Kind.GONE, None)
-
-
-class _ProtocolManager(_interfaces.ProtocolManager):
-  """An implementation of _interfaces.ExpirationManager."""
-
-  def __init__(
-      self, protocol_receiver, lock, pool, termination_manager,
-      transmission_manager, expiration_manager):
-    """Constructor.
-
-    Args:
-      protocol_receiver: An _Awaited wrapping of the base.ProtocolReceiver to
-        which protocol objects should be passed during the operation. May be
-        of kind _Awaited.Kind.NOT_YET_ARRIVED if the customer's subscription is
-        not yet known and may be of kind _Awaited.Kind.ARRIVED but with a value
-        of None if the customer's subscription did not include a
-        ProtocolReceiver.
-      lock: The operation-wide lock.
-      pool: A thread pool.
-      termination_manager: The _interfaces.TerminationManager for the operation.
-      transmission_manager: The _interfaces.TransmissionManager for the
-        operation.
-      expiration_manager: The _interfaces.ExpirationManager for the operation.
-    """
-    self._lock = lock
-    self._pool = pool
-    self._termination_manager = termination_manager
-    self._transmission_manager = transmission_manager
-    self._expiration_manager = expiration_manager
-
-    self._protocol_receiver = protocol_receiver
-    self._context = _NOT_YET_SEEN
-
-  def _abort_and_notify(self, outcome):
-    if self._termination_manager.outcome is None:
-      self._termination_manager.abort(outcome)
-      self._transmission_manager.abort(outcome)
-      self._expiration_manager.terminate()
-
-  def _deliver(self, behavior, value):
-    def deliver():
-      delivery_outcome = callable_util.call_logging_exceptions(
-          behavior, _EXCEPTION_LOG_MESSAGE, value)
-      if delivery_outcome.kind is callable_util.Outcome.Kind.RAISED:
-        with self._lock:
-          self._abort_and_notify(_LOCAL_FAILURE_OUTCOME)
-    self._pool.submit(
-        callable_util.with_exceptions_logged(
-            deliver, _constants.INTERNAL_ERROR_LOG_MESSAGE))
-
-  def set_protocol_receiver(self, protocol_receiver):
-    """See _interfaces.ProtocolManager.set_protocol_receiver for spec."""
-    self._protocol_receiver = _Awaited(_Awaited.Kind.ARRIVED, protocol_receiver)
-    if (self._context.kind is _Transitory.Kind.PRESENT and
-        protocol_receiver is not None):
-      self._deliver(protocol_receiver.context, self._context.value)
-      self._context = _GONE
-
-  def accept_protocol_context(self, protocol_context):
-    """See _interfaces.ProtocolManager.accept_protocol_context for spec."""
-    if self._protocol_receiver.kind is _Awaited.Kind.ARRIVED:
-      if self._protocol_receiver.value is not None:
-        self._deliver(self._protocol_receiver.value.context, protocol_context)
-      self._context = _GONE
-    else:
-      self._context = _Transitory(_Transitory.Kind.PRESENT, protocol_context)
-
-
-def invocation_protocol_manager(
-    subscription, lock, pool, termination_manager, transmission_manager,
-    expiration_manager):
-  """Creates an _interfaces.ProtocolManager for invocation-side use.
-
-  Args:
-    subscription: The local customer's subscription to the operation.
-    lock: The operation-wide lock.
-    pool: A thread pool.
-    termination_manager: The _interfaces.TerminationManager for the operation.
-    transmission_manager: The _interfaces.TransmissionManager for the
-      operation.
-    expiration_manager: The _interfaces.ExpirationManager for the operation.
-  """
-  if subscription.kind is base.Subscription.Kind.FULL:
-    awaited_protocol_receiver = _Awaited(
-        _Awaited.Kind.ARRIVED, subscription.protocol_receiver)
-  else:
-    awaited_protocol_receiver = _ARRIVED_AND_NONE
-  return _ProtocolManager(
-      awaited_protocol_receiver, lock, pool, termination_manager,
-      transmission_manager, expiration_manager)
-
-
-def service_protocol_manager(
-    lock, pool, termination_manager, transmission_manager, expiration_manager):
-  """Creates an _interfaces.ProtocolManager for service-side use.
-
-  Args:
-    lock: The operation-wide lock.
-    pool: A thread pool.
-    termination_manager: The _interfaces.TerminationManager for the operation.
-    transmission_manager: The _interfaces.TransmissionManager for the
-      operation.
-    expiration_manager: The _interfaces.ExpirationManager for the operation.
-  """
-  return _ProtocolManager(
-      _NOT_YET_ARRIVED, lock, pool, termination_manager, transmission_manager,
-      expiration_manager)
diff --git a/src/python/grpcio/grpc/framework/core/_reception.py b/src/python/grpcio/grpc/framework/core/_reception.py
deleted file mode 100644
index ff81450dee9ed53616dd80e5f72d0adff69d786c..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_reception.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for ticket reception."""
-
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.base import utilities
-from grpc.framework.interfaces.links import links
-
-_REMOTE_TICKET_TERMINATION_TO_LOCAL_OUTCOME_KIND = {
-    links.Ticket.Termination.CANCELLATION: base.Outcome.Kind.CANCELLED,
-    links.Ticket.Termination.EXPIRATION: base.Outcome.Kind.EXPIRED,
-    links.Ticket.Termination.SHUTDOWN: base.Outcome.Kind.REMOTE_SHUTDOWN,
-    links.Ticket.Termination.RECEPTION_FAILURE:
-        base.Outcome.Kind.RECEPTION_FAILURE,
-    links.Ticket.Termination.TRANSMISSION_FAILURE:
-        base.Outcome.Kind.TRANSMISSION_FAILURE,
-    links.Ticket.Termination.LOCAL_FAILURE: base.Outcome.Kind.REMOTE_FAILURE,
-    links.Ticket.Termination.REMOTE_FAILURE: base.Outcome.Kind.LOCAL_FAILURE,
-}
-
-_RECEPTION_FAILURE_OUTCOME = _utilities.Outcome(
-    base.Outcome.Kind.RECEPTION_FAILURE, None, None)
-
-
-def _carrying_protocol_context(ticket):
-  return ticket.protocol is not None and ticket.protocol.kind in (
-      links.Protocol.Kind.INVOCATION_CONTEXT,
-      links.Protocol.Kind.SERVICER_CONTEXT,)
-
-
-class ReceptionManager(_interfaces.ReceptionManager):
-  """A ReceptionManager based around a _Receiver passed to it."""
-
-  def __init__(
-      self, termination_manager, transmission_manager, expiration_manager,
-      protocol_manager, ingestion_manager):
-    """Constructor.
-
-    Args:
-      termination_manager: The operation's _interfaces.TerminationManager.
-      transmission_manager: The operation's _interfaces.TransmissionManager.
-      expiration_manager: The operation's _interfaces.ExpirationManager.
-      protocol_manager: The operation's _interfaces.ProtocolManager.
-      ingestion_manager: The operation's _interfaces.IngestionManager.
-    """
-    self._termination_manager = termination_manager
-    self._transmission_manager = transmission_manager
-    self._expiration_manager = expiration_manager
-    self._protocol_manager = protocol_manager
-    self._ingestion_manager = ingestion_manager
-
-    self._lowest_unseen_sequence_number = 0
-    self._out_of_sequence_tickets = {}
-    self._aborted = False
-
-  def _abort(self, outcome):
-    self._aborted = True
-    if self._termination_manager.outcome is None:
-      self._termination_manager.abort(outcome)
-      self._transmission_manager.abort(None)
-      self._expiration_manager.terminate()
-
-  def _sequence_failure(self, ticket):
-    """Determines a just-arrived ticket's sequential legitimacy.
-
-    Args:
-      ticket: A just-arrived ticket.
-
-    Returns:
-      True if the ticket's sequence number has already been seen (making the
-        ticket a sequence failure); False if the ticket is sequentially
-        legitimate.
-    """
-    if ticket.sequence_number < self._lowest_unseen_sequence_number:
-      return True
-    elif ticket.sequence_number in self._out_of_sequence_tickets:
-      return True
-    else:
-      return False
-
-  def _process_one(self, ticket):
-    if ticket.sequence_number == 0:
-      self._ingestion_manager.set_group_and_method(ticket.group, ticket.method)
-      if _carrying_protocol_context(ticket):
-        self._protocol_manager.accept_protocol_context(ticket.protocol.value)
-      else:
-        self._protocol_manager.accept_protocol_context(None)
-    if ticket.timeout is not None:
-      self._expiration_manager.change_timeout(ticket.timeout)
-    if ticket.termination is None:
-      completion = None
-    else:
-      completion = utilities.completion(
-          ticket.terminal_metadata, ticket.code, ticket.message)
-      self._termination_manager.reception_complete(ticket.code, ticket.message)
-    self._ingestion_manager.advance(
-        ticket.initial_metadata, ticket.payload, completion, ticket.allowance)
-    if ticket.allowance is not None:
-      self._transmission_manager.allowance(ticket.allowance)
-
-  def _process(self, ticket):
-    """Process those tickets ready to be processed.
-
-    Args:
-      ticket: A just-arrived ticket the sequence number of which matches this
-        ReceptionManager's _lowest_unseen_sequence_number field.
-    """
-    while True:
-      self._process_one(ticket)
-      next_ticket = self._out_of_sequence_tickets.pop(
-          ticket.sequence_number + 1, None)
-      if next_ticket is None:
-        self._lowest_unseen_sequence_number = ticket.sequence_number + 1
-        return
-      else:
-        ticket = next_ticket
-
-  def receive_ticket(self, ticket):
-    """See _interfaces.ReceptionManager.receive_ticket for specification."""
-    if self._aborted:
-      return
-    elif self._sequence_failure(ticket):
-      self._abort(_RECEPTION_FAILURE_OUTCOME)
-    elif ticket.termination not in (None, links.Ticket.Termination.COMPLETION):
-      outcome_kind = _REMOTE_TICKET_TERMINATION_TO_LOCAL_OUTCOME_KIND[
-          ticket.termination]
-      self._abort(
-          _utilities.Outcome(outcome_kind, ticket.code, ticket.message))
-    elif ticket.sequence_number == self._lowest_unseen_sequence_number:
-      self._process(ticket)
-    else:
-      self._out_of_sequence_tickets[ticket.sequence_number] = ticket
diff --git a/src/python/grpcio/grpc/framework/core/_termination.py b/src/python/grpcio/grpc/framework/core/_termination.py
deleted file mode 100644
index fff3a3fc146fc7324bc594716a4a7b2dd6f674f0..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_termination.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for operation termination."""
-
-import abc
-
-import six
-
-from grpc.framework.core import _constants
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import callable_util
-from grpc.framework.interfaces.base import base
-
-
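-# An invocation-side operation is complete once local ingestion has finished;
-# a service-side operation must also have finished transmitting its responses.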
-def _invocation_completion_predicate(
-    unused_emission_complete, unused_transmission_complete,
-    unused_reception_complete, ingestion_complete):
-  return ingestion_complete
-
-
-def _service_completion_predicate(
-    unused_emission_complete, transmission_complete, unused_reception_complete,
-    ingestion_complete):
-  return transmission_complete and ingestion_complete
-
-
-class TerminationManager(six.with_metaclass(abc.ABCMeta, _interfaces.TerminationManager)):
-  """A _interfaces.TransmissionManager on which another manager may be set."""
-
-  @abc.abstractmethod
-  def set_expiration_manager(self, expiration_manager):
-    """Sets the expiration manager with which this manager will interact.
-
-    Args:
-      expiration_manager: The _interfaces.ExpirationManager associated with the
-        current operation.
-    """
-    raise NotImplementedError()
-
-
-class _TerminationManager(TerminationManager):
-  """An implementation of TerminationManager."""
-
-  def __init__(self, predicate, action, pool):
-    """Constructor.
-
-    Args:
-      predicate: One of _invocation_completion_predicate or
-        _service_completion_predicate to be used to determine when the operation
-        has completed.
-      action: A behavior to which the operation outcome's kind is passed on
-        operation termination.
-      pool: A thread pool.
-    """
-    self._predicate = predicate
-    self._action = action
-    self._pool = pool
-    self._expiration_manager = None
-
-    self._callbacks = []
-
-    self._code = None
-    self._details = None
-    self._emission_complete = False
-    self._transmission_complete = False
-    self._reception_complete = False
-    self._ingestion_complete = False
-
-    # The None-ness of outcome is the operation-wide record of whether and how
-    # the operation has terminated.
-    self.outcome = None
-
-  def set_expiration_manager(self, expiration_manager):
-    self._expiration_manager = expiration_manager
-
-  def _terminate_internal_only(self, outcome):
-    """Terminates the operation.
-
-    Args:
-      outcome: A base.Outcome describing the outcome of the operation.
-    """
-    self.outcome = outcome
-    callbacks = list(self._callbacks)
-    self._callbacks = None
-
-    act = callable_util.with_exceptions_logged(
-        self._action, _constants.INTERNAL_ERROR_LOG_MESSAGE)
-
-    # TODO(issue 3202): Don't call the local application's callbacks if it has
-    # previously shown a programming defect.
-    if False and outcome.kind is base.Outcome.Kind.LOCAL_FAILURE:
-      self._pool.submit(act, base.Outcome.Kind.LOCAL_FAILURE)
-    else:
-      def call_callbacks_and_act(callbacks, outcome):
-        for callback in callbacks:
-          callback_outcome = callable_util.call_logging_exceptions(
-              callback, _constants.TERMINATION_CALLBACK_EXCEPTION_LOG_MESSAGE,
-              outcome)
-          if callback_outcome.exception is not None:
-            act_outcome_kind = base.Outcome.Kind.LOCAL_FAILURE
-            break
-        else:
-          act_outcome_kind = outcome.kind
-        act(act_outcome_kind)
-
-      self._pool.submit(
-          callable_util.with_exceptions_logged(
-              call_callbacks_and_act, _constants.INTERNAL_ERROR_LOG_MESSAGE),
-          callbacks, outcome)
-
-  def _terminate_and_notify(self, outcome):
-    self._terminate_internal_only(outcome)
-    self._expiration_manager.terminate()
-
-  def _perhaps_complete(self):
-    if self._predicate(
-        self._emission_complete, self._transmission_complete,
-        self._reception_complete, self._ingestion_complete):
-      self._terminate_and_notify(
-          _utilities.Outcome(
-              base.Outcome.Kind.COMPLETED, self._code, self._details))
-      return True
-    else:
-      return False
-
-  def is_active(self):
-    """See _interfaces.TerminationManager.is_active for specification."""
-    return self.outcome is None
-
-  def add_callback(self, callback):
-    """See _interfaces.TerminationManager.add_callback for specification."""
-    if self.outcome is None:
-      self._callbacks.append(callback)
-      return None
-    else:
-      return self.outcome
-
-  def emission_complete(self):
-    """See superclass method for specification."""
-    if self.outcome is None:
-      self._emission_complete = True
-      self._perhaps_complete()
-
-  def transmission_complete(self):
-    """See superclass method for specification."""
-    if self.outcome is None:
-      self._transmission_complete = True
-      return self._perhaps_complete()
-    else:
-      return False
-
-  def reception_complete(self, code, details):
-    """See superclass method for specification."""
-    if self.outcome is None:
-      self._reception_complete = True
-      self._code = code
-      self._details = details
-      self._perhaps_complete()
-
-  def ingestion_complete(self):
-    """See superclass method for specification."""
-    if self.outcome is None:
-      self._ingestion_complete = True
-      self._perhaps_complete()
-
-  def expire(self):
-    """See _interfaces.TerminationManager.expire for specification."""
-    self._terminate_internal_only(
-        _utilities.Outcome(base.Outcome.Kind.EXPIRED, None, None))
-
-  def abort(self, outcome):
-    """See _interfaces.TerminationManager.abort for specification."""
-    self._terminate_and_notify(outcome)
-
-
-def invocation_termination_manager(action, pool):
-  """Creates a TerminationManager appropriate for invocation-side use.
-
-  Args:
-    action: An action to call on operation termination.
-    pool: A thread pool in which to execute the passed action and any
-      termination callbacks that are registered during the operation.
-
-  Returns:
-    A TerminationManager appropriate for invocation-side use.
-  """
-  return _TerminationManager(_invocation_completion_predicate, action, pool)
-
-
-def service_termination_manager(action, pool):
-  """Creates a TerminationManager appropriate for service-side use.
-
-  Args:
-    action: An action to call on operation termination.
-    pool: A thread pool in which to execute the passed action and any
-      termination callbacks that are registered during the operation.
-
-  Returns:
-    A TerminationManager appropriate for service-side use.
-  """
-  return _TerminationManager(_service_completion_predicate, action, pool)
diff --git a/src/python/grpcio/grpc/framework/core/_transmission.py b/src/python/grpcio/grpc/framework/core/_transmission.py
deleted file mode 100644
index 65b12c4160eb28778dc1ba0d1a7ff4055175de25..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/_transmission.py
+++ /dev/null
@@ -1,335 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for ticket transmission during an operation."""
-
-import collections
-import enum
-
-from grpc.framework.core import _constants
-from grpc.framework.core import _interfaces
-from grpc.framework.core import _utilities
-from grpc.framework.foundation import callable_util
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.links import links
-
-_TRANSMISSION_EXCEPTION_LOG_MESSAGE = 'Exception during transmission!'
-
-_TRANSMISSION_FAILURE_OUTCOME = _utilities.Outcome(
-    base.Outcome.Kind.TRANSMISSION_FAILURE, None, None)
-
-
-def _explode_completion(completion):
-  if completion is None:
-    return None, None, None, None
-  else:
-    return (
-        completion.terminal_metadata, completion.code, completion.message,
-        links.Ticket.Termination.COMPLETION)
-
-
-class _Abort(
-    collections.namedtuple(
-        '_Abort', ('kind', 'termination', 'code', 'details',))):
-  """Tracks whether the operation aborted and what is to be done about it.
-
-  Attributes:
-    kind: A Kind value describing the overall kind of the _Abort.
-    termination: A links.Ticket.Termination value to be sent to the other side
-      of the operation. Only valid if kind is Kind.ABORTED_NOTIFY_NEEDED.
-    code: A code value to be sent to the other side of the operation. Only
-      valid if kind is Kind.ABORTED_NOTIFY_NEEDED.
-    details: A details value to be sent to the other side of the operation.
-      Only valid if kind is Kind.ABORTED_NOTIFY_NEEDED.
-  """
-
-  @enum.unique
-  class Kind(enum.Enum):
-    NOT_ABORTED = 'not aborted'
-    ABORTED_NOTIFY_NEEDED = 'aborted notify needed'
-    ABORTED_NO_NOTIFY = 'aborted no notify'
-
-_NOT_ABORTED = _Abort(_Abort.Kind.NOT_ABORTED, None, None, None)
-_ABORTED_NO_NOTIFY = _Abort(_Abort.Kind.ABORTED_NO_NOTIFY, None, None, None)
-
-
-class TransmissionManager(_interfaces.TransmissionManager):
-  """An _interfaces.TransmissionManager that sends links.Tickets."""
-
-  def __init__(
-      self, operation_id, ticket_sink, lock, pool, termination_manager):
-    """Constructor.
-
-    Args:
-      operation_id: The operation's ID.
-      ticket_sink: A callable that accepts tickets and sends them to the other
-        side of the operation.
-      lock: The operation-wide lock.
-      pool: A thread pool in which the work of transmitting tickets will be
-        performed.
-      termination_manager: The _interfaces.TerminationManager associated with
-        this operation.
-    """
-    self._lock = lock
-    self._pool = pool
-    self._ticket_sink = ticket_sink
-    self._operation_id = operation_id
-    self._termination_manager = termination_manager
-    self._expiration_manager = None
-
-    self._lowest_unused_sequence_number = 0
-    self._remote_allowance = 1
-    self._remote_complete = False
-    self._timeout = None
-    self._local_allowance = 0
-    self._initial_metadata = None
-    self._payloads = []
-    self._completion = None
-    self._abort = _NOT_ABORTED
-    self._transmitting = False
-
-  def set_expiration_manager(self, expiration_manager):
-    """Sets the ExpirationManager with which this manager will cooperate."""
-    self._expiration_manager = expiration_manager
-
-  def _next_ticket(self):
-    """Creates the next ticket to be transmitted.
-
-    Returns:
-      A links.Ticket to be sent to the other side of the operation or None if
-        there is nothing to be sent at this time.
-    """
-    if self._abort.kind is _Abort.Kind.ABORTED_NO_NOTIFY:
-      return None
-    elif self._abort.kind is _Abort.Kind.ABORTED_NOTIFY_NEEDED:
-      termination = self._abort.termination
-      code, details = self._abort.code, self._abort.details
-      self._abort = _ABORTED_NO_NOTIFY
-      return links.Ticket(
-          self._operation_id, self._lowest_unused_sequence_number, None, None,
-          None, None, None, None, None, None, code, details, termination, None)
-
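-    # No abort is pending: coalesce any pending timeout, flow-control
-    # allowance, initial metadata, payload, and completion into a single
-    # ticket, or return None if nothing needs to be sent right now.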
-    action = False
-    # TODO(nathaniel): Support other subscriptions.
-    local_subscription = links.Ticket.Subscription.FULL
-    timeout = self._timeout
-    if timeout is not None:
-      self._timeout = None
-      action = True
-    if self._local_allowance <= 0:
-      allowance = None
-    else:
-      allowance = self._local_allowance
-      self._local_allowance = 0
-      action = True
-    initial_metadata = self._initial_metadata
-    if initial_metadata is not None:
-      self._initial_metadata = None
-      action = True
-    if not self._payloads or self._remote_allowance <= 0:
-      payload = None
-    else:
-      payload = self._payloads.pop(0)
-      self._remote_allowance -= 1
-      action = True
-    if self._completion is None or self._payloads:
-      terminal_metadata, code, message, termination = None, None, None, None
-    else:
-      terminal_metadata, code, message, termination = _explode_completion(
-          self._completion)
-      self._completion = None
-      action = True
-
-    if action:
-      ticket = links.Ticket(
-          self._operation_id, self._lowest_unused_sequence_number, None, None,
-          local_subscription, timeout, allowance, initial_metadata, payload,
-          terminal_metadata, code, message, termination, None)
-      self._lowest_unused_sequence_number += 1
-      return ticket
-    else:
-      return None
-
-  def _transmit(self, ticket):
-    """Commences the transmission loop sending tickets.
-
-    Args:
-      ticket: A links.Ticket to be sent to the other side of the operation.
-    """
-    def transmit(ticket):
-      while True:
-        transmission_outcome = callable_util.call_logging_exceptions(
-            self._ticket_sink, _TRANSMISSION_EXCEPTION_LOG_MESSAGE, ticket)
-        if transmission_outcome.exception is None:
-          with self._lock:
-            if ticket.termination is links.Ticket.Termination.COMPLETION:
-              self._termination_manager.transmission_complete()
-            ticket = self._next_ticket()
-            if ticket is None:
-              self._transmitting = False
-              return
-        else:
-          with self._lock:
-            self._abort = _ABORTED_NO_NOTIFY
-            if self._termination_manager.outcome is None:
-              self._termination_manager.abort(_TRANSMISSION_FAILURE_OUTCOME)
-              self._expiration_manager.terminate()
-            return
-
-    self._pool.submit(callable_util.with_exceptions_logged(
-        transmit, _constants.INTERNAL_ERROR_LOG_MESSAGE), ticket)
-    self._transmitting = True
-
-  def kick_off(
-      self, group, method, timeout, protocol_options, initial_metadata,
-      payload, completion, allowance):
-    """See _interfaces.TransmissionManager.kickoff for specification."""
-    # TODO(nathaniel): Support other subscriptions.
-    subscription = links.Ticket.Subscription.FULL
-    terminal_metadata, code, message, termination = _explode_completion(
-        completion)
-    self._remote_allowance = 1 if payload is None else 0
-    protocol = links.Protocol(links.Protocol.Kind.CALL_OPTION, protocol_options)
-    ticket = links.Ticket(
-        self._operation_id, 0, group, method, subscription, timeout, allowance,
-        initial_metadata, payload, terminal_metadata, code, message,
-        termination, protocol)
-    self._lowest_unused_sequence_number = 1
-    self._transmit(ticket)
-
-  def advance(self, initial_metadata, payload, completion, allowance):
-    """See _interfaces.TransmissionManager.advance for specification."""
-    if self._abort.kind is not _Abort.Kind.NOT_ABORTED:
-      return
-
-    effective_initial_metadata = initial_metadata
-    effective_payload = payload
-    effective_completion = completion
-    if allowance is not None and not self._remote_complete:
-      effective_allowance = allowance
-    else:
-      effective_allowance = None
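-    # If a transmission is already in flight, buffer everything for the
-    # transmission loop to pick up; otherwise build a ticket now, holding back
-    # payloads for which the remote side has no allowance.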
-    if self._transmitting:
-      if effective_initial_metadata is not None:
-        self._initial_metadata = effective_initial_metadata
-      if effective_payload is not None:
-        self._payloads.append(effective_payload)
-      if effective_completion is not None:
-        self._completion = effective_completion
-      if effective_allowance is not None:
-        self._local_allowance += effective_allowance
-    else:
-      if effective_payload is not None:
-        if 0 < self._remote_allowance:
-          ticket_payload = effective_payload
-          self._remote_allowance -= 1
-        else:
-          self._payloads.append(effective_payload)
-          ticket_payload = None
-      else:
-        ticket_payload = None
-      if effective_completion is not None and not self._payloads:
-        ticket_completion = effective_completion
-      else:
-        self._completion = effective_completion
-        ticket_completion = None
-      if any(
-          (effective_initial_metadata, ticket_payload, ticket_completion,
-           effective_allowance)):
-        terminal_metadata, code, message, termination = _explode_completion(
-            ticket_completion)
-        ticket = links.Ticket(
-            self._operation_id, self._lowest_unused_sequence_number, None, None,
-            None, None, effective_allowance, effective_initial_metadata,
-            ticket_payload,
-            terminal_metadata, code, message, termination, None)
-        self._lowest_unused_sequence_number += 1
-        self._transmit(ticket)
-
-  def timeout(self, timeout):
-    """See _interfaces.TransmissionManager.timeout for specification."""
-    if self._abort.kind is not _Abort.Kind.NOT_ABORTED:
-      return
-    elif self._transmitting:
-      self._timeout = timeout
-    else:
-      ticket = links.Ticket(
-          self._operation_id, self._lowest_unused_sequence_number, None, None,
-          None, timeout, None, None, None, None, None, None, None, None)
-      self._lowest_unused_sequence_number += 1
-      self._transmit(ticket)
-
-  def allowance(self, allowance):
-    """See _interfaces.TransmissionManager.allowance for specification."""
-    if self._abort.kind is not _Abort.Kind.NOT_ABORTED:
-      return
-    elif self._transmitting or not self._payloads:
-      self._remote_allowance += allowance
-    else:
-      self._remote_allowance += allowance - 1
-      payload = self._payloads.pop(0)
-      if self._payloads:
-        completion = None
-      else:
-        completion = self._completion
-        self._completion = None
-      terminal_metadata, code, message, termination = _explode_completion(
-          completion)
-      ticket = links.Ticket(
-          self._operation_id, self._lowest_unused_sequence_number, None, None,
-          None, None, None, None, payload, terminal_metadata, code, message,
-          termination, None)
-      self._lowest_unused_sequence_number += 1
-      self._transmit(ticket)
-
-  def remote_complete(self):
-    """See _interfaces.TransmissionManager.remote_complete for specification."""
-    self._remote_complete = True
-    self._local_allowance = 0
-
-  def abort(self, outcome):
-    """See _interfaces.TransmissionManager.abort for specification."""
-    if self._abort.kind is _Abort.Kind.NOT_ABORTED:
-      if outcome is None:
-        self._abort = _ABORTED_NO_NOTIFY
-      else:
-        termination = _constants.ABORTION_OUTCOME_TO_TICKET_TERMINATION.get(
-            outcome.kind)
-        if termination is None:
-          self._abort = _ABORTED_NO_NOTIFY
-        elif self._transmitting:
-          self._abort = _Abort(
-              _Abort.Kind.ABORTED_NOTIFY_NEEDED, termination, outcome.code,
-              outcome.details)
-        else:
-          ticket = links.Ticket(
-              self._operation_id, self._lowest_unused_sequence_number, None,
-              None, None, None, None, None, None, None, outcome.code,
-              outcome.details, termination, None)
-          self._transmit(ticket)
-          self._abort = _ABORTED_NO_NOTIFY
diff --git a/src/python/grpcio/grpc/framework/core/implementations.py b/src/python/grpcio/grpc/framework/core/implementations.py
deleted file mode 100644
index 364a7faed4093a7cfd92070dea9de423b386e0a1..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/core/implementations.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Entry points into the ticket-exchange-based base layer implementation."""
-
-# base and links are referenced from specification in this module.
-from grpc.framework.core import _end
-from grpc.framework.interfaces.base import base  # pylint: disable=unused-import
-from grpc.framework.interfaces.links import links  # pylint: disable=unused-import
-
-
-def invocation_end_link():
-  """Creates a base.End-links.Link suitable for operation invocation.
-
-  Returns:
-    An object that is both a base.End and a links.Link, that supports operation
-      invocation, and that translates operation invocation into ticket exchange.
-  """
-  return _end.serviceless_end_link()
-
-
-def service_end_link(servicer, default_timeout, maximum_timeout):
-  """Creates a base.End-links.Link suitable for operation service.
-
-  Args:
-    servicer: A base.Servicer for servicing operations.
-    default_timeout: A length of time in seconds to be used as the default
-      time allotted for a single operation.
-    maximum_timeout: A length of time in seconds to be used as the maximum
-      time allotted for a single operation.
-
-  Returns:
-    An object that is both a base.End and a links.Link and that services
-      operations that arrive at it through ticket exchange.
-  """
-  return _end.serviceful_end_link(servicer, default_timeout, maximum_timeout)
diff --git a/src/python/grpcio/grpc/framework/crust/_calls.py b/src/python/grpcio/grpc/framework/crust/_calls.py
deleted file mode 100644
index bff940d74710da3a6be3a2c099cf8e661f51f910..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/crust/_calls.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Utility functions for invoking RPCs."""
-
-from grpc.framework.crust import _control
-from grpc.framework.interfaces.base import utilities
-from grpc.framework.interfaces.face import face
-
-_ITERATOR_EXCEPTION_LOG_MESSAGE = 'Exception iterating over requests!'
-
-_EMPTY_COMPLETION = utilities.completion(None, None, None)
-
-
-def _invoke(
-    end, group, method, timeout, protocol_options, initial_metadata, payload,
-    complete):
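-  # Create a Rendezvous, subscribe it to a new operation on the given end, and
-  # return it along with the operation context and any already-known outcome.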
-  rendezvous = _control.Rendezvous(None, None)
-  subscription = utilities.full_subscription(
-      rendezvous, _control.protocol_receiver(rendezvous))
-  operation_context, operator = end.operate(
-      group, method, subscription, timeout, protocol_options=protocol_options,
-      initial_metadata=initial_metadata, payload=payload,
-      completion=_EMPTY_COMPLETION if complete else None)
-  rendezvous.set_operator_and_context(operator, operation_context)
-  outcome = operation_context.add_termination_callback(rendezvous.set_outcome)
-  if outcome is not None:
-    rendezvous.set_outcome(outcome)
-  return rendezvous, operation_context, outcome
-
-
-def _event_return_unary(
-    receiver, abortion_callback, rendezvous, operation_context, outcome, pool):
-  if outcome is None:
-    def in_pool():
-      abortion = rendezvous.add_abortion_callback(abortion_callback)
-      if abortion is None:
-        try:
-          receiver.initial_metadata(rendezvous.initial_metadata())
-          receiver.response(next(rendezvous))
-          receiver.complete(
-              rendezvous.terminal_metadata(), rendezvous.code(),
-              rendezvous.details())
-        except face.AbortionError:
-          pass
-      else:
-        abortion_callback(abortion)
-    pool.submit(_control.pool_wrap(in_pool, operation_context))
-  return rendezvous
-
-
-def _event_return_stream(
-    receiver, abortion_callback, rendezvous, operation_context, outcome, pool):
-  if outcome is None:
-    def in_pool():
-      abortion = rendezvous.add_abortion_callback(abortion_callback)
-      if abortion is None:
-        try:
-          receiver.initial_metadata(rendezvous.initial_metadata())
-          for response in rendezvous:
-            receiver.response(response)
-          receiver.complete(
-              rendezvous.terminal_metadata(), rendezvous.code(),
-              rendezvous.details())
-        except face.AbortionError:
-          pass
-      else:
-        abortion_callback(abortion)
-    pool.submit(_control.pool_wrap(in_pool, operation_context))
-  return rendezvous
-
-
-def blocking_unary_unary(
-    end, group, method, timeout, with_call, protocol_options, initial_metadata,
-    payload):
-  """Services in a blocking fashion a unary-unary servicer method."""
-  rendezvous, unused_operation_context, unused_outcome = _invoke(
-      end, group, method, timeout, protocol_options, initial_metadata, payload,
-      True)
-  if with_call:
-    return next(rendezvous), rendezvous
-  else:
-    return next(rendezvous)
-
-
-def future_unary_unary(
-    end, group, method, timeout, protocol_options, initial_metadata, payload):
-  """Services a value-in value-out servicer method by returning a Future."""
-  rendezvous, unused_operation_context, unused_outcome = _invoke(
-      end, group, method, timeout, protocol_options, initial_metadata, payload,
-      True)
-  return rendezvous
-
-
-def inline_unary_stream(
-    end, group, method, timeout, protocol_options, initial_metadata, payload):
-  """Services a value-in stream-out servicer method."""
-  rendezvous, unused_operation_context, unused_outcome = _invoke(
-      end, group, method, timeout, protocol_options, initial_metadata, payload,
-      True)
-  return rendezvous
-
-
-def blocking_stream_unary(
-    end, group, method, timeout, with_call, protocol_options, initial_metadata,
-    payload_iterator, pool):
-  """Services in a blocking fashion a stream-in value-out servicer method."""
-  rendezvous, operation_context, outcome = _invoke(
-      end, group, method, timeout, protocol_options, initial_metadata, None,
-      False)
-  if outcome is None:
-    def in_pool():
-      for payload in payload_iterator:
-        rendezvous.consume(payload)
-      rendezvous.terminate()
-    pool.submit(_control.pool_wrap(in_pool, operation_context))
-    if with_call:
-      return next(rendezvous), rendezvous
-    else:
-      return next(rendezvous)
-  else:
-    if with_call:
-      return next(rendezvous), rendezvous
-    else:
-      return next(rendezvous)
-
-
-def future_stream_unary(
-    end, group, method, timeout, protocol_options, initial_metadata,
-    payload_iterator, pool):
-  """Services a stream-in value-out servicer method by returning a Future."""
-  rendezvous, operation_context, outcome = _invoke(
-      end, group, method, timeout, protocol_options, initial_metadata, None,
-      False)
-  if outcome is None:
-    def in_pool():
-      for payload in payload_iterator:
-        rendezvous.consume(payload)
-      rendezvous.terminate()
-    pool.submit(_control.pool_wrap(in_pool, operation_context))
-  return rendezvous
-
-
-def inline_stream_stream(
-    end, group, method, timeout, protocol_options, initial_metadata,
-    payload_iterator, pool):
-  """Services a stream-in stream-out servicer method."""
-  rendezvous, operation_context, outcome = _invoke(
-      end, group, method, timeout, protocol_options, initial_metadata, None,
-      False)
-  if outcome is None:
-    def in_pool():
-      for payload in payload_iterator:
-        rendezvous.consume(payload)
-      rendezvous.terminate()
-    pool.submit(_control.pool_wrap(in_pool, operation_context))
-  return rendezvous
-
-
-def event_unary_unary(
-    end, group, method, timeout, protocol_options, initial_metadata, payload,
-    receiver, abortion_callback, pool):
-  rendezvous, operation_context, outcome = _invoke(
-      end, group, method, timeout, protocol_options, initial_metadata, payload,
-      True)
-  return _event_return_unary(
-      receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
-
-
-def event_unary_stream(
-    end, group, method, timeout, protocol_options, initial_metadata, payload,
-    receiver, abortion_callback, pool):
-  rendezvous, operation_context, outcome = _invoke(
-      end, group, method, timeout, protocol_options, initial_metadata, payload,
-      True)
-  return _event_return_stream(
-      receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
-
-
-def event_stream_unary(
-    end, group, method, timeout, protocol_options, initial_metadata, receiver,
-    abortion_callback, pool):
-  rendezvous, operation_context, outcome = _invoke(
-      end, group, method, timeout, protocol_options, initial_metadata, None,
-      False)
-  return _event_return_unary(
-      receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
-
-
-def event_stream_stream(
-    end, group, method, timeout, protocol_options, initial_metadata, receiver,
-    abortion_callback, pool):
-  rendezvous, operation_context, outcome = _invoke(
-      end, group, method, timeout, protocol_options, initial_metadata, None,
-      False)
-  return _event_return_stream(
-      receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
diff --git a/src/python/grpcio/grpc/framework/crust/_control.py b/src/python/grpcio/grpc/framework/crust/_control.py
deleted file mode 100644
index 9b4167bda0c20dbab18a7f0ae95b017fc31f4de4..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/crust/_control.py
+++ /dev/null
@@ -1,584 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior for translating between sync and async control flow."""
-
-import collections
-import enum
-import sys
-import threading
-import time
-
-from grpc.framework.foundation import abandonment
-from grpc.framework.foundation import callable_util
-from grpc.framework.foundation import future
-from grpc.framework.foundation import stream
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.base import utilities
-from grpc.framework.interfaces.face import face
-
-_DONE_CALLBACK_LOG_MESSAGE = 'Exception calling Future "done" callback!'
-_INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Crust) Internal Error! )-:'
-
-_CANNOT_SET_INITIAL_METADATA = (
-    'Could not set initial metadata - has it already been set, or has a ' +
-    'payload already been sent?')
-_CANNOT_SET_TERMINAL_METADATA = (
-    'Could not set terminal metadata - has it already been set, or has RPC ' +
-    'completion already been indicated?')
-_CANNOT_SET_CODE = (
-    'Could not set code - has it already been set, or has RPC completion ' +
-    'already been indicated?')
-_CANNOT_SET_DETAILS = (
-    'Could not set details - has it already been set, or has RPC completion ' +
-    'already been indicated?')
-
-
-class _DummyOperator(base.Operator):
-
-  def advance(
-      self, initial_metadata=None, payload=None, completion=None,
-      allowance=None):
-    pass
-
-_DUMMY_OPERATOR = _DummyOperator()
-
-
-class _Awaited(
-    collections.namedtuple('_Awaited', ('kind', 'value',))):
-
-  @enum.unique
-  class Kind(enum.Enum):
-    NOT_YET_ARRIVED = 'not yet arrived'
-    ARRIVED = 'arrived'
-
-_NOT_YET_ARRIVED = _Awaited(_Awaited.Kind.NOT_YET_ARRIVED, None)
-_ARRIVED_AND_NONE = _Awaited(_Awaited.Kind.ARRIVED, None)
-
-
-class _Transitory(
-    collections.namedtuple('_Transitory', ('kind', 'value',))):
-
-  @enum.unique
-  class Kind(enum.Enum):
-    NOT_YET_SEEN = 'not yet seen'
-    PRESENT = 'present'
-    GONE = 'gone'
-
-_NOT_YET_SEEN = _Transitory(_Transitory.Kind.NOT_YET_SEEN, None)
-_GONE = _Transitory(_Transitory.Kind.GONE, None)
-
-
-class _Termination(
-    collections.namedtuple(
-        '_Termination', ('terminated', 'abortion', 'abortion_error',))):
-  """Values indicating whether and how an RPC has terminated.
-
-  Attributes:
-    terminated: A boolean indicating whether or not the RPC has terminated.
-    abortion: A face.Abortion value describing the RPC's abortion or None if the
-      RPC did not abort.
-    abortion_error: A face.AbortionError describing the RPC's abortion or None
-      if the RPC did not abort.
-  """
-
-_NOT_TERMINATED = _Termination(False, None, None)
-
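-# Maps each base-layer outcome kind to a constructor for the _Termination (and
-# any face-layer abortion and error) with which the RPC is reported to end.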
-_OPERATION_OUTCOME_KIND_TO_TERMINATION_CONSTRUCTOR = {
-    base.Outcome.Kind.COMPLETED: lambda *unused_args: _Termination(
-        True, None, None),
-    base.Outcome.Kind.CANCELLED: lambda *args: _Termination(
-        True, face.Abortion(face.Abortion.Kind.CANCELLED, *args),
-        face.CancellationError(*args)),
-    base.Outcome.Kind.EXPIRED: lambda *args: _Termination(
-        True, face.Abortion(face.Abortion.Kind.EXPIRED, *args),
-        face.ExpirationError(*args)),
-    base.Outcome.Kind.LOCAL_SHUTDOWN: lambda *args: _Termination(
-        True, face.Abortion(face.Abortion.Kind.LOCAL_SHUTDOWN, *args),
-        face.LocalShutdownError(*args)),
-    base.Outcome.Kind.REMOTE_SHUTDOWN: lambda *args: _Termination(
-        True, face.Abortion(face.Abortion.Kind.REMOTE_SHUTDOWN, *args),
-        face.RemoteShutdownError(*args)),
-    base.Outcome.Kind.RECEPTION_FAILURE: lambda *args: _Termination(
-        True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args),
-        face.NetworkError(*args)),
-    base.Outcome.Kind.TRANSMISSION_FAILURE: lambda *args: _Termination(
-        True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args),
-        face.NetworkError(*args)),
-    base.Outcome.Kind.LOCAL_FAILURE: lambda *args: _Termination(
-        True, face.Abortion(face.Abortion.Kind.LOCAL_FAILURE, *args),
-        face.LocalError(*args)),
-    base.Outcome.Kind.REMOTE_FAILURE: lambda *args: _Termination(
-        True, face.Abortion(face.Abortion.Kind.REMOTE_FAILURE, *args),
-        face.RemoteError(*args)),
-}
-
-
-def _wait_once_until(condition, until):
-  if until is None:
-    condition.wait()
-  else:
-    remaining = until - time.time()
-    if remaining < 0:
-      raise future.TimeoutError()
-    else:
-      condition.wait(timeout=remaining)
-
-
-def _done_callback_as_operation_termination_callback(
-    done_callback, rendezvous):
-  def operation_termination_callback(operation_outcome):
-    rendezvous.set_outcome(operation_outcome)
-    done_callback(rendezvous)
-  return operation_termination_callback
-
-
-def _abortion_callback_as_operation_termination_callback(
-    rpc_abortion_callback, rendezvous_set_outcome):
-  def operation_termination_callback(operation_outcome):
-    termination = rendezvous_set_outcome(operation_outcome)
-    if termination.abortion is not None:
-      rpc_abortion_callback(termination.abortion)
-  return operation_termination_callback
-
-
-class Rendezvous(base.Operator, future.Future, stream.Consumer, face.Call):
-  """A rendez-vous for the threads of an operation.
-
-  Instances of this object present iterator and stream.Consumer interfaces for
-  interacting with application code and present a base.Operator interface and
-  maintain a base.Operator internally for interacting with base interface code.
-  """
-
-  def __init__(self, operator, operation_context):
-    self._condition = threading.Condition()
-
-    self._operator = operator
-    self._operation_context = operation_context
-
-    self._protocol_context = _NOT_YET_ARRIVED
-
-    self._up_initial_metadata = _NOT_YET_ARRIVED
-    self._up_payload = None
-    self._up_allowance = 1
-    self._up_completion = _NOT_YET_ARRIVED
-    self._down_initial_metadata = _NOT_YET_SEEN
-    self._down_payload = None
-    self._down_allowance = 1
-    self._down_terminal_metadata = _NOT_YET_SEEN
-    self._down_code = _NOT_YET_SEEN
-    self._down_details = _NOT_YET_SEEN
-
-    self._termination = _NOT_TERMINATED
-
-    # The semantics of future.Future.cancel and future.Future.cancelled are
-    # slightly wonky, so they have to be tracked separately from the rest of the
-    # result of the RPC. This field tracks whether cancellation was requested
-    # prior to termination of the RPC.
-    self._cancelled = False
-
-  def set_operator_and_context(self, operator, operation_context):
-    with self._condition:
-      self._operator = operator
-      self._operation_context = operation_context
-
-  def _down_completion(self):
-    if self._down_terminal_metadata.kind is _Transitory.Kind.NOT_YET_SEEN:
-      terminal_metadata = None
-      self._down_terminal_metadata = _GONE
-    elif self._down_terminal_metadata.kind is _Transitory.Kind.PRESENT:
-      terminal_metadata = self._down_terminal_metadata.value
-      self._down_terminal_metadata = _GONE
-    else:
-      terminal_metadata = None
-    if self._down_code.kind is _Transitory.Kind.NOT_YET_SEEN:
-      code = None
-      self._down_code = _GONE
-    elif self._down_code.kind is _Transitory.Kind.PRESENT:
-      code = self._down_code.value
-      self._down_code = _GONE
-    else:
-      code = None
-    if self._down_details.kind is _Transitory.Kind.NOT_YET_SEEN:
-      details = None
-      self._down_details = _GONE
-    elif self._down_details.kind is _Transitory.Kind.PRESENT:
-      details = self._down_details.value
-      self._down_details = _GONE
-    else:
-      details = None
-    return utilities.completion(terminal_metadata, code, details)
-
-  def _set_outcome(self, outcome):
-    if not self._termination.terminated:
-      self._operator = _DUMMY_OPERATOR
-      self._operation_context = None
-      self._down_initial_metadata = _GONE
-      self._down_payload = None
-      self._down_terminal_metadata = _GONE
-      self._down_code = _GONE
-      self._down_details = _GONE
-
-      if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED:
-        initial_metadata = None
-      else:
-        initial_metadata = self._up_initial_metadata.value
-      if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED:
-        terminal_metadata = None
-      else:
-        terminal_metadata = self._up_completion.value.terminal_metadata
-      if outcome.kind is base.Outcome.Kind.COMPLETED:
-        code = self._up_completion.value.code
-        details = self._up_completion.value.message
-      else:
-        code = outcome.code
-        details = outcome.details
-      self._termination = _OPERATION_OUTCOME_KIND_TO_TERMINATION_CONSTRUCTOR[
-          outcome.kind](initial_metadata, terminal_metadata, code, details)
-
-      self._condition.notify_all()
-
-    return self._termination
-
-  def advance(
-      self, initial_metadata=None, payload=None, completion=None,
-      allowance=None):
-    with self._condition:
-      if initial_metadata is not None:
-        self._up_initial_metadata = _Awaited(
-            _Awaited.Kind.ARRIVED, initial_metadata)
-      if payload is not None:
-        if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED:
-          self._up_initial_metadata = _ARRIVED_AND_NONE
-        self._up_payload = payload
-        self._up_allowance -= 1
-      if completion is not None:
-        if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED:
-          self._up_initial_metadata = _ARRIVED_AND_NONE
-        self._up_completion = _Awaited(
-            _Awaited.Kind.ARRIVED, completion)
-      if allowance is not None:
-        if self._down_payload is not None:
-          self._operator.advance(payload=self._down_payload)
-          self._down_payload = None
-          self._down_allowance += allowance - 1
-        else:
-          self._down_allowance += allowance
-      self._condition.notify_all()
-
-  def cancel(self):
-    with self._condition:
-      if self._operation_context is not None:
-        self._operation_context.cancel()
-        self._cancelled = True
-      return False
-
-  def cancelled(self):
-    with self._condition:
-      return self._cancelled
-
-  def running(self):
-    with self._condition:
-      return not self._termination.terminated
-
-  def done(self):
-    with self._condition:
-      return self._termination.terminated
-
-  def result(self, timeout=None):
-    until = None if timeout is None else time.time() + timeout
-    with self._condition:
-      while True:
-        if self._termination.terminated:
-          if self._termination.abortion is None:
-            return self._up_payload
-          elif self._termination.abortion.kind is face.Abortion.Kind.CANCELLED:
-            raise future.CancelledError()
-          else:
-            raise self._termination.abortion_error  # pylint: disable=raising-bad-type
-        else:
-          _wait_once_until(self._condition, until)
-
-  def exception(self, timeout=None):
-    until = None if timeout is None else time.time() + timeout
-    with self._condition:
-      while True:
-        if self._termination.terminated:
-          if self._termination.abortion is None:
-            return None
-          else:
-            return self._termination.abortion_error
-        else:
-          _wait_once_until(self._condition, until)
-
-  def traceback(self, timeout=None):
-    until = None if timeout is None else time.time() + timeout
-    with self._condition:
-      while True:
-        if self._termination.terminated:
-          if self._termination.abortion_error is None:
-            return None
-          else:
-            abortion_error = self._termination.abortion_error
-            break
-        else:
-          _wait_once_until(self._condition, until)
-
-    try:
-      raise abortion_error
-    except face.AbortionError:
-      return sys.exc_info()[2]
-
-  def add_done_callback(self, fn):
-    with self._condition:
-      if self._operation_context is not None:
-        outcome = self._operation_context.add_termination_callback(
-            _done_callback_as_operation_termination_callback(fn, self))
-        if outcome is None:
-          return
-        else:
-          self._set_outcome(outcome)
-
-    fn(self)
-
-  def consume(self, value):
-    with self._condition:
-      while True:
-        if self._termination.terminated:
-          return
-        elif 0 < self._down_allowance:
-          self._operator.advance(payload=value)
-          self._down_allowance -= 1
-          return
-        else:
-          self._condition.wait()
-
-  def terminate(self):
-    with self._condition:
-      if self._termination.terminated:
-        return
-      elif self._down_code.kind is _Transitory.Kind.GONE:
-        # Conform to specified idempotence of terminate by ignoring extra calls.
-        return
-      else:
-        completion = self._down_completion()
-        self._operator.advance(completion=completion)
-
-  def consume_and_terminate(self, value):
-    with self._condition:
-      while True:
-        if self._termination.terminated:
-          return
-        elif 0 < self._down_allowance:
-          completion = self._down_completion()
-          self._operator.advance(payload=value, completion=completion)
-          return
-        else:
-          self._condition.wait()
-
-  def __iter__(self):
-    return self
-
-  def __next__(self):
-    return self.next()
-
-  def next(self):
-    with self._condition:
-      while True:
-        if self._termination.abortion_error is not None:
-          raise self._termination.abortion_error
-        elif self._up_payload is not None:
-          payload = self._up_payload
-          self._up_payload = None
-          if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED:
-            self._operator.advance(allowance=1)
-          return payload
-        elif self._up_completion.kind is _Awaited.Kind.ARRIVED:
-          raise StopIteration()
-        else:
-          self._condition.wait()
-
-  def is_active(self):
-    with self._condition:
-      return not self._termination.terminated
-
-  def time_remaining(self):
-    if self._operation_context is None:
-      return 0
-    else:
-      return self._operation_context.time_remaining()
-
-  def add_abortion_callback(self, abortion_callback):
-    with self._condition:
-      if self._operation_context is None:
-        return self._termination.abortion
-      else:
-        outcome = self._operation_context.add_termination_callback(
-            _abortion_callback_as_operation_termination_callback(
-                abortion_callback, self.set_outcome))
-        if outcome is not None:
-          return self._set_outcome(outcome).abortion
-        else:
-          return self._termination.abortion
-
-  def protocol_context(self):
-    with self._condition:
-      while True:
-        if self._protocol_context.kind is _Awaited.Kind.ARRIVED:
-          return self._protocol_context.value
-        elif self._termination.abortion_error is not None:
-          raise self._termination.abortion_error
-        else:
-          self._condition.wait()
-
-  def initial_metadata(self):
-    with self._condition:
-      while True:
-        if self._up_initial_metadata.kind is _Awaited.Kind.ARRIVED:
-          return self._up_initial_metadata.value
-        elif self._termination.terminated:
-          return None
-        else:
-          self._condition.wait()
-
-  def terminal_metadata(self):
-    with self._condition:
-      while True:
-        if self._up_completion.kind is _Awaited.Kind.ARRIVED:
-          return self._up_completion.value.terminal_metadata
-        elif self._termination.terminated:
-          return None
-        else:
-          self._condition.wait()
-
-  def code(self):
-    with self._condition:
-      while True:
-        if self._up_completion.kind is _Awaited.Kind.ARRIVED:
-          return self._up_completion.value.code
-        elif self._termination.terminated:
-          return None
-        else:
-          self._condition.wait()
-
-  def details(self):
-    with self._condition:
-      while True:
-        if self._up_completion.kind is _Awaited.Kind.ARRIVED:
-          return self._up_completion.value.message
-        elif self._termination.terminated:
-          return None
-        else:
-          self._condition.wait()
-
-  def set_initial_metadata(self, initial_metadata):
-    with self._condition:
-      if (self._down_initial_metadata.kind is not
-          _Transitory.Kind.NOT_YET_SEEN):
-        raise ValueError(_CANNOT_SET_INITIAL_METADATA)
-      else:
-        self._down_initial_metadata = _GONE
-        self._operator.advance(initial_metadata=initial_metadata)
-
-  def set_terminal_metadata(self, terminal_metadata):
-    with self._condition:
-      if (self._down_terminal_metadata.kind is not
-          _Transitory.Kind.NOT_YET_SEEN):
-        raise ValueError(_CANNOT_SET_TERMINAL_METADATA)
-      else:
-        self._down_terminal_metadata = _Transitory(
-            _Transitory.Kind.PRESENT, terminal_metadata)
-
-  def set_code(self, code):
-    with self._condition:
-      if self._down_code.kind is not _Transitory.Kind.NOT_YET_SEEN:
-        raise ValueError(_CANNOT_SET_CODE)
-      else:
-        self._down_code = _Transitory(_Transitory.Kind.PRESENT, code)
-
-  def set_details(self, details):
-    with self._condition:
-      if self._down_details.kind is not _Transitory.Kind.NOT_YET_SEEN:
-        raise ValueError(_CANNOT_SET_DETAILS)
-      else:
-        self._down_details = _Transitory(_Transitory.Kind.PRESENT, details)
-
-  def set_protocol_context(self, protocol_context):
-    with self._condition:
-      self._protocol_context = _Awaited(
-          _Awaited.Kind.ARRIVED, protocol_context)
-      self._condition.notify_all()
-
-  def set_outcome(self, outcome):
-    with self._condition:
-      return self._set_outcome(outcome)
-
-
-class _ProtocolReceiver(base.ProtocolReceiver):
-
-  def __init__(self, rendezvous):
-    self._rendezvous = rendezvous
-
-  def context(self, protocol_context):
-    self._rendezvous.set_protocol_context(protocol_context)
-
-
-def protocol_receiver(rendezvous):
-  return _ProtocolReceiver(rendezvous)
-
-
-def pool_wrap(behavior, operation_context):
-  """Wraps an operation-related behavior so that it may be called in a pool.
-
-  Args:
-    behavior: A callable related to carrying out an operation.
-    operation_context: A base.OperationContext for the operation.
-
-  Returns:
-    A callable that, when called, carries out the behavior of the given
-      callable and appropriately handles any exceptions it raises.
-  """
-  def translation(*args):
-    try:
-      behavior(*args)
-    except (
-        abandonment.Abandoned,
-        face.CancellationError,
-        face.ExpirationError,
-        face.LocalShutdownError,
-        face.RemoteShutdownError,
-        face.NetworkError,
-        face.RemoteError,
-    ) as e:
-      if operation_context.outcome() is None:
-        operation_context.fail(e)
-    except Exception as e:
-      operation_context.fail(e)
-  return callable_util.with_exceptions_logged(
-      translation, _INTERNAL_ERROR_LOG_MESSAGE)
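
The removed Rendezvous above is the pivot of the crust layer: one object that behaves as a future.Future, an iterator of responses, and a stream.Consumer of requests. A minimal, illustrative sketch of those three faces, assuming rendezvous is an instance handed out by the (also removed) crust call helpers and handle, request, and last_request are hypothetical application values:

# Future face: block for the unary result (or raise the abortion error).
response = rendezvous.result(timeout=10)

# Iterator face: drain a response stream; each next() grants one more
# allowance upstream until the completion arrives, then StopIteration.
for streamed_response in rendezvous:
    handle(streamed_response)

# Consumer face: feed request payloads, then signal end of the request stream.
rendezvous.consume(request)
rendezvous.consume_and_terminate(last_request)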
diff --git a/src/python/grpcio/grpc/framework/crust/_service.py b/src/python/grpcio/grpc/framework/crust/_service.py
deleted file mode 100644
index 9903415c09944a25ea14930b43261d5cf3be90fc..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/crust/_service.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Behaviors for servicing RPCs."""
-
-from grpc.framework.crust import _control
-from grpc.framework.foundation import abandonment
-from grpc.framework.interfaces.base import utilities
-from grpc.framework.interfaces.face import face
-
-
-class _ServicerContext(face.ServicerContext):
-
-  def __init__(self, rendezvous):
-    self._rendezvous = rendezvous
-
-  def is_active(self):
-    return self._rendezvous.is_active()
-
-  def time_remaining(self):
-    return self._rendezvous.time_remaining()
-
-  def add_abortion_callback(self, abortion_callback):
-    return self._rendezvous.add_abortion_callback(abortion_callback)
-
-  def cancel(self):
-    self._rendezvous.cancel()
-
-  def protocol_context(self):
-    return self._rendezvous.protocol_context()
-
-  def invocation_metadata(self):
-    return self._rendezvous.initial_metadata()
-
-  def initial_metadata(self, initial_metadata):
-    self._rendezvous.set_initial_metadata(initial_metadata)
-
-  def terminal_metadata(self, terminal_metadata):
-    self._rendezvous.set_terminal_metadata(terminal_metadata)
-
-  def code(self, code):
-    self._rendezvous.set_code(code)
-
-  def details(self, details):
-    self._rendezvous.set_details(details)
-
-
-def _adaptation(pool, in_pool):
-  def adaptation(operator, operation_context):
-    rendezvous = _control.Rendezvous(operator, operation_context)
-    subscription = utilities.full_subscription(
-        rendezvous, _control.protocol_receiver(rendezvous))
-    outcome = operation_context.add_termination_callback(rendezvous.set_outcome)
-    if outcome is None:
-      pool.submit(_control.pool_wrap(in_pool, operation_context), rendezvous)
-      return subscription
-    else:
-      raise abandonment.Abandoned()
-  return adaptation
-
-
-def adapt_inline_unary_unary(method, pool):
-  def in_pool(rendezvous):
-    request = next(rendezvous)
-    response = method(request, _ServicerContext(rendezvous))
-    rendezvous.consume_and_terminate(response)
-  return _adaptation(pool, in_pool)
-
-
-def adapt_inline_unary_stream(method, pool):
-  def in_pool(rendezvous):
-    request = next(rendezvous)
-    response_iterator = method(request, _ServicerContext(rendezvous))
-    for response in response_iterator:
-      rendezvous.consume(response)
-    rendezvous.terminate()
-  return _adaptation(pool, in_pool)
-
-
-def adapt_inline_stream_unary(method, pool):
-  def in_pool(rendezvous):
-    response = method(rendezvous, _ServicerContext(rendezvous))
-    rendezvous.consume_and_terminate(response)
-  return _adaptation(pool, in_pool)
-
-
-def adapt_inline_stream_stream(method, pool):
-  def in_pool(rendezvous):
-    response_iterator = method(rendezvous, _ServicerContext(rendezvous))
-    for response in response_iterator:
-      rendezvous.consume(response)
-    rendezvous.terminate()
-  return _adaptation(pool, in_pool)
-
-
-def adapt_event_unary_unary(method, pool):
-  def in_pool(rendezvous):
-    request = next(rendezvous)
-    method(
-        request, rendezvous.consume_and_terminate, _ServicerContext(rendezvous))
-  return _adaptation(pool, in_pool)
-
-
-def adapt_event_unary_stream(method, pool):
-  def in_pool(rendezvous):
-    request = next(rendezvous)
-    method(request, rendezvous, _ServicerContext(rendezvous))
-  return _adaptation(pool, in_pool)
-
-
-def adapt_event_stream_unary(method, pool):
-  def in_pool(rendezvous):
-    request_consumer = method(
-        rendezvous.consume_and_terminate, _ServicerContext(rendezvous))
-    for request in rendezvous:
-      request_consumer.consume(request)
-    request_consumer.terminate()
-  return _adaptation(pool, in_pool)
-
-
-def adapt_event_stream_stream(method, pool):
-  def in_pool(rendezvous):
-    request_consumer = method(rendezvous, _ServicerContext(rendezvous))
-    for request in rendezvous:
-      request_consumer.consume(request)
-    request_consumer.terminate()
-  return _adaptation(pool, in_pool)
-
-
-def adapt_multi_method(multi_method, pool):
-  def adaptation(group, method, operator, operation_context):
-    rendezvous = _control.Rendezvous(operator, operation_context)
-    subscription = utilities.full_subscription(
-        rendezvous, _control.protocol_receiver(rendezvous))
-    outcome = operation_context.add_termination_callback(rendezvous.set_outcome)
-    if outcome is None:
-      def in_pool():
-        request_consumer = multi_method.service(
-            group, method, rendezvous, _ServicerContext(rendezvous))
-        for request in rendezvous:
-          request_consumer.consume(request)
-        request_consumer.terminate()
-      pool.submit(_control.pool_wrap(in_pool, operation_context), rendezvous)
-      return subscription
-    else:
-      raise abandonment.Abandoned()
-  return adaptation
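
For orientation, the adapters in the removed _service.py wrap application-level method behaviors so the base layer can drive them through a Rendezvous. A hedged sketch of the unary-unary inline case, in which say_hello and pool are hypothetical stand-ins for an application method and a futures.ThreadPoolExecutor:

from grpc.framework.crust import _service

def say_hello(request, servicer_context):
    # Inline unary-unary behavior: one request in, one response out.
    return b'hello ' + request

adaptation = _service.adapt_inline_unary_unary(say_hello, pool)
# The base layer later calls the adaptation with an operator and an operation
# context and receives back a full subscription for the operation:
#   subscription = adaptation(operator, operation_context)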
diff --git a/src/python/grpcio/grpc/framework/crust/implementations.py b/src/python/grpcio/grpc/framework/crust/implementations.py
deleted file mode 100644
index 2d3ab733b6a92280a7e1720cc4d3fb0d672bef6f..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/crust/implementations.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Entry points into the Crust layer of RPC Framework."""
-
-import six
-
-from grpc.framework.common import cardinality
-from grpc.framework.common import style
-from grpc.framework.crust import _calls
-from grpc.framework.crust import _service
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.face import face
-
-
-class _BaseServicer(base.Servicer):
-
-  def __init__(self, adapted_methods, adapted_multi_method):
-    self._adapted_methods = adapted_methods
-    self._adapted_multi_method = adapted_multi_method
-
-  def service(self, group, method, context, output_operator):
-    adapted_method = self._adapted_methods.get((group, method), None)
-    if adapted_method is not None:
-      return adapted_method(output_operator, context)
-    elif self._adapted_multi_method is not None:
-      try:
-        return self._adapted_multi_method(
-            group, method, output_operator, context)
-      except face.NoSuchMethodError:
-        raise base.NoSuchMethodError(None, None)
-    else:
-      raise base.NoSuchMethodError(None, None)
-
-
-class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable):
-
-  def __init__(self, end, group, method, pool):
-    self._end = end
-    self._group = group
-    self._method = method
-    self._pool = pool
-
-  def __call__(
-      self, request, timeout, metadata=None, with_call=False,
-      protocol_options=None):
-    return _calls.blocking_unary_unary(
-        self._end, self._group, self._method, timeout, with_call,
-        protocol_options, metadata, request)
-
-  def future(self, request, timeout, metadata=None, protocol_options=None):
-    return _calls.future_unary_unary(
-        self._end, self._group, self._method, timeout, protocol_options,
-        metadata, request)
-
-  def event(
-      self, request, receiver, abortion_callback, timeout,
-      metadata=None, protocol_options=None):
-    return _calls.event_unary_unary(
-        self._end, self._group, self._method, timeout, protocol_options,
-        metadata, request, receiver, abortion_callback, self._pool)
-
-
-class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable):
-
-  def __init__(self, end, group, method, pool):
-    self._end = end
-    self._group = group
-    self._method = method
-    self._pool = pool
-
-  def __call__(self, request, timeout, metadata=None, protocol_options=None):
-    return _calls.inline_unary_stream(
-        self._end, self._group, self._method, timeout, protocol_options,
-        metadata, request)
-
-  def event(
-      self, request, receiver, abortion_callback, timeout,
-      metadata=None, protocol_options=None):
-    return _calls.event_unary_stream(
-        self._end, self._group, self._method, timeout, protocol_options,
-        metadata, request, receiver, abortion_callback, self._pool)
-
-
-class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable):
-
-  def __init__(self, end, group, method, pool):
-    self._end = end
-    self._group = group
-    self._method = method
-    self._pool = pool
-
-  def __call__(
-      self, request_iterator, timeout, metadata=None,
-      with_call=False, protocol_options=None):
-    return _calls.blocking_stream_unary(
-        self._end, self._group, self._method, timeout, with_call,
-        protocol_options, metadata, request_iterator, self._pool)
-
-  def future(
-      self, request_iterator, timeout, metadata=None, protocol_options=None):
-    return _calls.future_stream_unary(
-        self._end, self._group, self._method, timeout, protocol_options,
-        metadata, request_iterator, self._pool)
-
-  def event(
-      self, receiver, abortion_callback, timeout, metadata=None,
-      protocol_options=None):
-    return _calls.event_stream_unary(
-        self._end, self._group, self._method, timeout, protocol_options,
-        metadata, receiver, abortion_callback, self._pool)
-
-
-class _StreamStreamMultiCallable(face.StreamStreamMultiCallable):
-
-  def __init__(self, end, group, method, pool):
-    self._end = end
-    self._group = group
-    self._method = method
-    self._pool = pool
-
-  def __call__(
-      self, request_iterator, timeout, metadata=None, protocol_options=None):
-    return _calls.inline_stream_stream(
-        self._end, self._group, self._method, timeout, protocol_options,
-        metadata, request_iterator, self._pool)
-
-  def event(
-      self, receiver, abortion_callback, timeout, metadata=None,
-      protocol_options=None):
-    return _calls.event_stream_stream(
-        self._end, self._group, self._method, timeout, protocol_options,
-        metadata, receiver, abortion_callback, self._pool)
-
-
-class _GenericStub(face.GenericStub):
-  """An face.GenericStub implementation."""
-
-  def __init__(self, end, pool):
-    self._end = end
-    self._pool = pool
-
-  def blocking_unary_unary(
-      self, group, method, request, timeout, metadata=None,
-      with_call=None, protocol_options=None):
-    return _calls.blocking_unary_unary(
-        self._end, group, method, timeout, with_call, protocol_options,
-        metadata, request)
-
-  def future_unary_unary(
-      self, group, method, request, timeout, metadata=None,
-      protocol_options=None):
-    return _calls.future_unary_unary(
-        self._end, group, method, timeout, protocol_options, metadata, request)
-
-  def inline_unary_stream(
-      self, group, method, request, timeout, metadata=None,
-      protocol_options=None):
-    return _calls.inline_unary_stream(
-        self._end, group, method, timeout, protocol_options, metadata, request)
-
-  def blocking_stream_unary(
-      self, group, method, request_iterator, timeout, metadata=None,
-      with_call=None, protocol_options=None):
-    return _calls.blocking_stream_unary(
-        self._end, group, method, timeout, with_call, protocol_options,
-        metadata, request_iterator, self._pool)
-
-  def future_stream_unary(
-      self, group, method, request_iterator, timeout, metadata=None,
-      protocol_options=None):
-    return _calls.future_stream_unary(
-        self._end, group, method, timeout, protocol_options, metadata,
-        request_iterator, self._pool)
-
-  def inline_stream_stream(
-      self, group, method, request_iterator, timeout, metadata=None,
-      protocol_options=None):
-    return _calls.inline_stream_stream(
-        self._end, group, method, timeout, protocol_options, metadata,
-        request_iterator, self._pool)
-
-  def event_unary_unary(
-      self, group, method, request, receiver, abortion_callback, timeout,
-      metadata=None, protocol_options=None):
-    return _calls.event_unary_unary(
-        self._end, group, method, timeout, protocol_options, metadata, request,
-        receiver, abortion_callback, self._pool)
-
-  def event_unary_stream(
-      self, group, method, request, receiver, abortion_callback, timeout,
-      metadata=None, protocol_options=None):
-    return _calls.event_unary_stream(
-        self._end, group, method, timeout, protocol_options, metadata, request,
-        receiver, abortion_callback, self._pool)
-
-  def event_stream_unary(
-      self, group, method, receiver, abortion_callback, timeout,
-      metadata=None, protocol_options=None):
-    return _calls.event_stream_unary(
-        self._end, group, method, timeout, protocol_options, metadata, receiver,
-        abortion_callback, self._pool)
-
-  def event_stream_stream(
-      self, group, method, receiver, abortion_callback, timeout,
-      metadata=None, protocol_options=None):
-    return _calls.event_stream_stream(
-        self._end, group, method, timeout, protocol_options, metadata, receiver,
-        abortion_callback, self._pool)
-
-  def unary_unary(self, group, method):
-    return _UnaryUnaryMultiCallable(self._end, group, method, self._pool)
-
-  def unary_stream(self, group, method):
-    return _UnaryStreamMultiCallable(self._end, group, method, self._pool)
-
-  def stream_unary(self, group, method):
-    return _StreamUnaryMultiCallable(self._end, group, method, self._pool)
-
-  def stream_stream(self, group, method):
-    return _StreamStreamMultiCallable(self._end, group, method, self._pool)
-
-
-class _DynamicStub(face.DynamicStub):
-  """An face.DynamicStub implementation."""
-
-  def __init__(self, end, group, cardinalities, pool):
-    self._end = end
-    self._group = group
-    self._cardinalities = cardinalities
-    self._pool = pool
-
-  def __getattr__(self, attr):
-    method_cardinality = self._cardinalities.get(attr)
-    if method_cardinality is cardinality.Cardinality.UNARY_UNARY:
-      return _UnaryUnaryMultiCallable(self._end, self._group, attr, self._pool)
-    elif method_cardinality is cardinality.Cardinality.UNARY_STREAM:
-      return _UnaryStreamMultiCallable(self._end, self._group, attr, self._pool)
-    elif method_cardinality is cardinality.Cardinality.STREAM_UNARY:
-      return _StreamUnaryMultiCallable(self._end, self._group, attr, self._pool)
-    elif method_cardinality is cardinality.Cardinality.STREAM_STREAM:
-      return _StreamStreamMultiCallable(
-          self._end, self._group, attr, self._pool)
-    else:
-      raise AttributeError('_DynamicStub object has no attribute "%s"!' % attr)
-
-
-def _adapt_method_implementations(method_implementations, pool):
-  adapted_implementations = {}
-  for name, method_implementation in six.iteritems(method_implementations):
-    if method_implementation.style is style.Service.INLINE:
-      if method_implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
-        adapted_implementations[name] = _service.adapt_inline_unary_unary(
-            method_implementation.unary_unary_inline, pool)
-      elif method_implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
-        adapted_implementations[name] = _service.adapt_inline_unary_stream(
-            method_implementation.unary_stream_inline, pool)
-      elif method_implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
-        adapted_implementations[name] = _service.adapt_inline_stream_unary(
-            method_implementation.stream_unary_inline, pool)
-      elif method_implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
-        adapted_implementations[name] = _service.adapt_inline_stream_stream(
-            method_implementation.stream_stream_inline, pool)
-    elif method_implementation.style is style.Service.EVENT:
-      if method_implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
-        adapted_implementations[name] = _service.adapt_event_unary_unary(
-            method_implementation.unary_unary_event, pool)
-      elif method_implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
-        adapted_implementations[name] = _service.adapt_event_unary_stream(
-            method_implementation.unary_stream_event, pool)
-      elif method_implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
-        adapted_implementations[name] = _service.adapt_event_stream_unary(
-            method_implementation.stream_unary_event, pool)
-      elif method_implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
-        adapted_implementations[name] = _service.adapt_event_stream_stream(
-            method_implementation.stream_stream_event, pool)
-  return adapted_implementations
-
-
-def servicer(method_implementations, multi_method_implementation, pool):
-  """Creates a base.Servicer.
-
-  It is guaranteed that any passed face.MultiMethodImplementation will
-  only be called to service an RPC if there is no
-  face.MethodImplementation for the RPC method in the passed
-  method_implementations dictionary.
-
-  Args:
-    method_implementations: A dictionary from RPC method name to
-      face.MethodImplementation object to be used to service the named
-      RPC method.
-    multi_method_implementation: A face.MultiMethodImplementation to be
-      used to service any RPCs not serviced by the
-      face.MethodImplementations given in the method_implementations
-      dictionary, or None.
-    pool: A thread pool.
-
-  Returns:
-    A base.Servicer that services RPCs via the given implementations.
-  """
-  adapted_implementations = _adapt_method_implementations(
-      method_implementations, pool)
-  if multi_method_implementation is None:
-    adapted_multi_method_implementation = None
-  else:
-    adapted_multi_method_implementation = _service.adapt_multi_method(
-        multi_method_implementation, pool)
-  return _BaseServicer(
-      adapted_implementations, adapted_multi_method_implementation)
-
-
-def generic_stub(end, pool):
-  """Creates an face.GenericStub.
-
-  Args:
-    end: A base.End.
-    pool: A futures.ThreadPoolExecutor.
-
-  Returns:
-    A face.GenericStub that performs RPCs via the given base.End.
-  """
-  return _GenericStub(end, pool)
-
-
-def dynamic_stub(end, group, cardinalities, pool):
-  """Creates an face.DynamicStub.
-
-  Args:
-    end: A base.End.
-    group: The group identifier for all RPCs to be made with the created
-      face.DynamicStub.
-    cardinalities: A dict from method identifier to cardinality.Cardinality
-      value identifying the cardinality of every RPC method to be supported by
-      the created face.DynamicStub.
-    pool: A futures.ThreadPoolExecutor.
-
-  Returns:
-    A face.DynamicStub that performs RPCs via the given base.End.
-  """
-  return _DynamicStub(end, group, cardinalities, pool)
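
As a usage reminder for the removed entry points: dynamic_stub builds a stub whose attribute access dispatches on declared cardinality. An illustrative sketch, where end, pool, request, the 'example.Greeter' group, and the Hello method are all assumed values rather than anything defined in this repository:

from grpc.framework.common import cardinality
from grpc.framework.crust import implementations

cardinalities = {'Hello': cardinality.Cardinality.UNARY_UNARY}
stub = implementations.dynamic_stub(end, 'example.Greeter', cardinalities, pool)

# Attribute lookup returns a _UnaryUnaryMultiCallable for Hello; the blocking
# call form returns the single response (or raises on abortion).
response = stub.Hello(request, timeout=10)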
diff --git a/src/python/grpcio/grpc/framework/foundation/_timer_future.py b/src/python/grpcio/grpc/framework/foundation/_timer_future.py
deleted file mode 100644
index 2c9996aa9db7ab79172586a61b0b7af9a9f6cab3..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/foundation/_timer_future.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Affords a Future implementation based on Python's threading.Timer."""
-
-import sys
-import threading
-import time
-
-from grpc.framework.foundation import future
-
-
-class TimerFuture(future.Future):
-  """A Future implementation based around Timer objects."""
-
-  def __init__(self, compute_time, computation):
-    """Constructor.
-
-    Args:
-      compute_time: The time after which to begin this future's computation.
-      computation: The computation to be performed within this Future.
-    """
-    self._lock = threading.Lock()
-    self._compute_time = compute_time
-    self._computation = computation
-    self._timer = None
-    self._computing = False
-    self._computed = False
-    self._cancelled = False
-    self._return_value = None
-    self._exception = None
-    self._traceback = None
-    self._waiting = []
-
-  def _compute(self):
-    """Performs the computation embedded in this Future.
-
-    Or doesn't, if the time to perform it has not yet arrived.
-    """
-    with self._lock:
-      time_remaining = self._compute_time - time.time()
-      if 0 < time_remaining:
-        self._timer = threading.Timer(time_remaining, self._compute)
-        self._timer.start()
-        return
-      else:
-        self._computing = True
-
-    try:
-      return_value = self._computation()
-      exception = None
-      traceback = None
-    except Exception as e:  # pylint: disable=broad-except
-      return_value = None
-      exception = e
-      traceback = sys.exc_info()[2]
-
-    with self._lock:
-      self._computing = False
-      self._computed = True
-      self._return_value = return_value
-      self._exception = exception
-      self._traceback = traceback
-      waiting = self._waiting
-
-    for callback in waiting:
-      callback(self)
-
-  def start(self):
-    """Starts this Future.
-
-    This must be called exactly once, immediately after construction.
-    """
-    with self._lock:
-      self._timer = threading.Timer(
-          self._compute_time - time.time(), self._compute)
-      self._timer.start()
-
-  def cancel(self):
-    """See future.Future.cancel for specification."""
-    with self._lock:
-      if self._computing or self._computed:
-        return False
-      elif self._cancelled:
-        return True
-      else:
-        self._timer.cancel()
-        self._cancelled = True
-        waiting = self._waiting
-
-    for callback in waiting:
-      try:
-        callback(self)
-      except Exception:  # pylint: disable=broad-except
-        pass
-
-    return True
-
-  def cancelled(self):
-    """See future.Future.cancelled for specification."""
-    with self._lock:
-      return self._cancelled
-
-  def running(self):
-    """See future.Future.running for specification."""
-    with self._lock:
-      return not self._computed and not self._cancelled
-
-  def done(self):
-    """See future.Future.done for specification."""
-    with self._lock:
-      return self._computed or self._cancelled
-
-  def result(self, timeout=None):
-    """See future.Future.result for specification."""
-    with self._lock:
-      if self._cancelled:
-        raise future.CancelledError()
-      elif self._computed:
-        if self._exception is None:
-          return self._return_value
-        else:
-          raise self._exception  # pylint: disable=raising-bad-type
-
-      condition = threading.Condition()
-      def notify_condition(unused_future):
-        with condition:
-          condition.notify()
-      self._waiting.append(notify_condition)
-
-    with condition:
-      condition.wait(timeout=timeout)
-
-    with self._lock:
-      if self._cancelled:
-        raise future.CancelledError()
-      elif self._computed:
-        if self._exception is None:
-          return self._return_value
-        else:
-          raise self._exception  # pylint: disable=raising-bad-type
-      else:
-        raise future.TimeoutError()
-
-  def exception(self, timeout=None):
-    """See future.Future.exception for specification."""
-    with self._lock:
-      if self._cancelled:
-        raise future.CancelledError()
-      elif self._computed:
-        return self._exception
-
-      condition = threading.Condition()
-      def notify_condition(unused_future):
-        with condition:
-          condition.notify()
-      self._waiting.append(notify_condition)
-
-    with condition:
-      condition.wait(timeout=timeout)
-
-    with self._lock:
-      if self._cancelled:
-        raise future.CancelledError()
-      elif self._computed:
-        return self._exception
-      else:
-        raise future.TimeoutError()
-
-  def traceback(self, timeout=None):
-    """See future.Future.traceback for specification."""
-    with self._lock:
-      if self._cancelled:
-        raise future.CancelledError()
-      elif self._computed:
-        return self._traceback
-
-      condition = threading.Condition()
-      def notify_condition(unused_future):
-        with condition:
-          condition.notify()
-      self._waiting.append(notify_condition)
-
-    with condition:
-      condition.wait(timeout=timeout)
-
-    with self._lock:
-      if self._cancelled:
-        raise future.CancelledError()
-      elif self._computed:
-        return self._traceback
-      else:
-        raise future.TimeoutError()
-
-  def add_done_callback(self, fn):
-    """See future.Future.add_done_callback for specification."""
-    with self._lock:
-      if not self._computed and not self._cancelled:
-        self._waiting.append(fn)
-        return
-
-    fn(self)
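
The removed TimerFuture defers a computation until a wall-clock time and exposes its outcome through the future.Future interface. A small, self-contained sketch of the intended lifecycle (construct, call start exactly once, then block on result):

import time

from grpc.framework.foundation import _timer_future

timer_future = _timer_future.TimerFuture(time.time() + 3, lambda: 6 * 7)
timer_future.start()
# Blocks until the underlying threading.Timer fires, then prints 42.
print(timer_future.result())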
diff --git a/src/python/grpcio/grpc/framework/foundation/relay.py b/src/python/grpcio/grpc/framework/foundation/relay.py
deleted file mode 100644
index 20f41b2738808e8dda7cd430c7cfb5beacd0e63f..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/foundation/relay.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Implementations of in-order work deference."""
-
-import abc
-import enum
-import threading
-
-from grpc.framework.foundation import activated
-from grpc.framework.foundation import logging_pool
-
-_NULL_BEHAVIOR = lambda unused_value: None
-
-
-class Relay(object):
-  """Performs work submitted to it in another thread.
-
-  Performs work in the order in which work was submitted to it; otherwise there
-  would be no reason to use an implementation of this interface instead of a
-  thread pool.
-  """
-
-  @abc.abstractmethod
-  def add_value(self, value):
-    """Adds a value to be passed to the behavior registered with this Relay.
-
-    Args:
-      value: A value that will be passed, in a call made in another thread, to
-        the behavior registered with this Relay.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def set_behavior(self, behavior):
-    """Sets the behavior that this Relay should call when passed values.
-
-    Args:
-      behavior: The behavior that this Relay should call in another thread when
-        passed a value, or None to have passed values ignored.
-    """
-    raise NotImplementedError()
-
-
-class _PoolRelay(activated.Activated, Relay):
-
-  @enum.unique
-  class _State(enum.Enum):
-    INACTIVE = 'inactive'
-    IDLE = 'idle'
-    SPINNING = 'spinning'
-
-  def __init__(self, pool, behavior):
-    self._condition = threading.Condition()
-    self._pool = pool
-    self._own_pool = pool is None
-    self._state = _PoolRelay._State.INACTIVE
-    self._activated = False
-    self._spinning = False
-    self._values = []
-    self._behavior = _NULL_BEHAVIOR if behavior is None else behavior
-
-  def _spin(self, behavior, value):
-    while True:
-      behavior(value)
-      with self._condition:
-        if self._values:
-          value = self._values.pop(0)
-          behavior = self._behavior
-        else:
-          self._state = _PoolRelay._State.IDLE
-          self._condition.notify_all()
-          break
-
-  def add_value(self, value):
-    with self._condition:
-      if self._state is _PoolRelay._State.INACTIVE:
-        raise ValueError('add_value not valid on inactive Relay!')
-      elif self._state is _PoolRelay._State.IDLE:
-        self._pool.submit(self._spin, self._behavior, value)
-        self._state = _PoolRelay._State.SPINNING
-      else:
-        self._values.append(value)
-
-  def set_behavior(self, behavior):
-    with self._condition:
-      self._behavior = _NULL_BEHAVIOR if behavior is None else behavior
-
-  def _start(self):
-    with self._condition:
-      self._state = _PoolRelay._State.IDLE
-      if self._own_pool:
-        self._pool = logging_pool.pool(1)
-      return self
-
-  def _stop(self):
-    with self._condition:
-      while self._state is _PoolRelay._State.SPINNING:
-        self._condition.wait()
-      if self._own_pool:
-        self._pool.shutdown(wait=True)
-      self._state = _PoolRelay._State.INACTIVE
-
-  def __enter__(self):
-    return self._start()
-
-  def __exit__(self, exc_type, exc_val, exc_tb):
-    self._stop()
-    return False
-
-  def start(self):
-    return self._start()
-
-  def stop(self):
-    self._stop()
-
-
-def relay(behavior):
-  """Creates a Relay.
-
-  Args:
-    behavior: The behavior to be called by the created Relay, or None to have
-      passed values dropped until a different behavior is given to the returned
-      Relay later.
-
-  Returns:
-    An object that is both an activated.Activated and a Relay. The object is
-      only valid for use as a Relay when activated.
-  """
-  return _PoolRelay(None, behavior)
-
-
-def pool_relay(pool, behavior):
-  """Creates a Relay that uses a given thread pool.
-
-  This object will make use of at most one thread in the given pool.
-
-  Args:
-    pool: A futures.ThreadPoolExecutor for use by the created Relay.
-    behavior: The behavior to be called by the created Relay, or None to have
-      passed values dropped until a different behavior is given to the returned
-      Relay later.
-
-  Returns:
-    An object that is both an activated.Activated and a Relay. The object is
-      only valid for use as a Relay when activated.
-  """
-  return _PoolRelay(pool, behavior)
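
The removed Relay guarantees that values are handed to a single behavior in submission order on one pool thread. A brief sketch of the activated lifecycle, using only the functions defined above:

from grpc.framework.foundation import relay

def show(value):
    print(value)

printing_relay = relay.relay(show)
with printing_relay:              # _PoolRelay is an activated.Activated
    printing_relay.add_value('first')
    printing_relay.add_value('second')
# Leaving the block waits for the relay to drain and shuts down its own pool.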
diff --git a/src/python/grpcio/grpc/framework/interfaces/links/links.py b/src/python/grpcio/grpc/framework/interfaces/links/links.py
deleted file mode 100644
index 9631b19078e2a3a5d6db3b09073a548b65302888..0000000000000000000000000000000000000000
--- a/src/python/grpcio/grpc/framework/interfaces/links/links.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""The low-level ticket-exchanging-links interface of RPC Framework."""
-
-import abc
-import collections
-import enum
-
-import six
-
-
-class Protocol(collections.namedtuple('Protocol', ('kind', 'value',))):
-  """A sum type for handles to a system that transmits tickets.
-
-  Attributes:
-    kind: A Kind value identifying the kind of value being passed.
-    value: The value being passed between the high-level application and the
-      system affording ticket transport.
-  """
-
-  @enum.unique
-  class Kind(enum.Enum):
-    CALL_OPTION = 'call option'
-    SERVICER_CONTEXT = 'servicer context'
-    INVOCATION_CONTEXT = 'invocation context'
-
-
-class Ticket(
-    collections.namedtuple(
-        'Ticket',
-        ('operation_id', 'sequence_number', 'group', 'method', 'subscription',
-         'timeout', 'allowance', 'initial_metadata', 'payload',
-         'terminal_metadata', 'code', 'message', 'termination', 'protocol',))):
-  """A sum type for all values sent from a front to a back.
-
-  Attributes:
-    operation_id: A unique-with-respect-to-equality hashable object identifying
-      a particular operation.
-    sequence_number: A zero-indexed integer sequence number identifying the
-      ticket's place in the stream of tickets sent in one direction for the
-      particular operation.
-    group: The group to which the method of the operation belongs. Must be
-      present in the first ticket from invocation side to service side. Ignored
-      for all other tickets exchanged during the operation.
-    method: The name of an operation. Must be present in the first ticket from
-      invocation side to service side. Ignored for all other tickets exchanged
-      during the operation.
-    subscription: A Subscription value describing the interest one side has in
-      receiving information from the other side. Must be present in the first
-      ticket from either side. Ignored for all other tickets exchanged during
-      the operation.
-    timeout: A nonzero length of time (measured from the beginning of the
-      operation) to allow for the entire operation. Must be present in the first
-      ticket from invocation side to service side. Optional for all other
-      tickets exchanged during the operation. Receipt of a value from the other
-      side of the operation indicates the value in use by that side. Setting a
-      value on a later ticket allows either side to request time extensions (or
-      even time reductions!) on in-progress operations.
-    allowance: A positive integer granting permission for a number of payloads
-      to be transmitted to the communicating side of the operation, or None if
-      no additional allowance is being granted with this ticket.
-    initial_metadata: An optional metadata value communicated from one side to
-      the other at the beginning of the operation. May be non-None in at most
-      one ticket from each side. Any non-None value must appear no later than
-      the first payload value.
-    payload: A customer payload object. May be None.
-    terminal_metadata: A metadata value communicated from one side to the other
-      at the end of the operation. May be non-None in the same ticket as
-      the code and message, but must be None for all earlier tickets.
-    code: A value communicated at operation completion. May be None.
-    message: A value communicated at operation completion. May be None.
-    termination: A Termination value describing the end of the operation, or
-      None if the operation has not yet terminated. If set, no further tickets
-      may be sent in the same direction.
-    protocol: A Protocol value or None, with further semantics being a matter
-      between high-level application and underlying ticket transport.
-  """
-
-  @enum.unique
-  class Subscription(enum.Enum):
-    """Identifies the level of subscription of a side of an operation."""
-
-    NONE = 'none'
-    TERMINATION = 'termination'
-    FULL = 'full'
-
-  @enum.unique
-  class Termination(enum.Enum):
-    """Identifies the termination of an operation."""
-
-    COMPLETION = 'completion'
-    CANCELLATION = 'cancellation'
-    EXPIRATION = 'expiration'
-    SHUTDOWN = 'shutdown'
-    RECEPTION_FAILURE = 'reception failure'
-    TRANSMISSION_FAILURE = 'transmission failure'
-    LOCAL_FAILURE = 'local failure'
-    REMOTE_FAILURE = 'remote failure'
-
-
-class Link(six.with_metaclass(abc.ABCMeta)):
-  """Accepts and emits tickets."""
-
-  @abc.abstractmethod
-  def accept_ticket(self, ticket):
-    """Accept a Ticket.
-
-    Args:
-      ticket: Any Ticket.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def join_link(self, link):
-    """Mates this object with a peer with which it will exchange tickets."""
-    raise NotImplementedError()
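
For readers skimming this removal: the deleted links layer modeled each side of an operation as a Link that exchanges Ticket values with a joined peer. The standalone sketch below is an editor's illustration only (it is not part of this change, and the simplified _Ticket tuple models just a few of the documented attributes); it shows the accept_ticket/join_link handshake between two in-process links.

    import collections

    # Simplified stand-in for the Ticket described above; only a handful of
    # the documented attributes are modeled here.
    _Ticket = collections.namedtuple(
        '_Ticket', ('operation_id', 'sequence_number', 'payload', 'termination'))

    class _RecordingLink(object):
      """Mirrors the Link interface: accepts tickets and mates with a peer."""

      def __init__(self):
        self._peer = None
        self.received = []

      def join_link(self, link):
        # Mate this link with the peer with which it will exchange tickets.
        self._peer = link

      def accept_ticket(self, ticket):
        # A real link would route the ticket onward; here we just record it.
        self.received.append(ticket)

    invocation_side = _RecordingLink()
    service_side = _RecordingLink()
    invocation_side.join_link(service_side)
    service_side.join_link(invocation_side)
    service_side.accept_ticket(_Ticket(
        operation_id=object(), sequence_number=0, payload=b'request',
        termination=None))
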
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 37dab6a791435b96493e12bb7b7375568e4cb6cf..4ec7c9ffff6b5555d2ee4c60b2c4b7e8e71a4881 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -94,6 +94,7 @@ CORE_SOURCE_FILES = [
   'src/core/lib/iomgr/endpoint_pair_posix.c',
   'src/core/lib/iomgr/endpoint_pair_windows.c',
   'src/core/lib/iomgr/error.c',
+  'src/core/lib/iomgr/ev_epoll_linux.c',
   'src/core/lib/iomgr/ev_poll_and_epoll_posix.c',
   'src/core/lib/iomgr/ev_poll_posix.c',
   'src/core/lib/iomgr/ev_posix.c',
@@ -104,6 +105,7 @@ CORE_SOURCE_FILES = [
   'src/core/lib/iomgr/iomgr_posix.c',
   'src/core/lib/iomgr/iomgr_windows.c',
   'src/core/lib/iomgr/load_file.c',
+  'src/core/lib/iomgr/network_status_tracker.c',
   'src/core/lib/iomgr/polling_entity.c',
   'src/core/lib/iomgr/pollset_set_windows.c',
   'src/core/lib/iomgr/pollset_windows.c',
diff --git a/src/python/grpcio/grpc_version.py b/src/python/grpcio/grpc_version.py
index 0c13104d9da1c1b22a56466bf089879998ba39d7..ea38526a28131b979c0390f8667633a7a57d598f 100644
--- a/src/python/grpcio/grpc_version.py
+++ b/src/python/grpcio/grpc_version.py
@@ -29,4 +29,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!!
 
-VERSION='0.15.0.dev0'
+VERSION='1.1.0.dev0'
diff --git a/src/python/grpcio/support.py b/src/python/grpcio/support.py
index 33244eb388ee2d5614464b78ab00af7663290d73..7730374df08204c923660ed44c604f5feb507d4b 100644
--- a/src/python/grpcio/support.py
+++ b/src/python/grpcio/support.py
@@ -50,7 +50,6 @@ Could not find <Python.h>. This could mean the following:
     (check your environment variables or try re-installing?)
   * You're on Windows and your Python installation was somehow corrupted
     (check your environment variables or try re-installing?)
-    * Note: Windows users should look into installing `vcpython27`.
 """
 
 C_CHECKS = {
diff --git a/src/python/grpcio/tests/unit/_adapter/.gitignore b/src/python/grpcio/tests/unit/_adapter/.gitignore
deleted file mode 100644
index a6f96cd6dbb189f379fbfdfaef86003fe1d56503..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/_adapter/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*.a
-*.so
-*.dll
-*.pyc
-*.pyd
diff --git a/src/python/grpcio/tests/unit/_adapter/_proto_scenarios.py b/src/python/grpcio/tests/unit/_adapter/_proto_scenarios.py
deleted file mode 100644
index 7a90eacf77f7355dfa388debe7425d7565109942..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/_adapter/_proto_scenarios.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Test scenarios using protocol buffers."""
-
-import abc
-import threading
-
-import six
-
-from tests.unit._junkdrawer import math_pb2
-
-
-class ProtoScenario(six.with_metaclass(abc.ABCMeta)):
-  """An RPC test scenario using protocol buffers."""
-
-  @abc.abstractmethod
-  def method(self):
-    """Access the test method name.
-
-    Returns:
-      The test method name.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def serialize_request(self, request):
-    """Serialize a request protocol buffer.
-
-    Args:
-      request: A request protocol buffer.
-
-    Returns:
-      The bytestring serialization of the given request protocol buffer.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def deserialize_request(self, request_bytestring):
-    """Deserialize a request protocol buffer.
-
-    Args:
-      request_bytestring: The bytestring serialization of a request protocol
-        buffer.
-
-    Returns:
-      The request protocol buffer deserialized from the given byte string.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def serialize_response(self, response):
-    """Serialize a response protocol buffer.
-
-    Args:
-      response: A response protocol buffer.
-
-    Returns:
-      The bytestring serialization of the given response protocol buffer.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def deserialize_response(self, response_bytestring):
-    """Deserialize a response protocol buffer.
-
-    Args:
-      response_bytestring: The bytestring serialization of a response protocol
-        buffer.
-
-    Returns:
-      The response protocol buffer deserialized from the given byte string.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def requests(self):
-    """Access the sequence of requests for this scenario.
-
-    Returns:
-      A sequence of request protocol buffers.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def response_for_request(self, request):
-    """Access the response for a particular request.
-
-    Args:
-      request: A request protocol buffer.
-
-    Returns:
-      The response protocol buffer appropriate for the given request.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def verify_requests(self, experimental_requests):
-    """Verify the requests transmitted through the system under test.
-
-    Args:
-      experimental_requests: The request protocol buffers transmitted through
-        the system under test.
-
-    Returns:
-      True if the requests satisfy this test scenario; False otherwise.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def verify_responses(self, experimental_responses):
-    """Verify the responses transmitted through the system under test.
-
-    Args:
-      experimental_responses: The response protocol buffers transmitted through
-        the system under test.
-
-    Returns:
-      True if the responses satisfy this test scenario; False otherwise.
-    """
-    raise NotImplementedError()
-
-
-class EmptyScenario(ProtoScenario):
-  """A scenario that transmits no protocol buffers in either direction."""
-
-  def method(self):
-    return 'DivMany'
-
-  def serialize_request(self, request):
-    raise ValueError('This should not be necessary to call!')
-
-  def deserialize_request(self, request_bytestring):
-    raise ValueError('This should not be necessary to call!')
-
-  def serialize_response(self, response):
-    raise ValueError('This should not be necessary to call!')
-
-  def deserialize_response(self, response_bytestring):
-    raise ValueError('This should not be necessary to call!')
-
-  def requests(self):
-    return ()
-
-  def response_for_request(self, request):
-    raise ValueError('This should not be necessary to call!')
-
-  def verify_requests(self, experimental_requests):
-    return not experimental_requests
-
-  def verify_responses(self, experimental_responses):
-    return not experimental_responses
-
-
-class BidirectionallyUnaryScenario(ProtoScenario):
-  """A scenario that transmits no protocol buffers in either direction."""
-
-  _DIVIDEND = 59
-  _DIVISOR = 7
-  _QUOTIENT = 8
-  _REMAINDER = 3
-
-  _REQUEST = math_pb2.DivArgs(dividend=_DIVIDEND, divisor=_DIVISOR)
-  _RESPONSE = math_pb2.DivReply(quotient=_QUOTIENT, remainder=_REMAINDER)
-
-  def method(self):
-    return 'Div'
-
-  def serialize_request(self, request):
-    return request.SerializeToString()
-
-  def deserialize_request(self, request_bytestring):
-    return math_pb2.DivArgs.FromString(request_bytestring)
-
-  def serialize_response(self, response):
-    return response.SerializeToString()
-
-  def deserialize_response(self, response_bytestring):
-    return math_pb2.DivReply.FromString(response_bytestring)
-
-  def requests(self):
-    return [self._REQUEST]
-
-  def response_for_request(self, request):
-    return self._RESPONSE
-
-  def verify_requests(self, experimental_requests):
-    return tuple(experimental_requests) == (self._REQUEST,)
-
-  def verify_responses(self, experimental_responses):
-    return tuple(experimental_responses) == (self._RESPONSE,)
-
-
-class BidirectionallyStreamingScenario(ProtoScenario):
-  """A scenario that transmits no protocol buffers in either direction."""
-
-  _STREAM_LENGTH = 200
-  _REQUESTS = tuple(
-      math_pb2.DivArgs(dividend=59 + index, divisor=7 + index)
-      for index in range(_STREAM_LENGTH))
-
-  def __init__(self):
-    self._lock = threading.Lock()
-    self._responses = []
-
-  def method(self):
-    return 'DivMany'
-
-  def serialize_request(self, request):
-    return request.SerializeToString()
-
-  def deserialize_request(self, request_bytestring):
-    return math_pb2.DivArgs.FromString(request_bytestring)
-
-  def serialize_response(self, response):
-    return response.SerializeToString()
-
-  def deserialize_response(self, response_bytestring):
-    return math_pb2.DivReply.FromString(response_bytestring)
-
-  def requests(self):
-    return self._REQUESTS
-
-  def response_for_request(self, request):
-    quotient, remainder = divmod(request.dividend, request.divisor)
-    response = math_pb2.DivReply(quotient=quotient, remainder=remainder)
-    with self._lock:
-      self._responses.append(response)
-    return response
-
-  def verify_requests(self, experimental_requests):
-    return tuple(experimental_requests) == self._REQUESTS
-
-  def verify_responses(self, experimental_responses):
-    with self._lock:
-      return tuple(experimental_responses) == tuple(self._responses)
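
A quick sanity check of the constants used in BidirectionallyUnaryScenario above (editor's note, not part of this change): the expected reply is just the integer division of the request.

    quotient, remainder = divmod(59, 7)      # _DIVIDEND, _DIVISOR
    assert (quotient, remainder) == (8, 3)   # _QUOTIENT, _REMAINDER
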
diff --git a/src/python/grpcio/tests/unit/_junkdrawer/math_pb2.py b/src/python/grpcio/tests/unit/_junkdrawer/math_pb2.py
deleted file mode 100644
index 20165955b4d8e894672bc07a1ec45b705c36d95e..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/_junkdrawer/math_pb2.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# TODO(nathaniel): Remove this from source control after having made
-# generation from the math.proto source part of GRPC's build-and-test
-# process.
-
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: math.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='math.proto',
-  package='math',
-  serialized_pb=_b('\n\nmath.proto\x12\x04math\",\n\x07\x44ivArgs\x12\x10\n\x08\x64ividend\x18\x01 \x02(\x03\x12\x0f\n\x07\x64ivisor\x18\x02 \x02(\x03\"/\n\x08\x44ivReply\x12\x10\n\x08quotient\x18\x01 \x02(\x03\x12\x11\n\tremainder\x18\x02 \x02(\x03\"\x18\n\x07\x46ibArgs\x12\r\n\x05limit\x18\x01 \x01(\x03\"\x12\n\x03Num\x12\x0b\n\x03num\x18\x01 \x02(\x03\"\x19\n\x08\x46ibReply\x12\r\n\x05\x63ount\x18\x01 \x02(\x03\x32\xa4\x01\n\x04Math\x12&\n\x03\x44iv\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00\x12.\n\x07\x44ivMany\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00(\x01\x30\x01\x12#\n\x03\x46ib\x12\r.math.FibArgs\x1a\t.math.Num\"\x00\x30\x01\x12\x1f\n\x03Sum\x12\t.math.Num\x1a\t.math.Num\"\x00(\x01')
-)
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-
-
-
-_DIVARGS = _descriptor.Descriptor(
-  name='DivArgs',
-  full_name='math.DivArgs',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='dividend', full_name='math.DivArgs.dividend', index=0,
-      number=1, type=3, cpp_type=2, label=2,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-    _descriptor.FieldDescriptor(
-      name='divisor', full_name='math.DivArgs.divisor', index=1,
-      number=2, type=3, cpp_type=2, label=2,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=20,
-  serialized_end=64,
-)
-
-
-_DIVREPLY = _descriptor.Descriptor(
-  name='DivReply',
-  full_name='math.DivReply',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='quotient', full_name='math.DivReply.quotient', index=0,
-      number=1, type=3, cpp_type=2, label=2,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-    _descriptor.FieldDescriptor(
-      name='remainder', full_name='math.DivReply.remainder', index=1,
-      number=2, type=3, cpp_type=2, label=2,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=66,
-  serialized_end=113,
-)
-
-
-_FIBARGS = _descriptor.Descriptor(
-  name='FibArgs',
-  full_name='math.FibArgs',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='limit', full_name='math.FibArgs.limit', index=0,
-      number=1, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=115,
-  serialized_end=139,
-)
-
-
-_NUM = _descriptor.Descriptor(
-  name='Num',
-  full_name='math.Num',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='num', full_name='math.Num.num', index=0,
-      number=1, type=3, cpp_type=2, label=2,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=141,
-  serialized_end=159,
-)
-
-
-_FIBREPLY = _descriptor.Descriptor(
-  name='FibReply',
-  full_name='math.FibReply',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='count', full_name='math.FibReply.count', index=0,
-      number=1, type=3, cpp_type=2, label=2,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=161,
-  serialized_end=186,
-)
-
-DESCRIPTOR.message_types_by_name['DivArgs'] = _DIVARGS
-DESCRIPTOR.message_types_by_name['DivReply'] = _DIVREPLY
-DESCRIPTOR.message_types_by_name['FibArgs'] = _FIBARGS
-DESCRIPTOR.message_types_by_name['Num'] = _NUM
-DESCRIPTOR.message_types_by_name['FibReply'] = _FIBREPLY
-
-DivArgs = _reflection.GeneratedProtocolMessageType('DivArgs', (_message.Message,), dict(
-  DESCRIPTOR = _DIVARGS,
-  __module__ = 'math_pb2'
-  # @@protoc_insertion_point(class_scope:math.DivArgs)
-  ))
-_sym_db.RegisterMessage(DivArgs)
-
-DivReply = _reflection.GeneratedProtocolMessageType('DivReply', (_message.Message,), dict(
-  DESCRIPTOR = _DIVREPLY,
-  __module__ = 'math_pb2'
-  # @@protoc_insertion_point(class_scope:math.DivReply)
-  ))
-_sym_db.RegisterMessage(DivReply)
-
-FibArgs = _reflection.GeneratedProtocolMessageType('FibArgs', (_message.Message,), dict(
-  DESCRIPTOR = _FIBARGS,
-  __module__ = 'math_pb2'
-  # @@protoc_insertion_point(class_scope:math.FibArgs)
-  ))
-_sym_db.RegisterMessage(FibArgs)
-
-Num = _reflection.GeneratedProtocolMessageType('Num', (_message.Message,), dict(
-  DESCRIPTOR = _NUM,
-  __module__ = 'math_pb2'
-  # @@protoc_insertion_point(class_scope:math.Num)
-  ))
-_sym_db.RegisterMessage(Num)
-
-FibReply = _reflection.GeneratedProtocolMessageType('FibReply', (_message.Message,), dict(
-  DESCRIPTOR = _FIBREPLY,
-  __module__ = 'math_pb2'
-  # @@protoc_insertion_point(class_scope:math.FibReply)
-  ))
-_sym_db.RegisterMessage(FibReply)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/src/python/grpcio/tests/unit/_links/_proto_scenarios.py b/src/python/grpcio/tests/unit/_links/_proto_scenarios.py
deleted file mode 100644
index 50661085f9d1b3c0155242a10c4165e5ba6e5378..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/_links/_proto_scenarios.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Test scenarios using protocol buffers."""
-
-import abc
-import threading
-
-import six
-
-from tests.unit._junkdrawer import math_pb2
-from tests.unit.framework.common import test_constants
-
-
-class ProtoScenario(six.with_metaclass(abc.ABCMeta)):
-  """An RPC test scenario using protocol buffers."""
-
-  @abc.abstractmethod
-  def group_and_method(self):
-    """Access the test group and method.
-
-    Returns:
-      The test group and method as a pair.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def serialize_request(self, request):
-    """Serialize a request protocol buffer.
-
-    Args:
-      request: A request protocol buffer.
-
-    Returns:
-      The bytestring serialization of the given request protocol buffer.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def deserialize_request(self, request_bytestring):
-    """Deserialize a request protocol buffer.
-
-    Args:
-      request_bytestring: The bytestring serialization of a request protocol
-        buffer.
-
-    Returns:
-      The request protocol buffer deserialized from the given byte string.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def serialize_response(self, response):
-    """Serialize a response protocol buffer.
-
-    Args:
-      response: A response protocol buffer.
-
-    Returns:
-      The bytestring serialization of the given response protocol buffer.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def deserialize_response(self, response_bytestring):
-    """Deserialize a response protocol buffer.
-
-    Args:
-      response_bytestring: The bytestring serialization of a response protocol
-        buffer.
-
-    Returns:
-      The response protocol buffer deserialized from the given byte string.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def requests(self):
-    """Access the sequence of requests for this scenario.
-
-    Returns:
-      A sequence of request protocol buffers.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def response_for_request(self, request):
-    """Access the response for a particular request.
-
-    Args:
-      request: A request protocol buffer.
-
-    Returns:
-      The response protocol buffer appropriate for the given request.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def verify_requests(self, experimental_requests):
-    """Verify the requests transmitted through the system under test.
-
-    Args:
-      experimental_requests: The request protocol buffers transmitted through
-        the system under test.
-
-    Returns:
-      True if the requests satisfy this test scenario; False otherwise.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def verify_responses(self, experimental_responses):
-    """Verify the responses transmitted through the system under test.
-
-    Args:
-      experimental_responses: The response protocol buffers transmitted through
-        the system under test.
-
-    Returns:
-      True if the responses satisfy this test scenario; False otherwise.
-    """
-    raise NotImplementedError()
-
-
-class EmptyScenario(ProtoScenario):
-  """A scenario that transmits no protocol buffers in either direction."""
-
-  def group_and_method(self):
-    return 'math.Math', 'DivMany'
-
-  def serialize_request(self, request):
-    raise ValueError('This should not be necessary to call!')
-
-  def deserialize_request(self, request_bytestring):
-    raise ValueError('This should not be necessary to call!')
-
-  def serialize_response(self, response):
-    raise ValueError('This should not be necessary to call!')
-
-  def deserialize_response(self, response_bytestring):
-    raise ValueError('This should not be necessary to call!')
-
-  def requests(self):
-    return ()
-
-  def response_for_request(self, request):
-    raise ValueError('This should not be necessary to call!')
-
-  def verify_requests(self, experimental_requests):
-    return not experimental_requests
-
-  def verify_responses(self, experimental_responses):
-    return not experimental_responses
-
-
-class BidirectionallyUnaryScenario(ProtoScenario):
-  """A scenario that transmits no protocol buffers in either direction."""
-
-  _DIVIDEND = 59
-  _DIVISOR = 7
-  _QUOTIENT = 8
-  _REMAINDER = 3
-
-  _REQUEST = math_pb2.DivArgs(dividend=_DIVIDEND, divisor=_DIVISOR)
-  _RESPONSE = math_pb2.DivReply(quotient=_QUOTIENT, remainder=_REMAINDER)
-
-  def group_and_method(self):
-    return 'math.Math', 'Div'
-
-  def serialize_request(self, request):
-    return request.SerializeToString()
-
-  def deserialize_request(self, request_bytestring):
-    return math_pb2.DivArgs.FromString(request_bytestring)
-
-  def serialize_response(self, response):
-    return response.SerializeToString()
-
-  def deserialize_response(self, response_bytestring):
-    return math_pb2.DivReply.FromString(response_bytestring)
-
-  def requests(self):
-    return [self._REQUEST]
-
-  def response_for_request(self, request):
-    return self._RESPONSE
-
-  def verify_requests(self, experimental_requests):
-    return tuple(experimental_requests) == (self._REQUEST,)
-
-  def verify_responses(self, experimental_responses):
-    return tuple(experimental_responses) == (self._RESPONSE,)
-
-
-class BidirectionallyStreamingScenario(ProtoScenario):
-  """A scenario that transmits no protocol buffers in either direction."""
-
-  _REQUESTS = tuple(
-      math_pb2.DivArgs(dividend=59 + index, divisor=7 + index)
-      for index in range(test_constants.STREAM_LENGTH))
-
-  def __init__(self):
-    self._lock = threading.Lock()
-    self._responses = []
-
-  def group_and_method(self):
-    return 'math.Math', 'DivMany'
-
-  def serialize_request(self, request):
-    return request.SerializeToString()
-
-  def deserialize_request(self, request_bytestring):
-    return math_pb2.DivArgs.FromString(request_bytestring)
-
-  def serialize_response(self, response):
-    return response.SerializeToString()
-
-  def deserialize_response(self, response_bytestring):
-    return math_pb2.DivReply.FromString(response_bytestring)
-
-  def requests(self):
-    return self._REQUESTS
-
-  def response_for_request(self, request):
-    quotient, remainder = divmod(request.dividend, request.divisor)
-    response = math_pb2.DivReply(quotient=quotient, remainder=remainder)
-    with self._lock:
-      self._responses.append(response)
-    return response
-
-  def verify_requests(self, experimental_requests):
-    return tuple(experimental_requests) == self._REQUESTS
-
-  def verify_responses(self, experimental_responses):
-    with self._lock:
-      return tuple(experimental_responses) == tuple(self._responses)
diff --git a/src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py b/src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py
deleted file mode 100644
index 488f7d7141b78eea52583f8489cf7f9650a174ca..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Tests of grpc.beta._connectivity_channel."""
-
-import threading
-import time
-import unittest
-
-from grpc._adapter import _low
-from grpc._adapter import _types
-from grpc.beta import _connectivity_channel
-from grpc.beta import interfaces
-from tests.unit.framework.common import test_constants
-
-
-def _drive_completion_queue(completion_queue):
-  while True:
-    event = completion_queue.next(time.time() + 24 * 60 * 60)
-    if event.type == _types.EventType.QUEUE_SHUTDOWN:
-      break
-
-
-class _Callback(object):
-
-  def __init__(self):
-    self._condition = threading.Condition()
-    self._connectivities = []
-
-  def update(self, connectivity):
-    with self._condition:
-      self._connectivities.append(connectivity)
-      self._condition.notify()
-
-  def connectivities(self):
-    with self._condition:
-      return tuple(self._connectivities)
-
-  def block_until_connectivities_satisfy(self, predicate):
-    with self._condition:
-      while True:
-        connectivities = tuple(self._connectivities)
-        if predicate(connectivities):
-          return connectivities
-        else:
-          self._condition.wait()
-
-
-class ChannelConnectivityTest(unittest.TestCase):
-
-  def test_lonely_channel_connectivity(self):
-    low_channel = _low.Channel('localhost:12345', ())
-    callback = _Callback()
-
-    connectivity_channel = _connectivity_channel.ConnectivityChannel(
-        low_channel)
-    connectivity_channel.subscribe(callback.update, try_to_connect=False)
-    first_connectivities = callback.block_until_connectivities_satisfy(bool)
-    connectivity_channel.subscribe(callback.update, try_to_connect=True)
-    second_connectivities = callback.block_until_connectivities_satisfy(
-        lambda connectivities: 2 <= len(connectivities))
-    # Wait for a connection that will never happen.
-    time.sleep(test_constants.SHORT_TIMEOUT)
-    third_connectivities = callback.connectivities()
-    connectivity_channel.unsubscribe(callback.update)
-    fourth_connectivities = callback.connectivities()
-    connectivity_channel.unsubscribe(callback.update)
-    fifth_connectivities = callback.connectivities()
-
-    self.assertSequenceEqual(
-        (interfaces.ChannelConnectivity.IDLE,), first_connectivities)
-    self.assertNotIn(
-        interfaces.ChannelConnectivity.READY, second_connectivities)
-    self.assertNotIn(
-        interfaces.ChannelConnectivity.READY, third_connectivities)
-    self.assertNotIn(
-        interfaces.ChannelConnectivity.READY, fourth_connectivities)
-    self.assertNotIn(
-        interfaces.ChannelConnectivity.READY, fifth_connectivities)
-
-  def test_immediately_connectable_channel_connectivity(self):
-    server_completion_queue = _low.CompletionQueue()
-    server = _low.Server(server_completion_queue, [])
-    port = server.add_http2_port('[::]:0')
-    server.start()
-    server_completion_queue_thread = threading.Thread(
-        target=_drive_completion_queue, args=(server_completion_queue,))
-    server_completion_queue_thread.start()
-    low_channel = _low.Channel('localhost:%d' % port, ())
-    first_callback = _Callback()
-    second_callback = _Callback()
-
-    connectivity_channel = _connectivity_channel.ConnectivityChannel(
-        low_channel)
-    connectivity_channel.subscribe(first_callback.update, try_to_connect=False)
-    first_connectivities = first_callback.block_until_connectivities_satisfy(
-        bool)
-    # Wait for a connection that will never happen because try_to_connect=True
-    # has not yet been passed.
-    time.sleep(test_constants.SHORT_TIMEOUT)
-    second_connectivities = first_callback.connectivities()
-    connectivity_channel.subscribe(second_callback.update, try_to_connect=True)
-    third_connectivities = first_callback.block_until_connectivities_satisfy(
-        lambda connectivities: 2 <= len(connectivities))
-    fourth_connectivities = second_callback.block_until_connectivities_satisfy(
-        bool)
-    # Wait for a connection that will happen (or may already have happened).
-    first_callback.block_until_connectivities_satisfy(
-        lambda connectivities:
-        interfaces.ChannelConnectivity.READY in connectivities)
-    second_callback.block_until_connectivities_satisfy(
-        lambda connectivities:
-        interfaces.ChannelConnectivity.READY in connectivities)
-    connectivity_channel.unsubscribe(first_callback.update)
-    connectivity_channel.unsubscribe(second_callback.update)
-
-    server.shutdown()
-    server_completion_queue.shutdown()
-    server_completion_queue_thread.join()
-
-    self.assertSequenceEqual(
-        (interfaces.ChannelConnectivity.IDLE,), first_connectivities)
-    self.assertSequenceEqual(
-        (interfaces.ChannelConnectivity.IDLE,), second_connectivities)
-    self.assertNotIn(
-        interfaces.ChannelConnectivity.TRANSIENT_FAILURE, third_connectivities)
-    self.assertNotIn(
-        interfaces.ChannelConnectivity.FATAL_FAILURE, third_connectivities)
-    self.assertNotIn(
-        interfaces.ChannelConnectivity.TRANSIENT_FAILURE,
-        fourth_connectivities)
-    self.assertNotIn(
-        interfaces.ChannelConnectivity.FATAL_FAILURE, fourth_connectivities)
-
-  def test_reachable_then_unreachable_channel_connectivity(self):
-    server_completion_queue = _low.CompletionQueue()
-    server = _low.Server(server_completion_queue, [])
-    port = server.add_http2_port('[::]:0')
-    server.start()
-    server_completion_queue_thread = threading.Thread(
-        target=_drive_completion_queue, args=(server_completion_queue,))
-    server_completion_queue_thread.start()
-    low_channel = _low.Channel('localhost:%d' % port, ())
-    callback = _Callback()
-
-    connectivity_channel = _connectivity_channel.ConnectivityChannel(
-        low_channel)
-    connectivity_channel.subscribe(callback.update, try_to_connect=True)
-    callback.block_until_connectivities_satisfy(
-        lambda connectivities:
-        interfaces.ChannelConnectivity.READY in connectivities)
-    # Now take down the server and confirm that channel readiness is repudiated.
-    server.shutdown()
-    callback.block_until_connectivities_satisfy(
-        lambda connectivities:
-        connectivities[-1] is not interfaces.ChannelConnectivity.READY)
-    connectivity_channel.unsubscribe(callback.update)
-
-    server.shutdown()
-    server_completion_queue.shutdown()
-    server_completion_queue_thread.join()
-
-
-class ConnectivityStatesTest(unittest.TestCase):
-
-  def testBetaConnectivityStates(self):
-    self.assertIsNotNone(interfaces.ChannelConnectivity.IDLE)
-    self.assertIsNotNone(interfaces.ChannelConnectivity.CONNECTING)
-    self.assertIsNotNone(interfaces.ChannelConnectivity.READY)
-    self.assertIsNotNone(interfaces.ChannelConnectivity.TRANSIENT_FAILURE)
-    self.assertIsNotNone(interfaces.ChannelConnectivity.FATAL_FAILURE)
-
-
-if __name__ == '__main__':
-  unittest.main(verbosity=2)
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/_control.py b/src/python/grpcio/tests/unit/framework/interfaces/base/_control.py
deleted file mode 100644
index 0eb38abf222ea1fb625d44786136b9934004b585..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/framework/interfaces/base/_control.py
+++ /dev/null
@@ -1,570 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Part of the tests of the base interface of RPC Framework."""
-
-from __future__ import division
-
-import abc
-import collections
-import enum
-import random  # pylint: disable=unused-import
-import threading
-import time
-
-import six
-
-from grpc.framework.interfaces.base import base
-from tests.unit.framework.common import test_constants
-from tests.unit.framework.interfaces.base import _sequence
-from tests.unit.framework.interfaces.base import _state
-from tests.unit.framework.interfaces.base import test_interfaces  # pylint: disable=unused-import
-
-_GROUP = 'base test cases test group'
-_METHOD = 'base test cases test method'
-
-_PAYLOAD_RANDOM_SECTION_MAXIMUM_SIZE = test_constants.PAYLOAD_SIZE // 20
-_MINIMUM_PAYLOAD_SIZE = test_constants.PAYLOAD_SIZE // 600
-
-
-def _create_payload(randomness):
-  length = randomness.randint(
-      _MINIMUM_PAYLOAD_SIZE, test_constants.PAYLOAD_SIZE)
-  random_section_length = randomness.randint(
-      0, min(_PAYLOAD_RANDOM_SECTION_MAXIMUM_SIZE, length))
-  random_section = bytes(
-      bytearray(
-          randomness.getrandbits(8) for _ in range(random_section_length)))
-  sevens_section = b'\x07' * (length - random_section_length)
-  return b''.join(randomness.sample((random_section, sevens_section), 2))
-
-
-def _anything_in_flight(state):
-  return (
-      state.invocation_initial_metadata_in_flight is not None or
-      state.invocation_payloads_in_flight or
-      state.invocation_completion_in_flight is not None or
-      state.service_initial_metadata_in_flight is not None or
-      state.service_payloads_in_flight or
-      state.service_completion_in_flight is not None or
-      0 < state.invocation_allowance_in_flight or
-      0 < state.service_allowance_in_flight
-  )
-
-
-def _verify_service_advance_and_update_state(
-    initial_metadata, payload, completion, allowance, state, implementation):
-  if initial_metadata is not None:
-    if state.invocation_initial_metadata_received:
-      return 'Later invocation initial metadata received: %s' % (
-          initial_metadata,)
-    if state.invocation_payloads_received:
-      return 'Invocation initial metadata received after payloads: %s' % (
-          state.invocation_payloads_received)
-    if state.invocation_completion_received:
-      return 'Invocation initial metadata received after invocation completion!'
-    if not implementation.metadata_transmitted(
-        state.invocation_initial_metadata_in_flight, initial_metadata):
-      return 'Invocation initial metadata maltransmitted: %s, %s' % (
-          state.invocation_initial_metadata_in_flight, initial_metadata)
-    else:
-      state.invocation_initial_metadata_in_flight = None
-      state.invocation_initial_metadata_received = True
-
-  if payload is not None:
-    if state.invocation_completion_received:
-      return 'Invocation payload received after invocation completion!'
-    elif not state.invocation_payloads_in_flight:
-      return 'Invocation payload "%s" received but not in flight!' % (payload,)
-    elif state.invocation_payloads_in_flight[0] != payload:
-      return 'Invocation payload mismatch: %s, %s' % (
-          state.invocation_payloads_in_flight[0], payload)
-    elif state.service_side_invocation_allowance < 1:
-      return 'Disallowed invocation payload!'
-    else:
-      state.invocation_payloads_in_flight.pop(0)
-      state.invocation_payloads_received += 1
-      state.service_side_invocation_allowance -= 1
-
-  if completion is not None:
-    if state.invocation_completion_received:
-      return 'Later invocation completion received: %s' % (completion,)
-    elif not implementation.completion_transmitted(
-        state.invocation_completion_in_flight, completion):
-      return 'Invocation completion maltransmitted: %s, %s' % (
-          state.invocation_completion_in_flight, completion)
-    else:
-      state.invocation_completion_in_flight = None
-      state.invocation_completion_received = True
-
-  if allowance is not None:
-    if allowance <= 0:
-      return 'Illegal allowance value: %s' % (allowance,)
-    else:
-      state.service_allowance_in_flight -= allowance
-      state.service_side_service_allowance += allowance
-
-
-def _verify_invocation_advance_and_update_state(
-    initial_metadata, payload, completion, allowance, state, implementation):
-  if initial_metadata is not None:
-    if state.service_initial_metadata_received:
-      return 'Later service initial metadata received: %s' % (initial_metadata,)
-    if state.service_payloads_received:
-      return 'Service initial metadata received after service payloads: %s' % (
-          state.service_payloads_received)
-    if state.service_completion_received:
-      return 'Service initial metadata received after service completion!'
-    if not implementation.metadata_transmitted(
-        state.service_initial_metadata_in_flight, initial_metadata):
-      return 'Service initial metadata maltransmitted: %s, %s' % (
-          state.service_initial_metadata_in_flight, initial_metadata)
-    else:
-      state.service_initial_metadata_in_flight = None
-      state.service_initial_metadata_received = True
-
-  if payload is not None:
-    if state.service_completion_received:
-      return 'Service payload received after service completion!'
-    elif not state.service_payloads_in_flight:
-      return 'Service payload "%s" received but not in flight!' % (payload,)
-    elif state.service_payloads_in_flight[0] != payload:
-      return 'Service payload mismatch: %s, %s' % (
-          state.service_payloads_in_flight[0], payload)
-    elif state.invocation_side_service_allowance < 1:
-      return 'Disallowed service payload!'
-    else:
-      state.service_payloads_in_flight.pop(0)
-      state.service_payloads_received += 1
-      state.invocation_side_service_allowance -= 1
-
-  if completion is not None:
-    if state.service_completion_received:
-      return 'Later service completion received: %s' % (completion,)
-    elif not implementation.completion_transmitted(
-        state.service_completion_in_flight, completion):
-      return 'Service completion maltransmitted: %s, %s' % (
-          state.service_completion_in_flight, completion)
-    else:
-      state.service_completion_in_flight = None
-      state.service_completion_received = True
-
-  if allowance is not None:
-    if allowance <= 0:
-      return 'Illegal allowance value: %s' % (allowance,)
-    else:
-      state.invocation_allowance_in_flight -= allowance
-      state.invocation_side_service_allowance += allowance
-
-
-class Invocation(
-    collections.namedtuple(
-        'Invocation',
-        ('group', 'method', 'subscription_kind', 'timeout', 'initial_metadata',
-         'payload', 'completion',))):
-  """A description of operation invocation.
-
-  Attributes:
-    group: The group identifier for the operation.
-    method: The method identifier for the operation.
-    subscription_kind: A base.Subscription.Kind value describing the kind of
-      subscription to use for the operation.
-    timeout: A duration in seconds to pass as the timeout value for the
-      operation.
-    initial_metadata: An object to pass as the initial metadata for the
-      operation or None.
-    payload: An object to pass as a payload value for the operation or None.
-    completion: An object to pass as a completion value for the operation or
-      None.
-  """
-
-
-class OnAdvance(
-    collections.namedtuple(
-        'OnAdvance',
-        ('kind', 'initial_metadata', 'payload', 'completion', 'allowance'))):
-  """Describes action to be taken in a test in response to an advance call.
-
-  Attributes:
-    kind: A Kind value describing the overall kind of response.
-    initial_metadata: An initial metadata value to pass to a call of the advance
-      method of the operator under test. Only valid if kind is Kind.ADVANCE and
-      may be None.
-    payload: A payload value to pass to a call of the advance method of the
-      operator under test. Only valid if kind is Kind.ADVANCE and may be None.
-    completion: A base.Completion value to pass to a call of the advance method
-      of the operator under test. Only valid if kind is Kind.ADVANCE and may be
-      None.
-    allowance: An allowance value to pass to a call of the advance method of the
-      operator under test. Only valid if kind is Kind.ADVANCE and may be None.
-  """
-
-  @enum.unique
-  class Kind(enum.Enum):
-    ADVANCE = 'advance'
-    DEFECT = 'defect'
-    IDLE = 'idle'
-
-
-_DEFECT_ON_ADVANCE = OnAdvance(OnAdvance.Kind.DEFECT, None, None, None, None)
-_IDLE_ON_ADVANCE = OnAdvance(OnAdvance.Kind.IDLE, None, None, None, None)
-
-
-class Instruction(
-    collections.namedtuple(
-        'Instruction',
-        ('kind', 'advance_args', 'advance_kwargs', 'conclude_success',
-         'conclude_message', 'conclude_invocation_outcome_kind',
-         'conclude_service_outcome_kind',))):
-  """"""
-
-  @enum.unique
-  class Kind(enum.Enum):
-    ADVANCE = 'ADVANCE'
-    CANCEL = 'CANCEL'
-    CONCLUDE = 'CONCLUDE'
-
-
-class Controller(six.with_metaclass(abc.ABCMeta)):
-
-  @abc.abstractmethod
-  def failed(self, message):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def serialize_request(self, request):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def deserialize_request(self, serialized_request):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def serialize_response(self, response):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def deserialize_response(self, serialized_response):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def invocation(self):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def poll(self):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def on_service_advance(
-      self, initial_metadata, payload, completion, allowance):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def on_invocation_advance(
-      self, initial_metadata, payload, completion, allowance):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def service_on_termination(self, outcome):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def invocation_on_termination(self, outcome):
-    """"""
-    raise NotImplementedError()
-
-
-class ControllerCreator(six.with_metaclass(abc.ABCMeta)):
-
-  @abc.abstractmethod
-  def name(self):
-    """"""
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def controller(self, implementation, randomness):
-    """"""
-    raise NotImplementedError()
-
-
-class _Remainder(
-    collections.namedtuple(
-        '_Remainder',
-        ('invocation_payloads', 'service_payloads', 'invocation_completion',
-         'service_completion',))):
-  """Describes work remaining to be done in a portion of a test.
-
-  Attributes:
-    invocation_payloads: The number of payloads to be sent from the invocation
-      side of the operation to the service side of the operation.
-    service_payloads: The number of payloads to be sent from the service side of
-      the operation to the invocation side of the operation.
-    invocation_completion: Whether or not completion from the invocation side of
-      the operation should be indicated and has yet to be indicated.
-    service_completion: Whether or not completion from the service side of the
-      operation should be indicated and has yet to be indicated.
-  """
-
-
-class _SequenceController(Controller):
-
-  def __init__(self, sequence, implementation, randomness):
-    """Constructor.
-
-    Args:
-      sequence: A _sequence.Sequence describing the steps to be taken in the
-        test at a relatively high level.
-      implementation: A test_interfaces.Implementation encapsulating the
-        base interface implementation that is the system under test.
-      randomness: A random.Random instance for use in the test.
-    """
-    self._condition = threading.Condition()
-    self._sequence = sequence
-    self._implementation = implementation
-    self._randomness = randomness
-
-    self._until = None
-    self._remaining_elements = None
-    self._poll_next = None
-    self._message = None
-
-    self._state = _state.OperationState()
-    self._todo = None
-
-  # called with self._condition
-  def _failed(self, message):
-    self._message = message
-    self._condition.notify_all()
-
-  def _passed(self, invocation_outcome, service_outcome):
-    self._poll_next = Instruction(
-        Instruction.Kind.CONCLUDE, None, None, True, None, invocation_outcome,
-        service_outcome)
-    self._condition.notify_all()
-
-  def failed(self, message):
-    with self._condition:
-      self._failed(message)
-
-  def serialize_request(self, request):
-    return request + request
-
-  def deserialize_request(self, serialized_request):
-    return serialized_request[:len(serialized_request) // 2]
-
-  def serialize_response(self, response):
-    return response * 3
-
-  def deserialize_response(self, serialized_response):
-    return serialized_response[2 * len(serialized_response) // 3:]
-
-  def invocation(self):
-    with self._condition:
-      self._until = time.time() + self._sequence.maximum_duration
-      self._remaining_elements = list(self._sequence.elements)
-      if self._sequence.invocation.initial_metadata:
-        initial_metadata = self._implementation.invocation_initial_metadata()
-        self._state.invocation_initial_metadata_in_flight = initial_metadata
-      else:
-        initial_metadata = None
-      if self._sequence.invocation.payload:
-        payload = _create_payload(self._randomness)
-        self._state.invocation_payloads_in_flight.append(payload)
-      else:
-        payload = None
-      if self._sequence.invocation.complete:
-        completion = self._implementation.invocation_completion()
-        self._state.invocation_completion_in_flight = completion
-      else:
-        completion = None
-      return Invocation(
-          _GROUP, _METHOD, base.Subscription.Kind.FULL,
-          self._sequence.invocation.timeout, initial_metadata, payload,
-          completion)
-
-  def poll(self):
-    with self._condition:
-      while True:
-        if self._message is not None:
-          return Instruction(
-              Instruction.Kind.CONCLUDE, None, None, False, self._message, None,
-              None)
-        elif self._poll_next:
-          poll_next = self._poll_next
-          self._poll_next = None
-          return poll_next
-        elif self._until < time.time():
-          return Instruction(
-              Instruction.Kind.CONCLUDE, None, None, False,
-              'overran allotted time!', None, None)
-        else:
-          self._condition.wait(timeout=self._until-time.time())
-
-  def on_service_advance(
-      self, initial_metadata, payload, completion, allowance):
-    with self._condition:
-      message = _verify_service_advance_and_update_state(
-          initial_metadata, payload, completion, allowance, self._state,
-          self._implementation)
-      if message is not None:
-        self._failed(message)
-      if self._todo is not None:
-        raise ValueError('TODO!!!')
-      elif _anything_in_flight(self._state):
-        return _IDLE_ON_ADVANCE
-      elif self._remaining_elements:
-        element = self._remaining_elements.pop(0)
-        if element.kind is _sequence.Element.Kind.SERVICE_TRANSMISSION:
-          if element.transmission.initial_metadata:
-            initial_metadata = self._implementation.service_initial_metadata()
-            self._state.service_initial_metadata_in_flight = initial_metadata
-          else:
-            initial_metadata = None
-          if element.transmission.payload:
-            payload = _create_payload(self._randomness)
-            self._state.service_payloads_in_flight.append(payload)
-            self._state.service_side_service_allowance -= 1
-          else:
-            payload = None
-          if element.transmission.complete:
-            completion = self._implementation.service_completion()
-            self._state.service_completion_in_flight = completion
-          else:
-            completion = None
-          if (not self._state.invocation_completion_received and
-              0 <= self._state.service_side_invocation_allowance):
-            allowance = 1
-            self._state.service_side_invocation_allowance += 1
-            self._state.invocation_allowance_in_flight += 1
-          else:
-            allowance = None
-          return OnAdvance(
-              OnAdvance.Kind.ADVANCE, initial_metadata, payload, completion,
-              allowance)
-        else:
-          raise ValueError('TODO!!!')
-      else:
-        return _IDLE_ON_ADVANCE
-
-  def on_invocation_advance(
-      self, initial_metadata, payload, completion, allowance):
-    with self._condition:
-      message = _verify_invocation_advance_and_update_state(
-          initial_metadata, payload, completion, allowance, self._state,
-          self._implementation)
-      if message is not None:
-        self._failed(message)
-      if self._todo is not None:
-        raise ValueError('TODO!!!')
-      elif _anything_in_flight(self._state):
-        return _IDLE_ON_ADVANCE
-      elif self._remaining_elements:
-        element = self._remaining_elements.pop(0)
-        if element.kind is _sequence.Element.Kind.INVOCATION_TRANSMISSION:
-          if element.transmission.initial_metadata:
-            initial_metadata = self._implementation.invocation_initial_metadata()
-            self._state.invocation_initial_metadata_in_flight = initial_metadata
-          else:
-            initial_metadata = None
-          if element.transmission.payload:
-            payload = _create_payload(self._randomness)
-            self._state.invocation_payloads_in_flight.append(payload)
-            self._state.invocation_side_invocation_allowance -= 1
-          else:
-            payload = None
-          if element.transmission.complete:
-            completion = self._implementation.invocation_completion()
-            self._state.invocation_completion_in_flight = completion
-          else:
-            completion = None
-          if (not self._state.service_completion_received and
-              0 <= self._state.invocation_side_service_allowance):
-            allowance = 1
-            self._state.invocation_side_service_allowance += 1
-            self._state.service_allowance_in_flight += 1
-          else:
-            allowance = None
-          return OnAdvance(
-              OnAdvance.Kind.ADVANCE, initial_metadata, payload, completion,
-              allowance)
-        else:
-          raise ValueError('TODO!!!')
-      else:
-        return _IDLE_ON_ADVANCE
-
-  def service_on_termination(self, outcome):
-    with self._condition:
-      self._state.service_side_outcome = outcome
-      if self._todo is not None or self._remaining_elements:
-        self._failed('Premature service-side outcome %s!' % (outcome,))
-      elif outcome.kind is not self._sequence.outcome_kinds.service:
-        self._failed(
-            'Incorrect service-side outcome kind: %s should have been %s' % (
-                outcome.kind, self._sequence.outcome_kinds.service))
-      elif self._state.invocation_side_outcome is not None:
-        self._passed(self._state.invocation_side_outcome.kind, outcome.kind)
-
-  def invocation_on_termination(self, outcome):
-    with self._condition:
-      self._state.invocation_side_outcome = outcome
-      if self._todo is not None or self._remaining_elements:
-        self._failed('Premature invocation-side outcome %s!' % (outcome,))
-      elif outcome.kind is not self._sequence.outcome_kinds.invocation:
-        self._failed(
-            'Incorrect invocation-side outcome kind: %s should have been %s' % (
-                outcome.kind, self._sequence.outcome_kinds.invocation))
-      elif self._state.service_side_outcome is not None:
-        self._passed(outcome.kind, self._state.service_side_outcome.kind)
-
-
-class _SequenceControllerCreator(ControllerCreator):
-
-  def __init__(self, sequence):
-    self._sequence = sequence
-
-  def name(self):
-    return self._sequence.name
-
-  def controller(self, implementation, randomness):
-    return _SequenceController(self._sequence, implementation, randomness)
-
-
-CONTROLLER_CREATORS = tuple(
-    _SequenceControllerCreator(sequence) for sequence in _sequence.SEQUENCES)
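For reference, each creator above wraps one recipe from _sequence.SEQUENCES and exposes its name through ControllerCreator.name(); those names feed into the generated test-class names. A minimal sketch of consuming the tuple (illustrative only):

# Illustrative only: enumerate the deleted creators by sequence name.
for creator in CONTROLLER_CREATORS:
  print(creator.name())  # 'Easy', then 'Peasy'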
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/_sequence.py b/src/python/grpcio/tests/unit/framework/interfaces/base/_sequence.py
deleted file mode 100644
index 571d0e1e632e14db535c53f9b4128c49eb0e8860..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/framework/interfaces/base/_sequence.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Part of the tests of the base interface of RPC Framework."""
-
-import collections
-import enum
-
-from grpc.framework.interfaces.base import base
-from tests.unit.framework.common import test_constants
-
-
-class Invocation(
-    collections.namedtuple(
-        'Invocation', ('timeout', 'initial_metadata', 'payload', 'complete',))):
-  """A recipe for operation invocation.
-
-  Attributes:
-    timeout: A duration in seconds to pass to the system under test as the
-      operation's timeout value.
-    initial_metadata: A boolean indicating whether or not to pass initial
-      metadata when invoking the operation.
-    payload: A boolean indicating whether or not to pass a payload when
-      invoking the operation.
-    complete: A boolean indicating whether or not to indicate completion of
-      transmissions from the invoking side of the operation when invoking the
-      operation.
-  """
-
-
-class Transmission(
-    collections.namedtuple(
-        'Transmission', ('initial_metadata', 'payload', 'complete',))):
-  """A recipe for a single transmission in an operation.
-
-  Attributes:
-    initial_metadata: A boolean indicating whether or not to pass initial
-      metadata as part of the transmission.
-    payload: A boolean indicating whether or not to pass a payload as part of
-      the transmission.
-    complete: A boolean indicating whether or not to indicate completion of
-      transmission from the transmitting side of the operation as part of the
-      transmission.
-  """
-
-
-class Intertransmission(
-    collections.namedtuple('Intertransmission', ('invocation', 'service',))):
-  """A recipe for multiple transmissions in an operation.
-
-  Attributes:
-    invocation: An integer describing the number of payloads to send from the
-      invocation side of the operation to the service side.
-    service: An integer describing the number of payloads to send from the
-      service side of the operation to the invocation side.
-  """
-
-
-class Element(collections.namedtuple('Element', ('kind', 'transmission',))):
-  """A sum type for steps to perform when testing an operation.
-
-  Attributes:
-    kind: A Kind value describing the kind of step to perform in the test.
-    transmission: Only valid for kinds Kind.INVOCATION_TRANSMISSION and
-      Kind.SERVICE_TRANSMISSION, a Transmission value describing the details of
-      the transmission to be made.
-  """
-
-  @enum.unique
-  class Kind(enum.Enum):
-    INVOCATION_TRANSMISSION = 'invocation transmission'
-    SERVICE_TRANSMISSION = 'service transmission'
-    INTERTRANSMISSION = 'intertransmission'
-    INVOCATION_CANCEL = 'invocation cancel'
-    SERVICE_CANCEL = 'service cancel'
-    INVOCATION_FAILURE = 'invocation failure'
-    SERVICE_FAILURE = 'service failure'
-
-
-class OutcomeKinds(
-    collections.namedtuple('OutcomeKinds', ('invocation', 'service',))):
-  """A description of the expected outcome of an operation test.
-
-  Attributes:
-    invocation: The base.Outcome.Kind value expected on the invocation side of
-      the operation.
-    service: The base.Outcome.Kind value expected on the service side of the
-      operation.
-  """
-
-
-class Sequence(
-    collections.namedtuple(
-        'Sequence',
-        ('name', 'maximum_duration', 'invocation', 'elements',
-         'outcome_kinds',))):
-  """Describes at a high level steps to perform in a test.
-
-  Attributes:
-    name: The string name of the sequence.
-    maximum_duration: A length of time in seconds to allow for the test before
-      declaring it to have failed.
-    invocation: An Invocation value describing how to invoke the operation
-      under test.
-    elements: A sequence of Element values describing at coarse granularity
-      actions to take during the operation under test.
-    outcome_kinds: An OutcomeKinds value describing the expected outcome kinds
-      of the test.
-  """
-
-_EASY = Sequence(
-    'Easy',
-    test_constants.TIME_ALLOWANCE,
-    Invocation(test_constants.LONG_TIMEOUT, True, True, True),
-    (
-        Element(
-            Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, True)),
-    ),
-    OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED))
-
-_PEASY = Sequence(
-    'Peasy',
-    test_constants.TIME_ALLOWANCE,
-    Invocation(test_constants.LONG_TIMEOUT, True, True, False),
-    (
-        Element(
-            Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, False)),
-        Element(
-            Element.Kind.INVOCATION_TRANSMISSION,
-            Transmission(False, True, True)),
-        Element(
-            Element.Kind.SERVICE_TRANSMISSION, Transmission(False, True, True)),
-    ),
-    OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED))
-
-
-# TODO(issue 2959): Finish this test suite. This tuple of sequences should
-# contain at least the values in the Cartesian product of (half-duplex,
-# full-duplex) * (zero payloads, one payload, test_constants.STREAM_LENGTH
-# payloads) * (completion, cancellation, expiration, programming defect in
-# servicer code).
-SEQUENCES = (
-    _EASY,
-    _PEASY,
-)
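For context, further recipes were meant to be composed from the same pieces. A sketch of what a half-duplex entry in the Cartesian product described above might have looked like (the name and element mix are illustrative, not part of the original tuple):

# Illustrative only: a half-duplex recipe in the style of _EASY and _PEASY.
_SKETCHED_HALF_DUPLEX = Sequence(
    'SketchedHalfDuplex',
    test_constants.TIME_ALLOWANCE,
    Invocation(test_constants.LONG_TIMEOUT, True, True, False),
    (
        Element(
            Element.Kind.INVOCATION_TRANSMISSION,
            Transmission(False, True, True)),
        Element(
            Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, True)),
    ),
    OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED))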
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/_state.py b/src/python/grpcio/tests/unit/framework/interfaces/base/_state.py
deleted file mode 100644
index 21cf33aeb6aa67c4c39ab1b4b7c2212620dc80b0..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/framework/interfaces/base/_state.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Part of the tests of the base interface of RPC Framework."""
-
-
-class OperationState(object):
-
-  def __init__(self):
-    self.invocation_initial_metadata_in_flight = None
-    self.invocation_initial_metadata_received = False
-    self.invocation_payloads_in_flight = []
-    self.invocation_payloads_received = 0
-    self.invocation_completion_in_flight = None
-    self.invocation_completion_received = False
-    self.service_initial_metadata_in_flight = None
-    self.service_initial_metadata_received = False
-    self.service_payloads_in_flight = []
-    self.service_payloads_received = 0
-    self.service_completion_in_flight = None
-    self.service_completion_received = False
-    self.invocation_side_invocation_allowance = 1
-    self.invocation_side_service_allowance = 1
-    self.service_side_invocation_allowance = 1
-    self.service_side_service_allowance = 1
-    self.invocation_allowance_in_flight = 0
-    self.service_allowance_in_flight = 0
-    self.invocation_side_outcome = None
-    self.service_side_outcome = None
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py b/src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py
deleted file mode 100644
index 5d16bf98be3b067f74ef81365a67877f5a8a82cc..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py
+++ /dev/null
@@ -1,279 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Tests of the base interface of RPC Framework."""
-
-from __future__ import division
-
-import logging
-import random
-import threading
-import time
-import unittest
-
-from grpc.framework.foundation import logging_pool
-from grpc.framework.interfaces.base import base
-from grpc.framework.interfaces.base import utilities
-from tests.unit.framework.common import test_constants
-from tests.unit.framework.interfaces.base import _control
-from tests.unit.framework.interfaces.base import test_interfaces
-
-_SYNCHRONICITY_VARIATION = (('Sync', False), ('Async', True))
-
-_EMPTY_OUTCOME_KIND_DICT = {
-    outcome_kind: 0 for outcome_kind in base.Outcome.Kind}
-
-
-class _Serialization(test_interfaces.Serialization):
-
-  def serialize_request(self, request):
-    return request + request
-
-  def deserialize_request(self, serialized_request):
-    return serialized_request[:len(serialized_request) // 2]
-
-  def serialize_response(self, response):
-    return response * 3
-
-  def deserialize_response(self, serialized_response):
-    return serialized_response[2 * len(serialized_response) // 3:]
-
-
-def _advance(quadruples, operator, controller):
-  try:
-    for quadruple in quadruples:
-      operator.advance(
-          initial_metadata=quadruple[0], payload=quadruple[1],
-          completion=quadruple[2], allowance=quadruple[3])
-  except Exception as e:  # pylint: disable=broad-except
-    controller.failed('Exception on advance: %s' % e)
-
-
-class _Operator(base.Operator):
-
-  def __init__(self, controller, on_advance, pool, operator_under_test):
-    self._condition = threading.Condition()
-    self._controller = controller
-    self._on_advance = on_advance
-    self._pool = pool
-    self._operator_under_test = operator_under_test
-    self._pending_advances = []
-
-  def set_operator_under_test(self, operator_under_test):
-    with self._condition:
-      self._operator_under_test = operator_under_test
-      pent_advances = self._pending_advances
-      self._pending_advances = []
-      pool = self._pool
-      controller = self._controller
-
-    if pool is None:
-      _advance(pent_advances, operator_under_test, controller)
-    else:
-      pool.submit(_advance, pent_advances, operator_under_test, controller)
-
-  def advance(
-      self, initial_metadata=None, payload=None, completion=None,
-      allowance=None):
-    on_advance = self._on_advance(
-        initial_metadata, payload, completion, allowance)
-    if on_advance.kind is _control.OnAdvance.Kind.ADVANCE:
-      with self._condition:
-        pool = self._pool
-        operator_under_test = self._operator_under_test
-        controller = self._controller
-
-      quadruple = (
-          on_advance.initial_metadata, on_advance.payload,
-          on_advance.completion, on_advance.allowance)
-      if pool is None:
-        _advance((quadruple,), operator_under_test, controller)
-      else:
-        pool.submit(_advance, (quadruple,), operator_under_test, controller)
-    elif on_advance.kind is _control.OnAdvance.Kind.DEFECT:
-      raise ValueError(
-          'Deliberately raised exception from Operator.advance (in a test)!')
-
-
-class _ProtocolReceiver(base.ProtocolReceiver):
-
-  def __init__(self):
-    self._condition = threading.Condition()
-    self._contexts = []
-
-  def context(self, protocol_context):
-    with self._condition:
-      self._contexts.append(protocol_context)
-
-
-class _Servicer(base.Servicer):
-  """A base.Servicer instrumented for testing."""
-
-  def __init__(self, group, method, controllers, pool):
-    self._condition = threading.Condition()
-    self._group = group
-    self._method = method
-    self._pool = pool
-    self._controllers = list(controllers)
-
-  def service(self, group, method, context, output_operator):
-    with self._condition:
-      controller = self._controllers.pop(0)
-      if group != self._group or method != self._method:
-        controller.failed(
-            '%s != %s or %s != %s' % (group, self._group, method, self._method))
-        raise base.NoSuchMethodError(None, None)
-      else:
-        operator = _Operator(
-            controller, controller.on_service_advance, self._pool,
-            output_operator)
-        outcome = context.add_termination_callback(
-            controller.service_on_termination)
-        if outcome is not None:
-          controller.service_on_termination(outcome)
-        return utilities.full_subscription(operator, _ProtocolReceiver())
-
-
-class _OperationTest(unittest.TestCase):
-
-  def setUp(self):
-    if self._synchronicity_variation:
-      self._pool = logging_pool.pool(test_constants.POOL_SIZE)
-    else:
-      self._pool = None
-    self._controller = self._controller_creator.controller(
-        self._implementation, self._randomness)
-
-  def tearDown(self):
-    if self._synchronicity_variation:
-      self._pool.shutdown(wait=True)
-    else:
-      self._pool = None
-
-  def test_operation(self):
-    invocation = self._controller.invocation()
-    if invocation.subscription_kind is base.Subscription.Kind.FULL:
-      test_operator = _Operator(
-          self._controller, self._controller.on_invocation_advance,
-          self._pool, None)
-      subscription = utilities.full_subscription(
-          test_operator, _ProtocolReceiver())
-    else:
-      # TODO(nathaniel): support and test other subscription kinds.
-      self.fail('Non-full subscriptions not yet supported!')
-
-    servicer = _Servicer(
-        invocation.group, invocation.method, (self._controller,), self._pool)
-
-    invocation_end, service_end, memo = self._implementation.instantiate(
-        {(invocation.group, invocation.method): _Serialization()}, servicer)
-
-    try:
-      invocation_end.start()
-      service_end.start()
-      operation_context, operator_under_test = invocation_end.operate(
-          invocation.group, invocation.method, subscription, invocation.timeout,
-          initial_metadata=invocation.initial_metadata, payload=invocation.payload,
-          completion=invocation.completion)
-      test_operator.set_operator_under_test(operator_under_test)
-      outcome = operation_context.add_termination_callback(
-          self._controller.invocation_on_termination)
-      if outcome is not None:
-        self._controller.invocation_on_termination(outcome)
-    except Exception as e:  # pylint: disable=broad-except
-      self._controller.failed('Exception on invocation: %s' % e)
-      self.fail(e)
-
-    while True:
-      instruction = self._controller.poll()
-      if instruction.kind is _control.Instruction.Kind.ADVANCE:
-        try:
-          test_operator.advance(
-              *instruction.advance_args, **instruction.advance_kwargs)
-        except Exception as e:  # pylint: disable=broad-except
-          self._controller.failed('Exception on instructed advance: %s' % e)
-      elif instruction.kind is _control.Instruction.Kind.CANCEL:
-        try:
-          operation_context.cancel()
-        except Exception as e:  # pylint: disable=broad-except
-          self._controller.failed('Exception on cancel: %s' % e)
-      elif instruction.kind is _control.Instruction.Kind.CONCLUDE:
-        break
-
-    invocation_stop_event = invocation_end.stop(0)
-    service_stop_event = service_end.stop(0)
-    invocation_stop_event.wait()
-    service_stop_event.wait()
-    invocation_stats = invocation_end.operation_stats()
-    service_stats = service_end.operation_stats()
-
-    self._implementation.destantiate(memo)
-
-    self.assertTrue(
-        instruction.conclude_success, msg=instruction.conclude_message)
-
-    expected_invocation_stats = dict(_EMPTY_OUTCOME_KIND_DICT)
-    expected_invocation_stats[
-        instruction.conclude_invocation_outcome_kind] += 1
-    self.assertDictEqual(expected_invocation_stats, invocation_stats)
-    expected_service_stats = dict(_EMPTY_OUTCOME_KIND_DICT)
-    expected_service_stats[instruction.conclude_service_outcome_kind] += 1
-    self.assertDictEqual(expected_service_stats, service_stats)
-
-
-def test_cases(implementation):
-  """Creates unittest.TestCase classes for a given Base implementation.
-
-  Args:
-    implementation: A test_interfaces.Implementation specifying creation and
-      destruction of the Base implementation under test.
-
-  Returns:
-    A sequence of subclasses of unittest.TestCase defining tests of the
-      specified Base layer implementation.
-  """
-  random_seed = hash(time.time())
-  logging.warning('Random seed for this execution: %s', random_seed)
-  randomness = random.Random(x=random_seed)
-
-  test_case_classes = []
-  for synchronicity_variation in _SYNCHRONICITY_VARIATION:
-    for controller_creator in _control.CONTROLLER_CREATORS:
-      name = ''.join(
-          (synchronicity_variation[0], controller_creator.name(), 'Test',))
-      test_case_classes.append(
-          type(name, (_OperationTest,),
-               {'_implementation': implementation,
-                '_randomness': randomness,
-                '_synchronicity_variation': synchronicity_variation[1],
-                '_controller_creator': controller_creator,
-                '__module__': implementation.__module__,
-               }))
-
-  return test_case_classes
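For context, a concrete Base-layer test module consumed this factory by hoisting the generated classes into its own namespace so that unittest discovery finds them. A rough sketch (the `implementation` object is a placeholder for a fully implemented test_interfaces.Implementation supplied by that module):

# Rough sketch of a consumer module; `implementation` is a placeholder for a
# concrete, fully implemented test_interfaces.Implementation instance.
import unittest

from tests.unit.framework.interfaces.base import test_cases

for _test_case_class in test_cases.test_cases(implementation):
  # Hoist each generated class so unittest discovery picks it up by name.
  globals()[_test_case_class.__name__] = _test_case_class

if __name__ == '__main__':
  unittest.main(verbosity=2)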
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/test_interfaces.py b/src/python/grpcio/tests/unit/framework/interfaces/base/test_interfaces.py
deleted file mode 100644
index 5eba475ba89eeeefdd1bb241a658a7d025a880bf..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/framework/interfaces/base/test_interfaces.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Interfaces used in tests of implementations of the Base layer."""
-
-import abc
-
-import six
-
-from grpc.framework.interfaces.base import base  # pylint: disable=unused-import
-
-
-class Serialization(six.with_metaclass(abc.ABCMeta)):
-  """Specifies serialization and deserialization of test payloads."""
-
-  def serialize_request(self, request):
-    """Serializes a request value used in a test.
-
-    Args:
-      request: A request value created by a test.
-
-    Returns:
-      A bytestring that is the serialization of the given request.
-    """
-    raise NotImplementedError()
-
-  def deserialize_request(self, serialized_request):
-    """Deserializes a request value used in a test.
-
-    Args:
-      serialized_request: A bytestring that is the serialization of some request
-        used in a test.
-
-    Returns:
-      The request value encoded by the given bytestring.
-    """
-    raise NotImplementedError()
-
-  def serialize_response(self, response):
-    """Serializes a response value used in a test.
-
-    Args:
-      response: A response value created by a test.
-
-    Returns:
-      A bytestring that is the serialization of the given response.
-    """
-    raise NotImplementedError()
-
-  def deserialize_response(self, serialized_response):
-    """Deserializes a response value used in a test.
-
-    Args:
-      serialized_response: A bytestring that is the serialization of some
-        response used in a test.
-
-    Returns:
-      The response value encoded by the given bytestring.
-    """
-    raise NotImplementedError()
-
-
-class Implementation(six.with_metaclass(abc.ABCMeta)):
-  """Specifies an implementation of the Base layer."""
-
-  @abc.abstractmethod
-  def instantiate(self, serializations, servicer):
-    """Instantiates the Base layer implementation to be used in a test.
-
-    Args:
-      serializations: A dict from group-method pair to Serialization object
-        specifying how to serialize and deserialize payload values used in the
-        test.
-      servicer: A base.Servicer object to be called to service RPCs made during
-        the test.
-
-    Returns:
-      A sequence of length three, the first element of which is a
-        base.End to be used to invoke RPCs, the second element of which is a
-        base.End to be used to service invoked RPCs, and the third element of
-        which is an arbitrary memo object to be kept and passed to destantiate
-        at the conclusion of the test.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def destantiate(self, memo):
-    """Destroys the Base layer implementation under test.
-
-    Args:
-      memo: The object from the third position of the return value of a call to
-        instantiate.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def invocation_initial_metadata(self):
-    """Provides an operation's invocation-side initial metadata.
-
-    Returns:
-      A value to use for an operation's invocation-side initial metadata, or
-        None.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def service_initial_metadata(self):
-    """Provides an operation's service-side initial metadata.
-
-    Returns:
-      A value to use for an operation's service-side initial metadata, or
-        None.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def invocation_completion(self):
-    """Provides an operation's invocation-side completion.
-
-    Returns:
-      A base.Completion to use for an operation's invocation-side completion.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def service_completion(self):
-    """Provides an operation's service-side completion.
-
-    Returns:
-      A base.Completion to use for an operation's service-side completion.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def metadata_transmitted(self, original_metadata, transmitted_metadata):
-    """Identifies whether or not metadata was properly transmitted.
-
-    Args:
-      original_metadata: A metadata value passed to the system under test.
-      transmitted_metadata: The same metadata value after having been
-        transmitted through the system under test.
-
-    Returns:
-      Whether or not the metadata was properly transmitted.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def completion_transmitted(self, original_completion, transmitted_completion):
-    """Identifies whether or not a base.Completion was properly transmitted.
-
-    Args:
-      original_completion: A base.Completion passed to the system under test.
-      transmitted_completion: The same completion value after having been
-        transmitted through the system under test.
-
-    Returns:
-      Whether or not the completion was properly transmitted.
-    """
-    raise NotImplementedError()
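A trivial conforming Serialization, for illustration only (UTF-8 text payloads; not part of the original file):

# Illustrative only: a UTF-8 Serialization satisfying the interface above.
class _Utf8Serialization(Serialization):

  def serialize_request(self, request):
    return request.encode('utf-8')

  def deserialize_request(self, serialized_request):
    return serialized_request.decode('utf-8')

  def serialize_response(self, response):
    return response.encode('utf-8')

  def deserialize_response(self, serialized_response):
    return serialized_response.decode('utf-8')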
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_receiver.py b/src/python/grpcio/tests/unit/framework/interfaces/face/_receiver.py
deleted file mode 100644
index 48f31fc6770af65d8371745d4ac4ad94824ad875..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/framework/interfaces/face/_receiver.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""A utility useful in tests of asynchronous, event-driven interfaces."""
-
-import threading
-
-from grpc.framework.interfaces.face import face
-
-
-class Receiver(face.ResponseReceiver):
-  """A utility object useful in tests of asynchronous code."""
-
-  def __init__(self):
-    self._condition = threading.Condition()
-    self._initial_metadata = None
-    self._responses = []
-    self._terminal_metadata = None
-    self._code = None
-    self._details = None
-    self._completed = False
-    self._abortion = None
-
-  def abort(self, abortion):
-    with self._condition:
-      self._abortion = abortion
-      self._condition.notify_all()
-
-  def initial_metadata(self, initial_metadata):
-    with self._condition:
-      self._initial_metadata = initial_metadata
-
-  def response(self, response):
-    with self._condition:
-      self._responses.append(response)
-
-  def complete(self, terminal_metadata, code, details):
-    with self._condition:
-      self._terminal_metadata = terminal_metadata
-      self._code = code
-      self._details = details
-      self._completed = True
-      self._condition.notify_all()
-
-  def block_until_terminated(self):
-    with self._condition:
-      while self._abortion is None and not self._completed:
-        self._condition.wait()
-
-  def unary_response(self):
-    with self._condition:
-      if self._abortion is not None:
-        raise AssertionError('Aborted: "{}"!'.format(self._abortion))
-      elif len(self._responses) != 1:
-        raise AssertionError(
-            '%d responses received, not exactly one!' % len(self._responses))
-      else:
-        return self._responses[0]
-
-  def stream_responses(self):
-    with self._condition:
-      if self._abortion is None:
-        return list(self._responses)
-      else:
-        raise AssertionError('Aborted: "{}"!'.format(self._abortion))
-
-  def abortion(self):
-    with self._condition:
-      return self._abortion
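Typical use looked like the following sketch, which drives the callbacks by hand; in a real test the receiver is instead handed to an asynchronous face-layer invocation:

# Sketch only: exercising Receiver's callbacks directly.
receiver = Receiver()
receiver.initial_metadata((('key', 'value'),))
receiver.response('a response')
receiver.complete((('trailing-key', 'trailing-value'),), None, None)
receiver.block_until_terminated()  # returns immediately; already completed
assert receiver.unary_response() == 'a response'
assert receiver.abortion() is None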
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/links/__init__.py b/src/python/grpcio/tests/unit/framework/interfaces/links/__init__.py
deleted file mode 100644
index 708651910607ffb686d781713f6893567821b9fd..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/framework/interfaces/links/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/links/test_cases.py b/src/python/grpcio/tests/unit/framework/interfaces/links/test_cases.py
deleted file mode 100644
index 608e64119ec9586637d5c55ac0800d3a499014cc..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/framework/interfaces/links/test_cases.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Tests of the links interface of RPC Framework."""
-
-# unittest is referenced from specification in this module.
-import abc
-import unittest  # pylint: disable=unused-import
-
-import six
-
-from grpc.framework.interfaces.links import links
-from tests.unit.framework.common import test_constants
-from tests.unit.framework.interfaces.links import test_utilities
-
-
-def at_least_n_payloads_received_predicate(n):
-  def predicate(ticket_sequence):
-    payload_count = 0
-    for ticket in ticket_sequence:
-      if ticket.payload is not None:
-        payload_count += 1
-        if n <= payload_count:
-          return True
-    return False
-  return predicate
-
-
-def terminated(ticket_sequence):
-  return ticket_sequence and ticket_sequence[-1].termination is not None
-
-_TRANSMISSION_GROUP = 'test.Group'
-_TRANSMISSION_METHOD = 'TestMethod'
-
-
-class TransmissionTest(six.with_metaclass(abc.ABCMeta)):
-  """Tests ticket transmission between two connected links.
-
-  This class must be mixed into a unittest.TestCase that implements the abstract
-  methods it provides.
-  """
-
-  # This is a unittest.TestCase mix-in.
-  # pylint: disable=invalid-name
-
-  @abc.abstractmethod
-  def create_transmitting_links(self):
-    """Creates two connected links for use in this test.
-
-    Returns:
-      Two links.Links, the first of which will be used on the invocation side
-        of RPCs and the second of which will be used on the service side of
-        RPCs.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def destroy_transmitting_links(self, invocation_side_link, service_side_link):
-    """Destroys the two connected links created for this test.
-
-    Args:
-      invocation_side_link: The link used on the invocation side of RPCs in
-        this test.
-      service_side_link: The link used on the service side of RPCs in this
-        test.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def create_invocation_initial_metadata(self):
-    """Creates a value for use as invocation-side initial metadata.
-
-    Returns:
-      A metadata value appropriate for use as invocation-side initial metadata
-        or None if invocation-side initial metadata transmission is not
-        supported by the links under test.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def create_invocation_terminal_metadata(self):
-    """Creates a value for use as invocation-side terminal metadata.
-
-    Returns:
-      A metadata value appropriate for use as invocation-side terminal
-        metadata or None if invocation-side terminal metadata transmission is
-        not supported by the links under test.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def create_service_initial_metadata(self):
-    """Creates a value for use as service-side initial metadata.
-
-    Returns:
-      A metadata value appropriate for use as service-side initial metadata or
-        None if service-side initial metadata transmission is not supported by
-        the links under test.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def create_service_terminal_metadata(self):
-    """Creates a value for use as service-side terminal metadata.
-
-    Returns:
-      A metadata value appropriate for use as service-side terminal metadata or
-        None if service-side terminal metadata transmission is not supported by
-        the links under test.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def create_invocation_completion(self):
-    """Creates values for use as invocation-side code and message.
-
-    Returns:
-      An invocation-side code value and an invocation-side message value.
-        Either or both may be None if invocation-side code and/or
-        invocation-side message transmission is not supported by the links
-        under test.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def create_service_completion(self):
-    """Creates values for use as service-side code and message.
-
-    Returns:
-      A service-side code value and a service-side message value. Either or
-        both may be None if service-side code and/or service-side message
-        transmission is not supported by the links under test.
-    """
-    raise NotImplementedError()
-
-  @abc.abstractmethod
-  def assertMetadataTransmitted(self, original_metadata, transmitted_metadata):
-    """Asserts that transmitted_metadata contains original_metadata.
-
-    Args:
-      original_metadata: A metadata object used in this test.
-      transmitted_metadata: A metadata object obtained after transmission
-        through the system under test.
-
-    Raises:
-      AssertionError: if the transmitted_metadata object does not contain
-        original_metadata.
-    """
-    raise NotImplementedError()
-
-  def group_and_method(self):
-    """Returns the group and method used in this test case.
-
-    Returns:
-      A pair of the group and method used in this test case.
-    """
-    return _TRANSMISSION_GROUP, _TRANSMISSION_METHOD
-
-  def serialize_request(self, request):
-    """Serializes a request value used in this test case.
-
-    Args:
-      request: A request value created by this test case.
-
-    Returns:
-      A bytestring that is the serialization of the given request.
-    """
-    return request
-
-  def deserialize_request(self, serialized_request):
-    """Deserializes a request value used in this test case.
-
-    Args:
-      serialized_request: A bytestring that is the serialization of some request
-        used in this test case.
-
-    Returns:
-      The request value encoded by the given bytestring.
-    """
-    return serialized_request
-
-  def serialize_response(self, response):
-    """Serializes a response value used in this test case.
-
-    Args:
-      response: A response value created by this test case.
-
-    Returns:
-      A bytestring that is the serialization of the given response.
-    """
-    return response
-
-  def deserialize_response(self, serialized_response):
-    """Deserializes a response value used in this test case.
-
-    Args:
-      serialized_response: A bytestring that is the serialization of some
-        response used in this test case.
-
-    Returns:
-      The response value encoded by the given bytestring.
-    """
-    return serialized_response
-
-  def _assert_is_valid_metadata_payload_sequence(
-      self, ticket_sequence, payloads, initial_metadata, terminal_metadata):
-    initial_metadata_seen = False
-    seen_payloads = []
-    terminal_metadata_seen = False
-
-    for ticket in ticket_sequence:
-      if ticket.initial_metadata is not None:
-        self.assertFalse(initial_metadata_seen)
-        self.assertFalse(seen_payloads)
-        self.assertFalse(terminal_metadata_seen)
-        self.assertMetadataTransmitted(initial_metadata, ticket.initial_metadata)
-        initial_metadata_seen = True
-
-      if ticket.payload is not None:
-        self.assertFalse(terminal_metadata_seen)
-        seen_payloads.append(ticket.payload)
-
-      if ticket.terminal_metadata is not None:
-        self.assertFalse(terminal_metadata_seen)
-        self.assertMetadataTransmitted(terminal_metadata, ticket.terminal_metadata)
-        terminal_metadata_seen = True
-    self.assertSequenceEqual(payloads, seen_payloads)
-
-  def _assert_is_valid_invocation_sequence(
-      self, ticket_sequence, group, method, payloads, initial_metadata,
-      terminal_metadata, termination):
-    self.assertLess(0, len(ticket_sequence))
-    self.assertEqual(group, ticket_sequence[0].group)
-    self.assertEqual(method, ticket_sequence[0].method)
-    self._assert_is_valid_metadata_payload_sequence(
-        ticket_sequence, payloads, initial_metadata, terminal_metadata)
-    self.assertIs(termination, ticket_sequence[-1].termination)
-
-  def _assert_is_valid_service_sequence(
-      self, ticket_sequence, payloads, initial_metadata, terminal_metadata,
-      code, message, termination):
-    self.assertLess(0, len(ticket_sequence))
-    self._assert_is_valid_metadata_payload_sequence(
-        ticket_sequence, payloads, initial_metadata, terminal_metadata)
-    self.assertEqual(code, ticket_sequence[-1].code)
-    self.assertEqual(message, ticket_sequence[-1].message)
-    self.assertIs(termination, ticket_sequence[-1].termination)
-
-  def setUp(self):
-    self._invocation_link, self._service_link = self.create_transmitting_links()
-    self._invocation_mate = test_utilities.RecordingLink()
-    self._service_mate = test_utilities.RecordingLink()
-    self._invocation_link.join_link(self._invocation_mate)
-    self._service_link.join_link(self._service_mate)
-
-  def tearDown(self):
-    self.destroy_transmitting_links(self._invocation_link, self._service_link)
-
-  def testSimplestRoundTrip(self):
-    """Tests transmission of one ticket in each direction."""
-    invocation_operation_id = object()
-    invocation_payload = b'\x07' * 1023
-    timeout = test_constants.LONG_TIMEOUT
-    invocation_initial_metadata = self.create_invocation_initial_metadata()
-    invocation_terminal_metadata = self.create_invocation_terminal_metadata()
-    invocation_code, invocation_message = self.create_invocation_completion()
-    service_payload = b'\x08' * 1025
-    service_initial_metadata = self.create_service_initial_metadata()
-    service_terminal_metadata = self.create_service_terminal_metadata()
-    service_code, service_message = self.create_service_completion()
-
-    original_invocation_ticket = links.Ticket(
-        invocation_operation_id, 0, _TRANSMISSION_GROUP, _TRANSMISSION_METHOD,
-        links.Ticket.Subscription.FULL, timeout, 0, invocation_initial_metadata,
-        invocation_payload, invocation_terminal_metadata, invocation_code,
-        invocation_message, links.Ticket.Termination.COMPLETION, None)
-    self._invocation_link.accept_ticket(original_invocation_ticket)
-
-    self._service_mate.block_until_tickets_satisfy(
-        at_least_n_payloads_received_predicate(1))
-    service_operation_id = self._service_mate.tickets()[0].operation_id
-
-    self._service_mate.block_until_tickets_satisfy(terminated)
-    self._assert_is_valid_invocation_sequence(
-        self._service_mate.tickets(), _TRANSMISSION_GROUP, _TRANSMISSION_METHOD,
-        (invocation_payload,), invocation_initial_metadata,
-        invocation_terminal_metadata, links.Ticket.Termination.COMPLETION)
-
-    original_service_ticket = links.Ticket(
-        service_operation_id, 0, None, None, links.Ticket.Subscription.FULL,
-        timeout, 0, service_initial_metadata, service_payload,
-        service_terminal_metadata, service_code, service_message,
-        links.Ticket.Termination.COMPLETION, None)
-    self._service_link.accept_ticket(original_service_ticket)
-    self._invocation_mate.block_until_tickets_satisfy(terminated)
-    self._assert_is_valid_service_sequence(
-        self._invocation_mate.tickets(), (service_payload,),
-        service_initial_metadata, service_terminal_metadata, service_code,
-        service_message, links.Ticket.Termination.COMPLETION)
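A concrete links implementation mixed this class into a unittest.TestCase roughly as follows. Purely for illustration, the sketch reuses the logging_links pass-through conduit from test_utilities as the "links under test" (a real test supplies the implementation's own links); it relies only on names this module already imports:

# Illustrative only: TransmissionTest mixed into a TestCase over the
# pass-through conduit provided by test_utilities.logging_links.
class _PassThroughTransmissionTest(TransmissionTest, unittest.TestCase):

  def create_transmitting_links(self):
    return test_utilities.logging_links('pass-through')

  def destroy_transmitting_links(self, invocation_side_link, service_side_link):
    pass  # nothing to tear down for the in-process conduit

  def create_invocation_initial_metadata(self):
    return (('invocation-initial-key', 'invocation-initial-value'),)

  def create_invocation_terminal_metadata(self):
    return (('invocation-terminal-key', 'invocation-terminal-value'),)

  def create_service_initial_metadata(self):
    return (('service-initial-key', 'service-initial-value'),)

  def create_service_terminal_metadata(self):
    return (('service-terminal-key', 'service-terminal-value'),)

  def create_invocation_completion(self):
    return None, None

  def create_service_completion(self):
    return 0, 'OK'

  def assertMetadataTransmitted(self, original_metadata, transmitted_metadata):
    # The pass-through conduit forwards tickets unchanged.
    self.assertEqual(original_metadata, transmitted_metadata)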
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/links/test_utilities.py b/src/python/grpcio/tests/unit/framework/interfaces/links/test_utilities.py
deleted file mode 100644
index 39c7f2fc6355a4beaaf6e4e96823d046eaded194..0000000000000000000000000000000000000000
--- a/src/python/grpcio/tests/unit/framework/interfaces/links/test_utilities.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""State and behavior appropriate for use in tests."""
-
-import logging
-import threading
-import time
-
-from grpc.framework.interfaces.links import links
-from grpc.framework.interfaces.links import utilities
-
-# A more-or-less arbitrary limit on the length of raw data values to be logged.
-_UNCOMFORTABLY_LONG = 48
-
-
-def _safe_for_log_ticket(ticket):
-  """Creates a safe-for-printing-to-the-log ticket for a given ticket.
-
-  Args:
-    ticket: Any links.Ticket.
-
-  Returns:
-    A links.Ticket that is as much as can be equal to the given ticket but
-      possibly features values like the string "<payload of length 972321>" in
-      place of the actual values of the given ticket.
-  """
-  if isinstance(ticket.payload, (bytes, str)):
-    payload_length = len(ticket.payload)
-  else:
-    payload_length = -1
-  if payload_length < _UNCOMFORTABLY_LONG:
-    return ticket
-  else:
-    return links.Ticket(
-        ticket.operation_id, ticket.sequence_number,
-        ticket.group, ticket.method, ticket.subscription, ticket.timeout,
-        ticket.allowance, ticket.initial_metadata,
-        '<payload of length {}>'.format(payload_length),
-        ticket.terminal_metadata, ticket.code, ticket.message,
-        ticket.termination, None)
-
-
-class RecordingLink(links.Link):
-  """A Link that records every ticket passed to it."""
-
-  def __init__(self):
-    self._condition = threading.Condition()
-    self._tickets = []
-
-  def accept_ticket(self, ticket):
-    with self._condition:
-      self._tickets.append(ticket)
-      self._condition.notify_all()
-
-  def join_link(self, link):
-    pass
-
-  def block_until_tickets_satisfy(self, predicate):
-    """Blocks until the received tickets satisfy the given predicate.
-
-    Args:
-      predicate: A callable that takes a sequence of tickets and returns a
-        boolean value.
-    """
-    with self._condition:
-      while not predicate(self._tickets):
-        self._condition.wait()
-
-  def tickets(self):
-    """Returns a copy of the list of all tickets received by this Link."""
-    with self._condition:
-      return tuple(self._tickets)
-
-
-class _Pipe(object):
-  """A conduit that logs all tickets passed through it."""
-
-  def __init__(self, name):
-    self._lock = threading.Lock()
-    self._name = name
-    self._left_mate = utilities.NULL_LINK
-    self._right_mate = utilities.NULL_LINK
-
-  def accept_left_to_right_ticket(self, ticket):
-    with self._lock:
-      logging.warning(
-          '%s: moving left to right through %s: %s', time.time(), self._name,
-          _safe_for_log_ticket(ticket))
-      try:
-        self._right_mate.accept_ticket(ticket)
-      except Exception as e:  # pylint: disable=broad-except
-        logging.exception(e)
-
-  def accept_right_to_left_ticket(self, ticket):
-    with self._lock:
-      logging.warning(
-          '%s: moving right to left through %s: %s', time.time(), self._name,
-          _safe_for_log_ticket(ticket))
-      try:
-        self._left_mate.accept_ticket(ticket)
-      except Exception as e:  # pylint: disable=broad-except
-        logging.exception(e)
-
-  def join_left_mate(self, left_mate):
-    with self._lock:
-      self._left_mate = utilities.NULL_LINK if left_mate is None else left_mate
-
-  def join_right_mate(self, right_mate):
-    with self._lock:
-      self._right_mate = (
-          utilities.NULL_LINK if right_mate is None else right_mate)
-
-
-class _Facade(links.Link):
-
-  def __init__(self, accept, join):
-    self._accept = accept
-    self._join = join
-
-  def accept_ticket(self, ticket):
-    self._accept(ticket)
-
-  def join_link(self, link):
-    self._join(link)
-
-
-def logging_links(name):
-  """Creates a conduit that logs all tickets passed through it.
-
-  Args:
-    name: A name to use for the conduit to identify itself in logging output.
-
-  Returns:
-    Two links.Links, the first of which is the "left" side of the conduit
-      and the second of which is the "right" side of the conduit.
-  """
-  pipe = _Pipe(name)
-  left_facade = _Facade(pipe.accept_left_to_right_ticket, pipe.join_left_mate)
-  right_facade = _Facade(pipe.accept_right_to_left_ticket, pipe.join_right_mate)
-  return left_facade, right_facade
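The deleted helpers compose as in this small sketch (self-contained given this module's imports; the ticket field values are arbitrary):

# Sketch: a logging conduit feeding a RecordingLink.
ticket = links.Ticket(
    object(), 0, 'test.Group', 'TestMethod', links.Ticket.Subscription.FULL,
    30, 0, None, b'\x07' * 16, None, None, None,
    links.Ticket.Termination.COMPLETION, None)
left, right = logging_links('example')
recorder = RecordingLink()
right.join_link(recorder)  # left-to-right traffic is delivered to the recorder
left.accept_ticket(ticket)
recorder.block_until_tickets_satisfy(
    lambda tickets: tickets and tickets[-1].termination is not None)
assert recorder.tickets()[0].payload == b'\x07' * 16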
diff --git a/src/python/grpcio_health_checking/health_commands.py b/src/python/grpcio_health_checking/health_commands.py
index 631066f33102aac79d4503cfa749b94153e7a4ef..a7a59f6974b76427dc04c6829723f7a772ec0c61 100644
--- a/src/python/grpcio_health_checking/health_commands.py
+++ b/src/python/grpcio_health_checking/health_commands.py
@@ -39,51 +39,17 @@ import sys
 
 import setuptools
 from setuptools.command import build_py
-from setuptools.command import sdist
 
 ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
 HEALTH_PROTO = os.path.join(ROOT_DIR, '../../proto/grpc/health/v1/health.proto')
 
 
-class BuildProtoModules(setuptools.Command):
-  """Command to generate project *_pb2.py modules from proto files."""
+class CopyProtoModules(setuptools.Command):
+  """Command to copy proto modules from grpc/src/proto."""
 
   description = ''
   user_options = []
 
-  def initialize_options(self):
-    pass
-
-  def finalize_options(self):
-    self.protoc_command = distutils.spawn.find_executable('protoc')
-    self.grpc_python_plugin_command = distutils.spawn.find_executable(
-        'grpc_python_plugin')
-
-  def run(self):
-    paths = []
-    root_directory = os.getcwd()
-    for walk_root, directories, filenames in os.walk(root_directory):
-      for filename in filenames:
-        if filename.endswith('.proto'):
-          paths.append(os.path.join(walk_root, filename))
-    command = [
-        self.protoc_command,
-        '--plugin=protoc-gen-python-grpc={}'.format(
-            self.grpc_python_plugin_command),
-        '-I {}'.format(root_directory),
-        '--python_out={}'.format(root_directory),
-        '--python-grpc_out={}'.format(root_directory),
-    ] + paths
-    try:
-      subprocess.check_output(' '.join(command), cwd=root_directory, shell=True,
-                              stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError as e:
-      raise Exception('{}\nOutput:\n{}'.format(e.message, e.output))
-
-
-class CopyProtoModules(setuptools.Command):
-  """Command to copy proto modules from grpc/src/proto."""
-
   def initialize_options(self):
     pass
 
@@ -101,14 +67,5 @@ class BuildPy(build_py.build_py):
   """Custom project build command."""
 
   def run(self):
-    self.run_command('copy_proto_modules')
     self.run_command('build_proto_modules')
     build_py.build_py.run(self)
-
-
-class SDist(sdist.sdist):
-  """Custom project build command."""
-
-  def run(self):
-    self.run_command('copy_proto_modules')
-    sdist.sdist.run(self)
diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py
index d68a7ced8ed61dc392a607ec6dd2ad444793bc56..70b4575bf5d1ebb24a03884656e4033c275295b8 100644
--- a/src/python/grpcio_health_checking/setup.py
+++ b/src/python/grpcio_health_checking/setup.py
@@ -36,36 +36,44 @@ import sys
 from distutils import core as _core
 import setuptools
 
+import grpc.tools.command
+
 # Ensure we're in the proper directory whether or not we're being used by pip.
 os.chdir(os.path.dirname(os.path.abspath(__file__)))
 
 # Break import-style to ensure we can actually find our commands module.
 import health_commands
 
-_PACKAGES = (
+PACKAGES = (
     setuptools.find_packages('.')
 )
 
-_PACKAGE_DIRECTORIES = {
+PACKAGE_DIRECTORIES = {
     '': '.',
 }
 
-_INSTALL_REQUIRES = (
+SETUP_REQUIRES = (
+    'grpcio-tools>=0.14.0',
+)
+
+INSTALL_REQUIRES = (
     'grpcio>=0.13.1',
 )
 
-_COMMAND_CLASS = {
-    'copy_proto_modules': health_commands.CopyProtoModules,
-    'build_proto_modules': health_commands.BuildProtoModules,
+COMMAND_CLASS = {
+    # Run preprocess from the repository *before* doing any packaging!
+    'preprocess': health_commands.CopyProtoModules,
+
+    'build_proto_modules': grpc.tools.command.BuildProtoModules,
     'build_py': health_commands.BuildPy,
-    'sdist': health_commands.SDist,
 }
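+# A hedged sketch of the intended flow, using the command names registered
+# above (the exact invocation is an assumption, not part of this change):
+#
+#   python setup.py preprocess build_py sdist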
 
 setuptools.setup(
-    name='grpcio_health_checking',
-    version='0.14.0b0',
-    packages=list(_PACKAGES),
-    package_dir=_PACKAGE_DIRECTORIES,
-    install_requires=_INSTALL_REQUIRES,
-    cmdclass=_COMMAND_CLASS
+    name='grpcio-health-checking',
+    version='0.14.0',
+    packages=list(PACKAGES),
+    package_dir=PACKAGE_DIRECTORIES,
+    install_requires=INSTALL_REQUIRES,
+    setup_requires=SETUP_REQUIRES,
+    cmdclass=COMMAND_CLASS
 )
diff --git a/src/python/grpcio_tests/.gitignore b/src/python/grpcio_tests/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..fc620135dc7a0942bce5fb56973b348ba2141e01
--- /dev/null
+++ b/src/python/grpcio_tests/.gitignore
@@ -0,0 +1,4 @@
+proto/
+src/
+*_pb2.py
+*.egg-info/
diff --git a/src/python/grpcio_tests/commands.py b/src/python/grpcio_tests/commands.py
new file mode 100644
index 0000000000000000000000000000000000000000..171829b62fccf3429ad59f6f5589b2b0a7e54057
--- /dev/null
+++ b/src/python/grpcio_tests/commands.py
@@ -0,0 +1,217 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Provides distutils command classes for the gRPC Python setup process."""
+
+import distutils.errors
+import glob
+import os
+import os.path
+import platform
+import re
+import shutil
+import subprocess
+import sys
+import traceback
+
+import setuptools
+from setuptools.command import build_ext
+from setuptools.command import build_py
+from setuptools.command import easy_install
+from setuptools.command import install
+from setuptools.command import test
+
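+# Directory layout assumed below: this file lives in
+# $REPO_ROOT/src/python/grpcio_tests; protos are gathered from
+# $REPO_ROOT/src/proto into ./src/proto before code generation.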
+PYTHON_STEM = os.path.dirname(os.path.abspath(__file__))
+GRPC_STEM = os.path.abspath(os.path.join(PYTHON_STEM, '..', '..', '..'))
+GRPC_PROTO_STEM = os.path.join(GRPC_STEM, 'src', 'proto')
+PROTO_STEM = os.path.join(PYTHON_STEM, 'src', 'proto')
+PYTHON_PROTO_TOP_LEVEL = os.path.join(PYTHON_STEM, 'src')
+
+
+class CommandError(Exception):
+  pass
+
+
+class GatherProto(setuptools.Command):
+
+  description = 'gather proto dependencies'
+  user_options = []
+
+  def initialize_options(self):
+    pass
+
+  def finalize_options(self):
+    pass
+
+  def run(self):
+    # TODO(atash) ensure that we're running from the repository directory when
+    # this command is used
+    try:
+      shutil.rmtree(PROTO_STEM)
+    except Exception:
+      # We don't care if this fails; the tree may simply not exist yet.
+      pass
+    shutil.copytree(GRPC_PROTO_STEM, PROTO_STEM)
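+    # The copied tree contains no __init__.py files; touch one in every
+    # directory so the gathered protos resolve as Python packages.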
+    for root, _, _ in os.walk(PYTHON_PROTO_TOP_LEVEL):
+      path = os.path.join(root, '__init__.py')
+      open(path, 'a').close()
+
+
+class BuildProtoModules(setuptools.Command):
+  """Command to generate project *_pb2.py modules from proto files."""
+
+  description = 'build protobuf modules'
+  user_options = [
+    ('include=', None, 'path patterns to include in protobuf generation'),
+    ('exclude=', None, 'path patterns to exclude from protobuf generation')
+  ]
+
+  def initialize_options(self):
+    self.exclude = None
+    self.include = r'.*\.proto$'
+
+  def finalize_options(self):
+    pass
+
+  def run(self):
+    import grpc.tools.protoc as protoc
+
+    include_regex = re.compile(self.include)
+    exclude_regex = re.compile(self.exclude) if self.exclude else None
+    paths = []
+    for walk_root, directories, filenames in os.walk(PROTO_STEM):
+      for filename in filenames:
+        path = os.path.join(walk_root, filename)
+        if include_regex.match(path) and not (
+            exclude_regex and exclude_regex.match(path)):
+          paths.append(path)
+
+    # TODO(kpayson): It would be nice to do this in a batch command,
+    # but we currently have name conflicts in src/proto
+    for path in paths:
+      command = [
+          'grpc.tools.protoc',
+          '-I {}'.format(PROTO_STEM),
+          '--python_out={}'.format(PROTO_STEM),
+          '--grpc_python_out={}'.format(PROTO_STEM),
+      ] + [path]
+      if protoc.main(command) != 0:
+        sys.stderr.write(
+            'warning: protoc command failed:\n{}\n'.format(command))
+
+    # Generated proto directories don't include __init__.py, but
+    # these are needed for Python package resolution.
+    for walk_root, _, _ in os.walk(PROTO_STEM):
+      path = os.path.join(walk_root, '__init__.py')
+      open(path, 'a').close()
+
+
+class BuildPy(build_py.build_py):
+  """Custom project build command."""
+
+  def run(self):
+    try:
+      self.run_command('build_proto_modules')
+    except CommandError as error:
+      sys.stderr.write('warning: %s\n' % error)
+    build_py.build_py.run(self)
+
+
+class TestLite(setuptools.Command):
+  """Command to run tests without fetching or building anything."""
+
+  description = 'run tests without fetching or building anything'
+  user_options = []
+
+  def initialize_options(self):
+    pass
+
+  def finalize_options(self):
+    # distutils requires this override.
+    pass
+
+  def run(self):
+    self._add_eggs_to_path()
+
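+    # tests.Loader and tests.Runner are the custom unittest loader and runner
+    # defined by this package in _loader.py and _runner.py.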
+    import tests
+    loader = tests.Loader()
+    loader.loadTestsFromNames(['tests'])
+    runner = tests.Runner()
+    result = runner.run(loader.suite)
+    if not result.wasSuccessful():
+      sys.exit('Test failure')
+
+  def _add_eggs_to_path(self):
+    """Fetch install and test requirements"""
+    self.distribution.fetch_build_eggs(self.distribution.install_requires)
+    self.distribution.fetch_build_eggs(self.distribution.tests_require)
+
+
+class RunInterop(test.test):
+
+  description = 'run interop test client/server'
+  user_options = [
+    ('args=', 'a', 'pass-thru arguments for the client/server'),
+    ('client', 'c', 'flag indicating to run the client'),
+    ('server', 's', 'flag indicating to run the server')
+  ]
+
+  def initialize_options(self):
+    self.args = ''
+    self.client = False
+    self.server = False
+
+  def finalize_options(self):
+    if self.client and self.server:
+      raise distutils.errors.DistutilsOptionError(
+          'you may only specify one of client or server')
+
+  def run(self):
+    if self.distribution.install_requires:
+      self.distribution.fetch_build_eggs(self.distribution.install_requires)
+    if self.distribution.tests_require:
+      self.distribution.fetch_build_eggs(self.distribution.tests_require)
+    if self.client:
+      self.run_client()
+    elif self.server:
+      self.run_server()
+
+  def run_server(self):
+    # We import here to ensure that our setuptools parent has had a chance to
+    # edit the Python system path.
+    from tests.interop import server
+    sys.argv[1:] = self.args.split()
+    server.serve()
+
+  def run_client(self):
+    # We import here to ensure that our setuptools parent has had a chance to
+    # edit the Python system path.
+    from tests.interop import client
+    sys.argv[1:] = self.args.split()
+    client.test_interoperability()
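+
+# A hedged usage sketch for the commands above, assuming setup.py registers
+# them as 'test_lite' and 'run_interop'; the values passed through --args are
+# illustrative assumptions, not flags defined in this file:
+#
+#   python setup.py test_lite
+#   python setup.py run_interop --server --args="--port=8080"
+#   python setup.py run_interop --client --args="--server_port=8080"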
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/__init__.py b/src/python/grpcio_tests/grpc_version.py
similarity index 91%
rename from src/python/grpcio/tests/unit/framework/interfaces/base/__init__.py
rename to src/python/grpcio_tests/grpc_version.py
index 708651910607ffb686d781713f6893567821b9fd..90f68a5741ab4aa502fc91b8b6a1e2d20ef83806 100644
--- a/src/python/grpcio/tests/unit/framework/interfaces/base/__init__.py
+++ b/src/python/grpcio_tests/grpc_version.py
@@ -1,4 +1,4 @@
-# Copyright 2015, Google Inc.
+# Copyright 2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -27,4 +27,6 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!!
 
+VERSION='1.1.0.dev0'
diff --git a/src/python/grpcio_tests/setup.py b/src/python/grpcio_tests/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..7eef420bdb01ed848048823957a8f21dcca58ae6
--- /dev/null
+++ b/src/python/grpcio_tests/setup.py
@@ -0,0 +1,124 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A setup module for the gRPC Python package."""
+
+import os
+import os.path
+import shutil
+import sys
+
+from distutils import core as _core
+from distutils import extension as _extension
+import setuptools
+from setuptools.command import egg_info
+
+import grpc.tools.command
+
+PY3 = sys.version_info.major == 3
+
+# Ensure we're in the proper directory whether or not we're being used by pip.
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+# Break import-style to ensure we can actually find our in-repo dependencies.
+import commands
+import grpc_version
+
+LICENSE = '3-clause BSD'
+
+PACKAGE_DIRECTORIES = {
+    '': '.',
+}
+
+INSTALL_REQUIRES = (
+    'coverage>=4.0',
+    'enum34>=1.0.4',
+    'futures>=2.2.0',
+    'grpcio>=0.14.0',
+    'grpcio-health-checking>=0.14.0',
+    'oauth2client>=1.4.7',
+    'protobuf>=3.0.0a3',
+    'six>=1.10',
+)
+
+SETUP_REQUIRES = (
+    'grpcio-tools>=0.14.0',
+)
+
+COMMAND_CLASS = {
+    # Run `preprocess` *before* doing any packaging!
+    'preprocess': commands.GatherProto,
+
+    'build_proto_modules': grpc.tools.command.BuildProtoModules,
+    'build_py': commands.BuildPy,
+    'run_interop': commands.RunInterop,
+    'test_lite': commands.TestLite
+}
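+# Note (an assumption drawn from the commands above): `preprocess` and
+# `build_proto_modules` must be run from a repository checkout before
+# `test_lite` or `run_interop`, since the tests import *_pb2 modules generated
+# into the gathered src/proto tree.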
+
+PACKAGE_DATA = {
+    'tests.interop': [
+        'credentials/ca.pem',
+        'credentials/server1.key',
+        'credentials/server1.pem',
+    ],
+    'tests.protoc_plugin': [
+        'protoc_plugin_test.proto',
+    ],
+    'tests.unit': [
+        'credentials/ca.pem',
+        'credentials/server1.key',
+        'credentials/server1.pem',
+    ],
+    'tests': [
+        'tests.json'
+    ],
+}
+
+TEST_SUITE = 'tests'
+TEST_LOADER = 'tests:Loader'
+TEST_RUNNER = 'tests:Runner'
+TESTS_REQUIRE = INSTALL_REQUIRES
+
+PACKAGES = setuptools.find_packages('.')
+
+setuptools.setup(
+  name='grpcio-tests',
+  version=grpc_version.VERSION,
+  license=LICENSE,
+  packages=list(PACKAGES),
+  package_dir=PACKAGE_DIRECTORIES,
+  package_data=PACKAGE_DATA,
+  install_requires=INSTALL_REQUIRES,
+  setup_requires=SETUP_REQUIRES,
+  cmdclass=COMMAND_CLASS,
+  tests_require=TESTS_REQUIRE,
+  test_suite=TEST_SUITE,
+  test_loader=TEST_LOADER,
+  test_runner=TEST_RUNNER,
+)
diff --git a/src/python/grpcio/tests/__init__.py b/src/python/grpcio_tests/tests/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/__init__.py
rename to src/python/grpcio_tests/tests/__init__.py
diff --git a/src/python/grpcio/tests/_loader.py b/src/python/grpcio_tests/tests/_loader.py
similarity index 100%
rename from src/python/grpcio/tests/_loader.py
rename to src/python/grpcio_tests/tests/_loader.py
diff --git a/src/python/grpcio/tests/_result.py b/src/python/grpcio_tests/tests/_result.py
similarity index 100%
rename from src/python/grpcio/tests/_result.py
rename to src/python/grpcio_tests/tests/_result.py
diff --git a/src/python/grpcio/tests/_runner.py b/src/python/grpcio_tests/tests/_runner.py
similarity index 94%
rename from src/python/grpcio/tests/_runner.py
rename to src/python/grpcio_tests/tests/_runner.py
index f0718573e2c3bdf8430a75f15ff73b8f43fc9061..926dcbe23a1a7824623777ef748afced5ae39df2 100644
--- a/src/python/grpcio/tests/_runner.py
+++ b/src/python/grpcio_tests/tests/_runner.py
@@ -30,7 +30,6 @@
 from __future__ import absolute_import
 
 import collections
-import fcntl
 import multiprocessing
 import os
 import select
@@ -178,15 +177,20 @@ class Runner(object):
         stderr_pipe.write_bypass(
             '\ninterrupted stderr:\n{}\n'.format(stderr_pipe.output().decode()))
         os._exit(1)
-    signal.signal(signal.SIGINT, sigint_handler)
-    signal.signal(signal.SIGSEGV, fault_handler)
-    signal.signal(signal.SIGBUS, fault_handler)
-    signal.signal(signal.SIGABRT, fault_handler)
-    signal.signal(signal.SIGFPE, fault_handler)
-    signal.signal(signal.SIGILL, fault_handler)
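+    # Not every signal below exists on every platform (Windows, for example,
+    # has no SIGBUS or SIGPIPE), so install the handlers defensively by name.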
+    def try_set_handler(name, handler):
+      try:
+        signal.signal(getattr(signal, name), handler)
+      except AttributeError:
+        pass
+    try_set_handler('SIGINT', sigint_handler)
+    try_set_handler('SIGSEGV', fault_handler)
+    try_set_handler('SIGBUS', fault_handler)
+    try_set_handler('SIGABRT', fault_handler)
+    try_set_handler('SIGFPE', fault_handler)
+    try_set_handler('SIGILL', fault_handler)
     # Sometimes output will lag after a test has successfully finished; we
     # ignore such writes to our pipes.
-    signal.signal(signal.SIGPIPE, signal.SIG_IGN)
+    try_set_handler('SIGPIPE', signal.SIG_IGN)
 
     # Run the tests
     result.startTestRun()
diff --git a/src/python/grpcio/tests/health_check/__init__.py b/src/python/grpcio_tests/tests/health_check/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/health_check/__init__.py
rename to src/python/grpcio_tests/tests/health_check/__init__.py
diff --git a/src/python/grpcio/tests/health_check/_health_servicer_test.py b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py
similarity index 100%
rename from src/python/grpcio/tests/health_check/_health_servicer_test.py
rename to src/python/grpcio_tests/tests/health_check/_health_servicer_test.py
diff --git a/src/python/grpcio/grpc/_adapter/__init__.py b/src/python/grpcio_tests/tests/interop/__init__.py
similarity index 100%
rename from src/python/grpcio/grpc/_adapter/__init__.py
rename to src/python/grpcio_tests/tests/interop/__init__.py
diff --git a/src/python/grpcio/tests/interop/_insecure_interop_test.py b/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py
similarity index 97%
rename from src/python/grpcio/tests/interop/_insecure_interop_test.py
rename to src/python/grpcio_tests/tests/interop/_insecure_interop_test.py
index 91519b6fba20a758b34a976e50a5a9fc0edb07b4..c753d6faf0514108052051d3d9299458e3e87ea6 100644
--- a/src/python/grpcio/tests/interop/_insecure_interop_test.py
+++ b/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py
@@ -48,7 +48,7 @@ class InsecureInteropTest(
     port = self.server.add_insecure_port('[::]:0')
     self.server.start()
     self.stub = test_pb2.beta_create_TestService_stub(
-        implementations.insecure_channel('[::]', port))
+        implementations.insecure_channel('localhost', port))
 
   def tearDown(self):
     self.server.stop(0)
diff --git a/src/python/grpcio/tests/interop/_interop_test_case.py b/src/python/grpcio_tests/tests/interop/_interop_test_case.py
similarity index 100%
rename from src/python/grpcio/tests/interop/_interop_test_case.py
rename to src/python/grpcio_tests/tests/interop/_interop_test_case.py
diff --git a/src/python/grpcio/tests/interop/_secure_interop_test.py b/src/python/grpcio_tests/tests/interop/_secure_interop_test.py
similarity index 97%
rename from src/python/grpcio/tests/interop/_secure_interop_test.py
rename to src/python/grpcio_tests/tests/interop/_secure_interop_test.py
index c61547b97780c6ba28ba480a475020653fd1d3e8..cb09f54a347833b2654e2fe51ccff31404e62cb7 100644
--- a/src/python/grpcio/tests/interop/_secure_interop_test.py
+++ b/src/python/grpcio_tests/tests/interop/_secure_interop_test.py
@@ -55,7 +55,7 @@ class SecureInteropTest(
     self.server.start()
     self.stub = test_pb2.beta_create_TestService_stub(
         test_utilities.not_really_secure_channel(
-            '[::]', port, implementations.ssl_channel_credentials(
+            'localhost', port, implementations.ssl_channel_credentials(
                 resources.test_root_certificates()),
                 _SERVER_HOST_OVERRIDE))
 
diff --git a/src/python/grpcio/tests/interop/client.py b/src/python/grpcio_tests/tests/interop/client.py
similarity index 100%
rename from src/python/grpcio/tests/interop/client.py
rename to src/python/grpcio_tests/tests/interop/client.py
diff --git a/src/python/grpcio/tests/interop/credentials/README b/src/python/grpcio_tests/tests/interop/credentials/README
similarity index 100%
rename from src/python/grpcio/tests/interop/credentials/README
rename to src/python/grpcio_tests/tests/interop/credentials/README
diff --git a/src/python/grpcio/tests/interop/credentials/ca.pem b/src/python/grpcio_tests/tests/interop/credentials/ca.pem
similarity index 100%
rename from src/python/grpcio/tests/interop/credentials/ca.pem
rename to src/python/grpcio_tests/tests/interop/credentials/ca.pem
diff --git a/src/python/grpcio/tests/interop/credentials/server1.key b/src/python/grpcio_tests/tests/interop/credentials/server1.key
similarity index 100%
rename from src/python/grpcio/tests/interop/credentials/server1.key
rename to src/python/grpcio_tests/tests/interop/credentials/server1.key
diff --git a/src/python/grpcio/tests/interop/credentials/server1.pem b/src/python/grpcio_tests/tests/interop/credentials/server1.pem
similarity index 100%
rename from src/python/grpcio/tests/interop/credentials/server1.pem
rename to src/python/grpcio_tests/tests/interop/credentials/server1.pem
diff --git a/src/python/grpcio/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py
similarity index 86%
rename from src/python/grpcio/tests/interop/methods.py
rename to src/python/grpcio_tests/tests/interop/methods.py
index 7eac5115258d2c2cbdbcec0ea2864496028ee43d..97e6c9e27ef2c6aeeb2556c52f325ab9662b4393 100644
--- a/src/python/grpcio/tests/interop/methods.py
+++ b/src/python/grpcio_tests/tests/interop/methods.py
@@ -39,6 +39,7 @@ import time
 
 from oauth2client import client as oauth2client_client
 
+import grpc
 from grpc.beta import implementations
 from grpc.beta import interfaces
 from grpc.framework.common import cardinality
@@ -57,12 +58,18 @@ class TestService(test_pb2.BetaTestServiceServicer):
     return empty_pb2.Empty()
 
   def UnaryCall(self, request, context):
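+    # Echo any requested status back to the client; exercised by the
+    # status_code_and_message interop test case added below.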
+    if request.HasField('response_status'):
+      context.code(request.response_status.code)
+      context.details(request.response_status.message)
     return messages_pb2.SimpleResponse(
         payload=messages_pb2.Payload(
             type=messages_pb2.COMPRESSABLE,
             body=b'\x00' * request.response_size))
 
   def StreamingOutputCall(self, request, context):
+    if request.HasField('response_status'):
+      context.code(request.response_status.code)
+      context.details(request.response_status.message)
     for response_parameters in request.response_parameters:
       yield messages_pb2.StreamingOutputCallResponse(
           payload=messages_pb2.Payload(
@@ -79,10 +86,14 @@ class TestService(test_pb2.BetaTestServiceServicer):
 
   def FullDuplexCall(self, request_iterator, context):
     for request in request_iterator:
-      yield messages_pb2.StreamingOutputCallResponse(
-          payload=messages_pb2.Payload(
-              type=request.payload.type,
-              body=b'\x00' * request.response_parameters[0].size))
+      if request.HasField('response_status'):
+        context.code(request.response_status.code)
+        context.details(request.response_status.message)
+      for response_parameters in request.response_parameters:
+        yield messages_pb2.StreamingOutputCallResponse(
+            payload=messages_pb2.Payload(
+                type=request.payload.type,
+                body=b'\x00' * response_parameters.size))
 
   # NOTE(nathaniel): Apparently this is the same as the full-duplex call?
   # NOTE(atash): It isn't even called in the interop spec (Oct 22 2015)...
@@ -288,6 +299,39 @@ def _empty_stream(stub):
       pass
 
 
+def _status_code_and_message(stub):
+  with stub:
+    message = 'test status message'
+    code = 2
+    status = grpc.StatusCode.UNKNOWN  # code = 2
+    request = messages_pb2.SimpleRequest(
+        response_type=messages_pb2.COMPRESSABLE,
+        response_size=1,
+        payload=messages_pb2.Payload(body=b'\x00'),
+        response_status=messages_pb2.EchoStatus(code=code, message=message))
+    response_future = stub.UnaryCall.future(request, _TIMEOUT)
+    if response_future.code() != status:
+      raise ValueError(
+        'expected code %s, got %s' % (status, response_future.code()))
+    if response_future.details() != message:
+      raise ValueError(
+        'expected message %s, got %s' % (message, response_future.details()))
+
+    request = messages_pb2.StreamingOutputCallRequest(
+        response_type=messages_pb2.COMPRESSABLE,
+        response_parameters=(
+            messages_pb2.ResponseParameters(size=1),),
+        response_status=messages_pb2.EchoStatus(code=code, message=message))
+    response_iterator = stub.StreamingOutputCall(request, _TIMEOUT)
+    if response_iterator.code() != status:
+      raise ValueError(
+        'expected code %s, got %s' % (status, response_iterator.code()))
+    if response_iterator.details() != message:
+      raise ValueError(
+        'expected message %s, got %s' % (message, response_iterator.details()))
+
+
 def _compute_engine_creds(stub, args):
   response = _large_unary_common_behavior(stub, True, True)
   if args.default_service_account != response.username:
@@ -346,6 +390,7 @@ class TestCase(enum.Enum):
   CANCEL_AFTER_BEGIN = 'cancel_after_begin'
   CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response'
   EMPTY_STREAM = 'empty_stream'
+  STATUS_CODE_AND_MESSAGE = 'status_code_and_message'
   COMPUTE_ENGINE_CREDS = 'compute_engine_creds'
   OAUTH2_AUTH_TOKEN = 'oauth2_auth_token'
   JWT_TOKEN_CREDS = 'jwt_token_creds'
@@ -371,6 +416,8 @@ class TestCase(enum.Enum):
       _timeout_on_sleeping_server(stub)
     elif self is TestCase.EMPTY_STREAM:
       _empty_stream(stub)
+    elif self is TestCase.STATUS_CODE_AND_MESSAGE:
+      _status_code_and_message(stub)
     elif self is TestCase.COMPUTE_ENGINE_CREDS:
       _compute_engine_creds(stub, args)
     elif self is TestCase.OAUTH2_AUTH_TOKEN:
diff --git a/src/python/grpcio/tests/interop/resources.py b/src/python/grpcio_tests/tests/interop/resources.py
similarity index 100%
rename from src/python/grpcio/tests/interop/resources.py
rename to src/python/grpcio_tests/tests/interop/resources.py
diff --git a/src/python/grpcio/tests/interop/server.py b/src/python/grpcio_tests/tests/interop/server.py
similarity index 100%
rename from src/python/grpcio/tests/interop/server.py
rename to src/python/grpcio_tests/tests/interop/server.py
diff --git a/src/python/grpcio/grpc/_links/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/__init__.py
similarity index 100%
rename from src/python/grpcio/grpc/_links/__init__.py
rename to src/python/grpcio_tests/tests/protoc_plugin/__init__.py
diff --git a/src/python/grpcio/tests/protoc_plugin/_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
similarity index 67%
rename from src/python/grpcio/tests/protoc_plugin/_python_plugin_test.py
rename to src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
index 1c9cbb0d0c37fc6c7598e496ed8115cf1388053e..7ca2bcff38328a42c44dedad60daa385ce2e0e3d 100644
--- a/src/python/grpcio/tests/protoc_plugin/_python_plugin_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
@@ -45,6 +45,11 @@ from six import moves
 import grpc
 from tests.unit.framework.common import test_constants
 
+import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
+import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
+import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
+import tests.protoc_plugin.protos.service.test_service_pb2 as service_pb2
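+# (These *_pb2 modules are generated ahead of time by the build_proto_modules
+# setup command, replacing the per-test protoc invocation formerly done in
+# setUp.)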
+
 # Identifiers of entities we expect to find in the generated module.
 STUB_IDENTIFIER = 'TestServiceStub'
 SERVICER_IDENTIFIER = 'TestServiceServicer'
@@ -53,12 +58,10 @@ ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'
 
 class _ServicerMethods(object):
 
-  def __init__(self, response_pb2, payload_pb2):
+  def __init__(self):
     self._condition = threading.Condition()
     self._paused = False
     self._fail = False
-    self._response_pb2 = response_pb2
-    self._payload_pb2 = payload_pb2
 
   @contextlib.contextmanager
   def pause(self):  # pylint: disable=invalid-name
@@ -85,22 +88,22 @@ class _ServicerMethods(object):
         self._condition.wait()
 
   def UnaryCall(self, request, unused_rpc_context):
-    response = self._response_pb2.SimpleResponse()
-    response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+    response = response_pb2.SimpleResponse()
+    response.payload.payload_type = payload_pb2.COMPRESSABLE
     response.payload.payload_compressable = 'a' * request.response_size
     self._control()
     return response
 
   def StreamingOutputCall(self, request, unused_rpc_context):
     for parameter in request.response_parameters:
-      response = self._response_pb2.StreamingOutputCallResponse()
-      response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+      response = response_pb2.StreamingOutputCallResponse()
+      response.payload.payload_type = payload_pb2.COMPRESSABLE
       response.payload.payload_compressable = 'a' * parameter.size
       self._control()
       yield response
 
   def StreamingInputCall(self, request_iter, unused_rpc_context):
-    response = self._response_pb2.StreamingInputCallResponse()
+    response = response_pb2.StreamingInputCallResponse()
     aggregated_payload_size = 0
     for request in request_iter:
       aggregated_payload_size += len(request.payload.payload_compressable)
@@ -111,8 +114,8 @@ class _ServicerMethods(object):
   def FullDuplexCall(self, request_iter, unused_rpc_context):
     for request in request_iter:
       for parameter in request.response_parameters:
-        response = self._response_pb2.StreamingOutputCallResponse()
-        response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+        response = response_pb2.StreamingOutputCallResponse()
+        response.payload.payload_type = payload_pb2.COMPRESSABLE
         response.payload.payload_compressable = 'a' * parameter.size
         self._control()
         yield response
@@ -121,8 +124,8 @@ class _ServicerMethods(object):
     responses = []
     for request in request_iter:
       for parameter in request.response_parameters:
-        response = self._response_pb2.StreamingOutputCallResponse()
-        response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+        response = response_pb2.StreamingOutputCallResponse()
+        response.payload.payload_type = payload_pb2.COMPRESSABLE
         response.payload.payload_compressable = 'a' * parameter.size
         self._control()
         responses.append(response)
@@ -142,18 +145,13 @@ class _Service(
   """
       
 
-def _CreateService(service_pb2, response_pb2, payload_pb2):
+def _CreateService():
   """Provides a servicer backend and a stub.
 
-  Args:
-    service_pb2: The service_pb2 module generated by this test.
-    response_pb2: The response_pb2 module generated by this test.
-    payload_pb2: The payload_pb2 module generated by this test.
-
   Returns:
     A _Service with which to test RPCs.
   """
-  servicer_methods = _ServicerMethods(response_pb2, payload_pb2)
+  servicer_methods = _ServicerMethods()
 
   class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
 
@@ -173,7 +171,7 @@ def _CreateService(service_pb2, response_pb2, payload_pb2):
       return servicer_methods.HalfDuplexCall(request_iter, context)
 
   server = grpc.server(
-      (), futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
+      futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
   getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
   port = server.add_insecure_port('[::]:0')
   server.start()
@@ -182,12 +180,9 @@ def _CreateService(service_pb2, response_pb2, payload_pb2):
   return _Service(servicer_methods, server, stub)
 
 
-def _CreateIncompleteService(service_pb2):
+def _CreateIncompleteService():
   """Provides a servicer backend that fails to implement methods and its stub.
 
-  Args:
-    service_pb2: The service_pb2 module generated by this test.
-
   Returns:
     A _Service with which to test RPCs. The returned _Service's
       servicer_methods implements none of the methods required of it.
@@ -197,7 +192,7 @@ def _CreateIncompleteService(service_pb2):
     pass
 
   server = grpc.server(
-      (), futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
+      futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
   getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
   port = server.add_insecure_port('[::]:0')
   server.start()
@@ -206,7 +201,7 @@ def _CreateIncompleteService(service_pb2):
   return _Service(None, server, stub)
 
 
-def _streaming_input_request_iterator(request_pb2, payload_pb2):
+def _streaming_input_request_iterator():
   for _ in range(3):
     request = request_pb2.StreamingInputCallRequest()
     request.payload.payload_type = payload_pb2.COMPRESSABLE
@@ -214,7 +209,7 @@ def _streaming_input_request_iterator(request_pb2, payload_pb2):
     yield request
 
 
-def _streaming_output_request(request_pb2):
+def _streaming_output_request():
   request = request_pb2.StreamingOutputCallRequest()
   sizes = [1, 2, 3]
   request.response_parameters.add(size=sizes[0], interval_us=0)
@@ -223,7 +218,7 @@ def _streaming_output_request(request_pb2):
   return request
 
 
-def _full_duplex_request_iterator(request_pb2):
+def _full_duplex_request_iterator():
   request = request_pb2.StreamingOutputCallRequest()
   request.response_parameters.add(size=1, interval_us=0)
   yield request
@@ -241,102 +236,40 @@ class PythonPluginTest(unittest.TestCase):
   methods and does not exist for response-streaming methods.
   """
 
-  def setUp(self):
-    # Assume that the appropriate protoc and grpc_python_plugins are on the
-    # path.
-    protoc_command = 'protoc'
-    protoc_plugin_filename = distutils.spawn.find_executable(
-        'grpc_python_plugin')
-    if not os.path.isfile(protoc_command):
-      # Assume that if we haven't built protoc that it's on the system.
-      protoc_command = 'protoc'
-
-    # Ensure that the output directory exists.
-    self.outdir = tempfile.mkdtemp()
-
-    # Find all proto files
-    paths = []
-    root_dir = os.path.dirname(os.path.realpath(__file__))
-    proto_dir = os.path.join(root_dir, 'protos')
-    for walk_root, _, filenames in os.walk(proto_dir):
-      for filename in filenames:
-        if filename.endswith('.proto'):
-          path = os.path.join(walk_root, filename)
-          paths.append(path)
-
-    # Invoke protoc with the plugin.
-    cmd = [
-        protoc_command,
-        '--plugin=protoc-gen-python-grpc=%s' % protoc_plugin_filename,
-        '-I %s' % root_dir,
-        '--python_out=%s' % self.outdir,
-        '--python-grpc_out=%s' % self.outdir
-    ] + paths
-    subprocess.check_call(' '.join(cmd), shell=True, env=os.environ,
-                          cwd=os.path.dirname(os.path.realpath(__file__)))
-
-    # Generated proto directories dont include __init__.py, but
-    # these are needed for python package resolution
-    for walk_root, _, _ in os.walk(os.path.join(self.outdir, 'protos')):
-      path = os.path.join(walk_root, '__init__.py')
-      open(path, 'a').close()
-
-    sys.path.insert(0, self.outdir)
-
-    import protos.payload.test_payload_pb2 as payload_pb2
-    import protos.requests.r.test_requests_pb2 as request_pb2
-    import protos.responses.test_responses_pb2 as response_pb2
-    import protos.service.test_service_pb2 as service_pb2
-    self._payload_pb2 = payload_pb2
-    self._request_pb2 = request_pb2
-    self._response_pb2 = response_pb2
-    self._service_pb2 = service_pb2
-
-  def tearDown(self):
-    try:
-      shutil.rmtree(self.outdir)
-    except OSError as exc:
-      if exc.errno != errno.ENOENT:
-        raise
-    sys.path.remove(self.outdir)
-
   def testImportAttributes(self):
     # check that we can access the generated module and its members.
     self.assertIsNotNone(
-        getattr(self._service_pb2, STUB_IDENTIFIER, None))
+        getattr(service_pb2, STUB_IDENTIFIER, None))
     self.assertIsNotNone(
-        getattr(self._service_pb2, SERVICER_IDENTIFIER, None))
+        getattr(service_pb2, SERVICER_IDENTIFIER, None))
     self.assertIsNotNone(
-        getattr(self._service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
+        getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
 
   def testUpDown(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    service = _CreateService()
     self.assertIsNotNone(service.servicer_methods)
     self.assertIsNotNone(service.server)
     self.assertIsNotNone(service.stub)
 
   def testIncompleteServicer(self):
-    service = _CreateIncompleteService(self._service_pb2)
-    request = self._request_pb2.SimpleRequest(response_size=13)
+    service = _CreateIncompleteService()
+    request = request_pb2.SimpleRequest(response_size=13)
     with self.assertRaises(grpc.RpcError) as exception_context:
       service.stub.UnaryCall(request)
     self.assertIs(
         exception_context.exception.code(), grpc.StatusCode.UNIMPLEMENTED)
 
   def testUnaryCall(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
-    request = self._request_pb2.SimpleRequest(response_size=13)
+    service = _CreateService()
+    request = request_pb2.SimpleRequest(response_size=13)
     response = service.stub.UnaryCall(request)
     expected_response = service.servicer_methods.UnaryCall(
         request, 'not a real context!')
     self.assertEqual(expected_response, response)
 
   def testUnaryCallFuture(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
-    request = self._request_pb2.SimpleRequest(response_size=13)
+    service = _CreateService()
+    request = request_pb2.SimpleRequest(response_size=13)
     # Check that the call does not block waiting for the server to respond.
     with service.servicer_methods.pause():
       response_future = service.stub.UnaryCall.future(request)
@@ -346,9 +279,8 @@ class PythonPluginTest(unittest.TestCase):
     self.assertEqual(expected_response, response)
 
   def testUnaryCallFutureExpired(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
-    request = self._request_pb2.SimpleRequest(response_size=13)
+    service = _CreateService()
+    request = request_pb2.SimpleRequest(response_size=13)
     with service.servicer_methods.pause():
       response_future = service.stub.UnaryCall.future(
           request, timeout=test_constants.SHORT_TIMEOUT)
@@ -359,9 +291,8 @@ class PythonPluginTest(unittest.TestCase):
     self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
 
   def testUnaryCallFutureCancelled(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
-    request = self._request_pb2.SimpleRequest(response_size=13)
+    service = _CreateService()
+    request = request_pb2.SimpleRequest(response_size=13)
     with service.servicer_methods.pause():
       response_future = service.stub.UnaryCall.future(request)
       response_future.cancel()
@@ -369,18 +300,16 @@ class PythonPluginTest(unittest.TestCase):
     self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
 
   def testUnaryCallFutureFailed(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
-    request = self._request_pb2.SimpleRequest(response_size=13)
+    service = _CreateService()
+    request = request_pb2.SimpleRequest(response_size=13)
     with service.servicer_methods.fail():
       response_future = service.stub.UnaryCall.future(request)
       self.assertIsNotNone(response_future.exception())
     self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
 
   def testStreamingOutputCall(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
-    request = _streaming_output_request(self._request_pb2)
+    service = _CreateService()
+    request = _streaming_output_request()
     responses = service.stub.StreamingOutputCall(request)
     expected_responses = service.servicer_methods.StreamingOutputCall(
         request, 'not a real RpcContext!')
@@ -389,9 +318,8 @@ class PythonPluginTest(unittest.TestCase):
       self.assertEqual(expected_response, response)
 
   def testStreamingOutputCallExpired(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
-    request = _streaming_output_request(self._request_pb2)
+    service = _CreateService()
+    request = _streaming_output_request()
     with service.servicer_methods.pause():
       responses = service.stub.StreamingOutputCall(
           request, timeout=test_constants.SHORT_TIMEOUT)
@@ -401,9 +329,8 @@ class PythonPluginTest(unittest.TestCase):
         exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
 
   def testStreamingOutputCallCancelled(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
-    request = _streaming_output_request(self._request_pb2)
+    service = _CreateService()
+    request = _streaming_output_request()
     responses = service.stub.StreamingOutputCall(request)
     next(responses)
     responses.cancel()
@@ -412,9 +339,8 @@ class PythonPluginTest(unittest.TestCase):
     self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
 
   def testStreamingOutputCallFailed(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
-    request = _streaming_output_request(self._request_pb2)
+    service = _CreateService()
+    request = _streaming_output_request()
     with service.servicer_methods.fail():
       responses = service.stub.StreamingOutputCall(request)
       self.assertIsNotNone(responses)
@@ -423,36 +349,30 @@ class PythonPluginTest(unittest.TestCase):
     self.assertIs(exception_context.exception.code(), grpc.StatusCode.UNKNOWN)
 
   def testStreamingInputCall(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    service = _CreateService()
     response = service.stub.StreamingInputCall(
-        _streaming_input_request_iterator(
-            self._request_pb2, self._payload_pb2))
+        _streaming_input_request_iterator())
     expected_response = service.servicer_methods.StreamingInputCall(
-        _streaming_input_request_iterator(self._request_pb2, self._payload_pb2),
+        _streaming_input_request_iterator(),
         'not a real RpcContext!')
     self.assertEqual(expected_response, response)
 
   def testStreamingInputCallFuture(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    service = _CreateService()
     with service.servicer_methods.pause():
       response_future = service.stub.StreamingInputCall.future(
-          _streaming_input_request_iterator(
-              self._request_pb2, self._payload_pb2))
+          _streaming_input_request_iterator())
     response = response_future.result()
     expected_response = service.servicer_methods.StreamingInputCall(
-        _streaming_input_request_iterator(self._request_pb2, self._payload_pb2),
+        _streaming_input_request_iterator(),
         'not a real RpcContext!')
     self.assertEqual(expected_response, response)
 
   def testStreamingInputCallFutureExpired(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    service = _CreateService()
     with service.servicer_methods.pause():
       response_future = service.stub.StreamingInputCall.future(
-          _streaming_input_request_iterator(
-              self._request_pb2, self._payload_pb2),
+          _streaming_input_request_iterator(),
           timeout=test_constants.SHORT_TIMEOUT)
       with self.assertRaises(grpc.RpcError) as exception_context:
         response_future.result()
@@ -463,43 +383,37 @@ class PythonPluginTest(unittest.TestCase):
         exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
 
   def testStreamingInputCallFutureCancelled(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    service = _CreateService()
     with service.servicer_methods.pause():
       response_future = service.stub.StreamingInputCall.future(
-          _streaming_input_request_iterator(
-              self._request_pb2, self._payload_pb2))
+          _streaming_input_request_iterator())
       response_future.cancel()
     self.assertTrue(response_future.cancelled())
     with self.assertRaises(grpc.FutureCancelledError):
       response_future.result()
 
   def testStreamingInputCallFutureFailed(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    service = _CreateService()
     with service.servicer_methods.fail():
       response_future = service.stub.StreamingInputCall.future(
-          _streaming_input_request_iterator(
-              self._request_pb2, self._payload_pb2))
+          _streaming_input_request_iterator())
       self.assertIsNotNone(response_future.exception())
       self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
 
   def testFullDuplexCall(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    service = _CreateService()
     responses = service.stub.FullDuplexCall(
-        _full_duplex_request_iterator(self._request_pb2))
+        _full_duplex_request_iterator())
     expected_responses = service.servicer_methods.FullDuplexCall(
-        _full_duplex_request_iterator(self._request_pb2),
+        _full_duplex_request_iterator(),
         'not a real RpcContext!')
     for expected_response, response in moves.zip_longest(
         expected_responses, responses):
       self.assertEqual(expected_response, response)
 
   def testFullDuplexCallExpired(self):
-    request_iterator = _full_duplex_request_iterator(self._request_pb2)
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    request_iterator = _full_duplex_request_iterator()
+    service = _CreateService()
     with service.servicer_methods.pause():
       responses = service.stub.FullDuplexCall(
           request_iterator, timeout=test_constants.SHORT_TIMEOUT)
@@ -509,9 +423,8 @@ class PythonPluginTest(unittest.TestCase):
         exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
 
   def testFullDuplexCallCancelled(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
-    request_iterator = _full_duplex_request_iterator(self._request_pb2)
+    service = _CreateService()
+    request_iterator = _full_duplex_request_iterator()
     responses = service.stub.FullDuplexCall(request_iterator)
     next(responses)
     responses.cancel()
@@ -521,9 +434,8 @@ class PythonPluginTest(unittest.TestCase):
         exception_context.exception.code(), grpc.StatusCode.CANCELLED)
 
   def testFullDuplexCallFailed(self):
-    request_iterator = _full_duplex_request_iterator(self._request_pb2)
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    request_iterator = _full_duplex_request_iterator()
+    service = _CreateService()
     with service.servicer_methods.fail():
       responses = service.stub.FullDuplexCall(request_iterator)
       with self.assertRaises(grpc.RpcError) as exception_context:
@@ -531,13 +443,12 @@ class PythonPluginTest(unittest.TestCase):
     self.assertIs(exception_context.exception.code(), grpc.StatusCode.UNKNOWN)
 
   def testHalfDuplexCall(self):
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    service = _CreateService()
     def half_duplex_request_iterator():
-      request = self._request_pb2.StreamingOutputCallRequest()
+      request = request_pb2.StreamingOutputCallRequest()
       request.response_parameters.add(size=1, interval_us=0)
       yield request
-      request = self._request_pb2.StreamingOutputCallRequest()
+      request = request_pb2.StreamingOutputCallRequest()
       request.response_parameters.add(size=2, interval_us=0)
       request.response_parameters.add(size=3, interval_us=0)
       yield request
@@ -561,14 +472,13 @@ class PythonPluginTest(unittest.TestCase):
         wait_cell[0] = False
         condition.notify_all()
     def half_duplex_request_iterator():
-      request = self._request_pb2.StreamingOutputCallRequest()
+      request = request_pb2.StreamingOutputCallRequest()
       request.response_parameters.add(size=1, interval_us=0)
       yield request
       with condition:
         while wait_cell[0]:
           condition.wait()
-    service = _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2)
+    service = _CreateService()
     with wait():
       responses = service.stub.HalfDuplexCall(
           half_duplex_request_iterator(), timeout=test_constants.SHORT_TIMEOUT)
diff --git a/src/python/grpcio/tests/protoc_plugin/beta_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
similarity index 64%
rename from src/python/grpcio/tests/protoc_plugin/beta_python_plugin_test.py
rename to src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
index 7466f880597a388476936b0466c996590c83be96..1eba9c9354311babfb8b3e1abcc02c3d133b319f 100644
--- a/src/python/grpcio/tests/protoc_plugin/beta_python_plugin_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
@@ -50,6 +50,11 @@ from grpc.framework.foundation import future
 from grpc.framework.interfaces.face import face
 from tests.unit.framework.common import test_constants
 
+import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
+import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
+import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
+import tests.protoc_plugin.protos.service.test_service_pb2 as service_pb2
+
 # Identifiers of entities we expect to find in the generated module.
 SERVICER_IDENTIFIER = 'BetaTestServiceServicer'
 STUB_IDENTIFIER = 'BetaTestServiceStub'
@@ -59,12 +64,10 @@ STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub'
 
 class _ServicerMethods(object):
 
-  def __init__(self, response_pb2, payload_pb2):
+  def __init__(self):
     self._condition = threading.Condition()
     self._paused = False
     self._fail = False
-    self._response_pb2 = response_pb2
-    self._payload_pb2 = payload_pb2
 
   @contextlib.contextmanager
   def pause(self):  # pylint: disable=invalid-name
@@ -91,22 +94,22 @@ class _ServicerMethods(object):
         self._condition.wait()
 
   def UnaryCall(self, request, unused_rpc_context):
-    response = self._response_pb2.SimpleResponse()
-    response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+    response = response_pb2.SimpleResponse()
+    response.payload.payload_type = payload_pb2.COMPRESSABLE
     response.payload.payload_compressable = 'a' * request.response_size
     self._control()
     return response
 
   def StreamingOutputCall(self, request, unused_rpc_context):
     for parameter in request.response_parameters:
-      response = self._response_pb2.StreamingOutputCallResponse()
-      response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+      response = response_pb2.StreamingOutputCallResponse()
+      response.payload.payload_type = payload_pb2.COMPRESSABLE
       response.payload.payload_compressable = 'a' * parameter.size
       self._control()
       yield response
 
   def StreamingInputCall(self, request_iter, unused_rpc_context):
-    response = self._response_pb2.StreamingInputCallResponse()
+    response = response_pb2.StreamingInputCallResponse()
     aggregated_payload_size = 0
     for request in request_iter:
       aggregated_payload_size += len(request.payload.payload_compressable)
@@ -117,8 +120,8 @@ class _ServicerMethods(object):
   def FullDuplexCall(self, request_iter, unused_rpc_context):
     for request in request_iter:
       for parameter in request.response_parameters:
-        response = self._response_pb2.StreamingOutputCallResponse()
-        response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+        response = response_pb2.StreamingOutputCallResponse()
+        response.payload.payload_type = payload_pb2.COMPRESSABLE
         response.payload.payload_compressable = 'a' * parameter.size
         self._control()
         yield response
@@ -127,8 +130,8 @@ class _ServicerMethods(object):
     responses = []
     for request in request_iter:
       for parameter in request.response_parameters:
-        response = self._response_pb2.StreamingOutputCallResponse()
-        response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+        response = response_pb2.StreamingOutputCallResponse()
+        response.payload.payload_type = payload_pb2.COMPRESSABLE
         response.payload.payload_compressable = 'a' * parameter.size
         self._control()
         responses.append(response)
@@ -137,23 +140,18 @@ class _ServicerMethods(object):
 
 
 @contextlib.contextmanager
-def _CreateService(service_pb2, response_pb2, payload_pb2):
+def _CreateService():
   """Provides a servicer backend and a stub.
 
   The servicer is just the implementation of the actual servicer passed to the
   face player of the python RPC implementation; the two are detached.
 
-  Args:
-    service_pb2: The service_pb2 module generated by this test.
-    response_pb2: The response_pb2 module generated by this test
-    payload_pb2: The payload_pb2 module generated by this test
-
   Yields:
     A (servicer_methods, stub) pair where servicer_methods is the back-end of
       the service bound to the stub and and stub is the stub on which to invoke
       RPCs.
   """
-  servicer_methods = _ServicerMethods(response_pb2, payload_pb2)
+  servicer_methods = _ServicerMethods()
 
   class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
 
@@ -183,7 +181,7 @@ def _CreateService(service_pb2, response_pb2, payload_pb2):
 
 
 @contextlib.contextmanager
-def _CreateIncompleteService(service_pb2):
+def _CreateIncompleteService():
   """Provides a servicer backend that fails to implement methods and its stub.
 
   The servicer is just the implementation of the actual servicer passed to the
@@ -209,7 +207,7 @@ def _CreateIncompleteService(service_pb2):
   server.stop(0)
 
 
-def _streaming_input_request_iterator(request_pb2, payload_pb2):
+def _streaming_input_request_iterator():
   for _ in range(3):
     request = request_pb2.StreamingInputCallRequest()
     request.payload.payload_type = payload_pb2.COMPRESSABLE
@@ -217,7 +215,7 @@ def _streaming_input_request_iterator(request_pb2, payload_pb2):
     yield request
 
 
-def _streaming_output_request(request_pb2):
+def _streaming_output_request():
   request = request_pb2.StreamingOutputCallRequest()
   sizes = [1, 2, 3]
   request.response_parameters.add(size=sizes[0], interval_us=0)
@@ -226,7 +224,7 @@ def _streaming_output_request(request_pb2):
   return request
 
 
-def _full_duplex_request_iterator(request_pb2):
+def _full_duplex_request_iterator():
   request = request_pb2.StreamingOutputCallRequest()
   request.response_parameters.add(size=1, interval_us=0)
   yield request
@@ -244,101 +242,39 @@ class PythonPluginTest(unittest.TestCase):
   methods and does not exist for response-streaming methods.
   """
 
-  def setUp(self):
-    # Assume that the appropriate protoc and grpc_python_plugins are on the
-    # path.
-    protoc_command = 'protoc'
-    protoc_plugin_filename = distutils.spawn.find_executable(
-        'grpc_python_plugin')
-    if not os.path.isfile(protoc_command):
-      # Assume that if we haven't built protoc that it's on the system.
-      protoc_command = 'protoc'
-
-    # Ensure that the output directory exists.
-    self.outdir = tempfile.mkdtemp()
-
-    # Find all proto files
-    paths = []
-    root_dir = os.path.dirname(os.path.realpath(__file__))
-    proto_dir = os.path.join(root_dir, 'protos')
-    for walk_root, _, filenames in os.walk(proto_dir):
-      for filename in filenames:
-        if filename.endswith('.proto'):
-          path = os.path.join(walk_root, filename)
-          paths.append(path)
-
-    # Invoke protoc with the plugin.
-    cmd = [
-        protoc_command,
-        '--plugin=protoc-gen-python-grpc=%s' % protoc_plugin_filename,
-        '-I %s' % root_dir,
-        '--python_out=%s' % self.outdir,
-        '--python-grpc_out=%s' % self.outdir
-    ] + paths
-    subprocess.check_call(' '.join(cmd), shell=True, env=os.environ,
-                          cwd=os.path.dirname(os.path.realpath(__file__)))
-
-    # Generated proto directories dont include __init__.py, but
-    # these are needed for python package resolution
-    for walk_root, _, _ in os.walk(os.path.join(self.outdir, 'protos')):
-      path = os.path.join(walk_root, '__init__.py')
-      open(path, 'a').close()
-
-    sys.path.insert(0, self.outdir)
-
-    import protos.payload.test_payload_pb2 as payload_pb2  # pylint: disable=g-import-not-at-top
-    import protos.requests.r.test_requests_pb2 as request_pb2  # pylint: disable=g-import-not-at-top
-    import protos.responses.test_responses_pb2 as response_pb2  # pylint: disable=g-import-not-at-top
-    import protos.service.test_service_pb2 as service_pb2  # pylint: disable=g-import-not-at-top
-    self._payload_pb2 = payload_pb2
-    self._request_pb2 = request_pb2
-    self._response_pb2 = response_pb2
-    self._service_pb2 = service_pb2
-
-  def tearDown(self):
-    try:
-      shutil.rmtree(self.outdir)
-    except OSError as exc:
-      if exc.errno != errno.ENOENT:
-        raise
-    sys.path.remove(self.outdir)
-
   def testImportAttributes(self):
     # check that we can access the generated module and its members.
     self.assertIsNotNone(
-        getattr(self._service_pb2, SERVICER_IDENTIFIER, None))
+        getattr(service_pb2, SERVICER_IDENTIFIER, None))
     self.assertIsNotNone(
-        getattr(self._service_pb2, STUB_IDENTIFIER, None))
+        getattr(service_pb2, STUB_IDENTIFIER, None))
     self.assertIsNotNone(
-        getattr(self._service_pb2, SERVER_FACTORY_IDENTIFIER, None))
+        getattr(service_pb2, SERVER_FACTORY_IDENTIFIER, None))
     self.assertIsNotNone(
-        getattr(self._service_pb2, STUB_FACTORY_IDENTIFIER, None))
+        getattr(service_pb2, STUB_FACTORY_IDENTIFIER, None))
 
   def testUpDown(self):
-    with _CreateService(
-        self._service_pb2, self._response_pb2, self._payload_pb2):
-      self._request_pb2.SimpleRequest(response_size=13)
+    with _CreateService():
+      request_pb2.SimpleRequest(response_size=13)
 
   def testIncompleteServicer(self):
-    with _CreateIncompleteService(self._service_pb2) as (_, stub):
-      request = self._request_pb2.SimpleRequest(response_size=13)
+    with _CreateIncompleteService() as (_, stub):
+      request = request_pb2.SimpleRequest(response_size=13)
       try:
         stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
       except face.AbortionError as error:
         self.assertEqual(interfaces.StatusCode.UNIMPLEMENTED, error.code)
 
   def testUnaryCall(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
-      request = self._request_pb2.SimpleRequest(response_size=13)
+    with _CreateService() as (methods, stub):
+      request = request_pb2.SimpleRequest(response_size=13)
       response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
     expected_response = methods.UnaryCall(request, 'not a real context!')
     self.assertEqual(expected_response, response)
 
   def testUnaryCallFuture(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
-      request = self._request_pb2.SimpleRequest(response_size=13)
+    with _CreateService() as (methods, stub):
+      request = request_pb2.SimpleRequest(response_size=13)
       # Check that the call does not block waiting for the server to respond.
       with methods.pause():
         response_future = stub.UnaryCall.future(
@@ -348,9 +284,8 @@ class PythonPluginTest(unittest.TestCase):
     self.assertEqual(expected_response, response)
 
   def testUnaryCallFutureExpired(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
-      request = self._request_pb2.SimpleRequest(response_size=13)
+    with _CreateService() as (methods, stub):
+      request = request_pb2.SimpleRequest(response_size=13)
       with methods.pause():
         response_future = stub.UnaryCall.future(
             request, test_constants.SHORT_TIMEOUT)
@@ -358,27 +293,24 @@ class PythonPluginTest(unittest.TestCase):
           response_future.result()
 
   def testUnaryCallFutureCancelled(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
-      request = self._request_pb2.SimpleRequest(response_size=13)
+    with _CreateService() as (methods, stub):
+      request = request_pb2.SimpleRequest(response_size=13)
       with methods.pause():
         response_future = stub.UnaryCall.future(request, 1)
         response_future.cancel()
         self.assertTrue(response_future.cancelled())
 
   def testUnaryCallFutureFailed(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
-      request = self._request_pb2.SimpleRequest(response_size=13)
+    with _CreateService() as (methods, stub):
+      request = request_pb2.SimpleRequest(response_size=13)
       with methods.fail():
         response_future = stub.UnaryCall.future(
             request, test_constants.LONG_TIMEOUT)
         self.assertIsNotNone(response_future.exception())
 
   def testStreamingOutputCall(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
-      request = _streaming_output_request(self._request_pb2)
+    with _CreateService() as (methods, stub):
+      request = _streaming_output_request()
       responses = stub.StreamingOutputCall(
           request, test_constants.LONG_TIMEOUT)
       expected_responses = methods.StreamingOutputCall(
@@ -388,9 +320,8 @@ class PythonPluginTest(unittest.TestCase):
         self.assertEqual(expected_response, response)
 
   def testStreamingOutputCallExpired(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
-      request = _streaming_output_request(self._request_pb2)
+    with _CreateService() as (methods, stub):
+      request = _streaming_output_request()
       with methods.pause():
         responses = stub.StreamingOutputCall(
             request, test_constants.SHORT_TIMEOUT)
@@ -398,9 +329,8 @@ class PythonPluginTest(unittest.TestCase):
           list(responses)
 
   def testStreamingOutputCallCancelled(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
-      request = _streaming_output_request(self._request_pb2)
+    with _CreateService() as (methods, stub):
+      request = _streaming_output_request()
       responses = stub.StreamingOutputCall(
           request, test_constants.LONG_TIMEOUT)
       next(responses)
@@ -409,9 +339,8 @@ class PythonPluginTest(unittest.TestCase):
         next(responses)
 
   def testStreamingOutputCallFailed(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
-      request = _streaming_output_request(self._request_pb2)
+    with _CreateService() as (methods, stub):
+      request = _streaming_output_request()
       with methods.fail():
         responses = stub.StreamingOutputCall(request, 1)
         self.assertIsNotNone(responses)
@@ -419,38 +348,32 @@ class PythonPluginTest(unittest.TestCase):
           next(responses)
 
   def testStreamingInputCall(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
+    with _CreateService() as (methods, stub):
       response = stub.StreamingInputCall(
-          _streaming_input_request_iterator(
-              self._request_pb2, self._payload_pb2),
+          _streaming_input_request_iterator(),
           test_constants.LONG_TIMEOUT)
     expected_response = methods.StreamingInputCall(
-        _streaming_input_request_iterator(self._request_pb2, self._payload_pb2),
+        _streaming_input_request_iterator(),
         'not a real RpcContext!')
     self.assertEqual(expected_response, response)
 
   def testStreamingInputCallFuture(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
+    with _CreateService() as (methods, stub):
       with methods.pause():
         response_future = stub.StreamingInputCall.future(
-            _streaming_input_request_iterator(
-                self._request_pb2, self._payload_pb2),
+            _streaming_input_request_iterator(),
             test_constants.LONG_TIMEOUT)
       response = response_future.result()
     expected_response = methods.StreamingInputCall(
-        _streaming_input_request_iterator(self._request_pb2, self._payload_pb2),
+        _streaming_input_request_iterator(),
         'not a real RpcContext!')
     self.assertEqual(expected_response, response)
 
   def testStreamingInputCallFutureExpired(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
+    with _CreateService() as (methods, stub):
       with methods.pause():
         response_future = stub.StreamingInputCall.future(
-            _streaming_input_request_iterator(
-                self._request_pb2, self._payload_pb2),
+            _streaming_input_request_iterator(),
             test_constants.SHORT_TIMEOUT)
         with self.assertRaises(face.ExpirationError):
           response_future.result()
@@ -458,12 +381,10 @@ class PythonPluginTest(unittest.TestCase):
             response_future.exception(), face.ExpirationError)
 
   def testStreamingInputCallFutureCancelled(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
+    with _CreateService() as (methods, stub):
       with methods.pause():
         response_future = stub.StreamingInputCall.future(
-            _streaming_input_request_iterator(
-                self._request_pb2, self._payload_pb2),
+            _streaming_input_request_iterator(),
             test_constants.LONG_TIMEOUT)
         response_future.cancel()
         self.assertTrue(response_future.cancelled())
@@ -471,32 +392,28 @@ class PythonPluginTest(unittest.TestCase):
         response_future.result()
 
   def testStreamingInputCallFutureFailed(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
+    with _CreateService() as (methods, stub):
       with methods.fail():
         response_future = stub.StreamingInputCall.future(
-            _streaming_input_request_iterator(
-                self._request_pb2, self._payload_pb2),
+            _streaming_input_request_iterator(),
             test_constants.LONG_TIMEOUT)
         self.assertIsNotNone(response_future.exception())
 
   def testFullDuplexCall(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
+    with _CreateService() as (methods, stub):
       responses = stub.FullDuplexCall(
-          _full_duplex_request_iterator(self._request_pb2),
+          _full_duplex_request_iterator(),
           test_constants.LONG_TIMEOUT)
       expected_responses = methods.FullDuplexCall(
-          _full_duplex_request_iterator(self._request_pb2),
+          _full_duplex_request_iterator(),
           'not a real RpcContext!')
       for expected_response, response in moves.zip_longest(
           expected_responses, responses):
         self.assertEqual(expected_response, response)
 
   def testFullDuplexCallExpired(self):
-    request_iterator = _full_duplex_request_iterator(self._request_pb2)
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
+    request_iterator = _full_duplex_request_iterator()
+    with _CreateService() as (methods, stub):
       with methods.pause():
         responses = stub.FullDuplexCall(
             request_iterator, test_constants.SHORT_TIMEOUT)
@@ -504,9 +421,8 @@ class PythonPluginTest(unittest.TestCase):
           list(responses)
 
   def testFullDuplexCallCancelled(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
-      request_iterator = _full_duplex_request_iterator(self._request_pb2)
+    with _CreateService() as (methods, stub):
+      request_iterator = _full_duplex_request_iterator()
       responses = stub.FullDuplexCall(
           request_iterator, test_constants.LONG_TIMEOUT)
       next(responses)
@@ -515,9 +431,8 @@ class PythonPluginTest(unittest.TestCase):
         next(responses)
 
   def testFullDuplexCallFailed(self):
-    request_iterator = _full_duplex_request_iterator(self._request_pb2)
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
+    request_iterator = _full_duplex_request_iterator()
+    with _CreateService() as (methods, stub):
       with methods.fail():
         responses = stub.FullDuplexCall(
             request_iterator, test_constants.LONG_TIMEOUT)
@@ -526,13 +441,12 @@ class PythonPluginTest(unittest.TestCase):
           next(responses)
 
   def testHalfDuplexCall(self):
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
+    with _CreateService() as (methods, stub):
       def half_duplex_request_iterator():
-        request = self._request_pb2.StreamingOutputCallRequest()
+        request = request_pb2.StreamingOutputCallRequest()
         request.response_parameters.add(size=1, interval_us=0)
         yield request
-        request = self._request_pb2.StreamingOutputCallRequest()
+        request = request_pb2.StreamingOutputCallRequest()
         request.response_parameters.add(size=2, interval_us=0)
         request.response_parameters.add(size=3, interval_us=0)
         yield request
@@ -557,14 +471,13 @@ class PythonPluginTest(unittest.TestCase):
         wait_cell[0] = False
         condition.notify_all()
     def half_duplex_request_iterator():
-      request = self._request_pb2.StreamingOutputCallRequest()
+      request = request_pb2.StreamingOutputCallRequest()
       request.response_parameters.add(size=1, interval_us=0)
       yield request
       with condition:
         while wait_cell[0]:
           condition.wait()
-    with _CreateService(self._service_pb2, self._response_pb2,
-                        self._payload_pb2) as (methods, stub):
+    with _CreateService() as (methods, stub):
       with wait():
         responses = stub.HalfDuplexCall(
             half_duplex_request_iterator(), test_constants.SHORT_TIMEOUT)
@@ -574,5 +487,4 @@ class PythonPluginTest(unittest.TestCase):
 
 
 if __name__ == '__main__':
-  #os.chdir(os.path.dirname(sys.argv[0]))
   unittest.main(verbosity=2)
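
For reference: the deleted setUp above used to invoke protoc with the
grpc_python_plugin at test time and import the generated modules from a
temporary directory; the test now imports pre-generated request_pb2,
response_pb2, payload_pb2 and service_pb2 modules at module scope. A minimal
sketch of the equivalent one-off generation step, mirroring the removed
command (paths are illustrative, and grpc_python_plugin is assumed to be
built and discoverable on PATH):

    import distutils.spawn
    import subprocess

    # Locate the gRPC Python codegen plugin and run protoc against one of the
    # test protos, emitting both message and servicer/stub code.
    plugin = distutils.spawn.find_executable('grpc_python_plugin')
    subprocess.check_call([
        'protoc',
        '--plugin=protoc-gen-python-grpc=%s' % plugin,
        '-I', '.',
        '--python_out=.',
        '--python-grpc_out=.',
        'tests/protoc_plugin/protos/service/test_service.proto',
    ])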
diff --git a/src/python/grpcio/grpc/framework/core/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/__init__.py
similarity index 100%
rename from src/python/grpcio/grpc/framework/core/__init__.py
rename to src/python/grpcio_tests/tests/protoc_plugin/protos/__init__.py
diff --git a/src/python/grpcio/grpc/framework/crust/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/payload/__init__.py
similarity index 100%
rename from src/python/grpcio/grpc/framework/crust/__init__.py
rename to src/python/grpcio_tests/tests/protoc_plugin/protos/payload/__init__.py
diff --git a/src/python/grpcio/tests/protoc_plugin/protos/payload/test_payload.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/payload/test_payload.proto
similarity index 100%
rename from src/python/grpcio/tests/protoc_plugin/protos/payload/test_payload.proto
rename to src/python/grpcio_tests/tests/protoc_plugin/protos/payload/test_payload.proto
diff --git a/src/python/grpcio/grpc/framework/interfaces/links/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/__init__.py
similarity index 100%
rename from src/python/grpcio/grpc/framework/interfaces/links/__init__.py
rename to src/python/grpcio_tests/tests/protoc_plugin/protos/requests/__init__.py
diff --git a/src/python/grpcio/tests/interop/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/interop/__init__.py
rename to src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/__init__.py
diff --git a/src/python/grpcio/tests/protoc_plugin/protos/requests/r/test_requests.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/test_requests.proto
similarity index 97%
rename from src/python/grpcio/tests/protoc_plugin/protos/requests/r/test_requests.proto
rename to src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/test_requests.proto
index 54105df6a56c4bc717a381e896550ea43c974dcd..365ae738e14a17dcc8d14407d6d8bbbd1f98645e 100644
--- a/src/python/grpcio/tests/protoc_plugin/protos/requests/r/test_requests.proto
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/test_requests.proto
@@ -29,7 +29,7 @@
 
 syntax = "proto3";
 
-import "protos/payload/test_payload.proto";
+import "tests/protoc_plugin/protos/payload/test_payload.proto";
 
 package grpc_protoc_plugin;
 
diff --git a/src/python/grpcio/tests/protoc_plugin/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/protoc_plugin/__init__.py
rename to src/python/grpcio_tests/tests/protoc_plugin/protos/responses/__init__.py
diff --git a/src/python/grpcio/tests/protoc_plugin/protos/responses/test_responses.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/test_responses.proto
similarity index 96%
rename from src/python/grpcio/tests/protoc_plugin/protos/responses/test_responses.proto
rename to src/python/grpcio_tests/tests/protoc_plugin/protos/responses/test_responses.proto
index 734fbda86e9411db250317f0d0256a5bb2b9fe72..1d54d58db1efce231b1a40e46d74374561fcc19d 100644
--- a/src/python/grpcio/tests/protoc_plugin/protos/responses/test_responses.proto
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/test_responses.proto
@@ -29,7 +29,7 @@
 
 syntax = "proto3";
 
-import "protos/payload/test_payload.proto";
+import "tests/protoc_plugin/protos/payload/test_payload.proto";
 
 package grpc_protoc_plugin;
 
diff --git a/src/python/grpcio/tests/unit/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/service/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/__init__.py
rename to src/python/grpcio_tests/tests/protoc_plugin/protos/service/__init__.py
diff --git a/src/python/grpcio/tests/protoc_plugin/protos/service/test_service.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/service/test_service.proto
similarity index 95%
rename from src/python/grpcio/tests/protoc_plugin/protos/service/test_service.proto
rename to src/python/grpcio_tests/tests/protoc_plugin/protos/service/test_service.proto
index fe715ee7f9205d6ec938d1659230e7daea1e919a..003dbbb9eb49becb7f590466f8c5748e3c98b54e 100644
--- a/src/python/grpcio/tests/protoc_plugin/protos/service/test_service.proto
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/service/test_service.proto
@@ -29,8 +29,8 @@
 
 syntax = "proto3";
 
-import "protos/requests/r/test_requests.proto";
-import "protos/responses/test_responses.proto";
+import "tests/protoc_plugin/protos/requests/r/test_requests.proto";
+import "tests/protoc_plugin/protos/responses/test_responses.proto";
 
 package grpc_protoc_plugin;
 
diff --git a/src/python/grpcio/tests/qps/__init__.py b/src/python/grpcio_tests/tests/qps/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/qps/__init__.py
rename to src/python/grpcio_tests/tests/qps/__init__.py
diff --git a/src/python/grpcio/tests/qps/benchmark_client.py b/src/python/grpcio_tests/tests/qps/benchmark_client.py
similarity index 88%
rename from src/python/grpcio/tests/qps/benchmark_client.py
rename to src/python/grpcio_tests/tests/qps/benchmark_client.py
index 080281415d470596722d78839e60a560814de35d..83b46c914e7f89d8d8966c29d573d69b6f310413 100644
--- a/src/python/grpcio/tests/qps/benchmark_client.py
+++ b/src/python/grpcio_tests/tests/qps/benchmark_client.py
@@ -37,16 +37,23 @@ from concurrent import futures
 from six.moves import queue
 
 import grpc
-from grpc.beta import implementations
-from grpc.framework.interfaces.face import face
 from src.proto.grpc.testing import messages_pb2
 from src.proto.grpc.testing import services_pb2
 from tests.unit import resources
-from tests.unit.beta import test_utilities
+from tests.unit import test_common
 
 _TIMEOUT = 60 * 60 * 24
 
 
+class GenericStub(object):
+
+  def __init__(self, channel):
+    self.UnaryCall = channel.unary_unary(
+        '/grpc.testing.BenchmarkService/UnaryCall')
+    self.StreamingCall = channel.stream_stream(
+        '/grpc.testing.BenchmarkService/StreamingCall')
+
+
 class BenchmarkClient:
   """Benchmark client interface that exposes a non-blocking send_request()."""
 
@@ -54,15 +61,12 @@ class BenchmarkClient:
 
   def __init__(self, server, config, hist):
     # Create the stub
-    host, port = server.split(':')
-    port = int(port)
     if config.HasField('security_params'):
-      creds = implementations.ssl_channel_credentials(
-          resources.test_root_certificates())
-      channel = test_utilities.not_really_secure_channel(
-          host, port, creds, config.security_params.server_host_override)
+      creds = grpc.ssl_channel_credentials(resources.test_root_certificates())
+      channel = test_common.test_secure_channel(
+        server, creds, config.security_params.server_host_override)
     else:
-      channel = implementations.insecure_channel(host, port)
+      channel = grpc.insecure_channel(server)
 
     connected_event = threading.Event()
     def wait_for_ready(connectivity):
@@ -73,7 +77,7 @@ class BenchmarkClient:
 
     if config.payload_config.WhichOneof('payload') == 'simple_params':
       self._generic = False
-      self._stub = services_pb2.beta_create_BenchmarkService_stub(channel)
+      self._stub = services_pb2.BenchmarkServiceStub(channel)
       payload = messages_pb2.Payload(
           body='\0' * config.payload_config.simple_params.req_size)
       self._request = messages_pb2.SimpleRequest(
@@ -81,7 +85,7 @@ class BenchmarkClient:
           response_size=config.payload_config.simple_params.resp_size)
     else:
       self._generic = True
-      self._stub = implementations.generic_stub(channel)
+      self._stub = GenericStub(channel)
       self._request = '\0' * config.payload_config.bytebuf_params.req_size
 
     self._hist = hist
@@ -166,13 +170,8 @@ class _SyncStream(object):
 
   def start(self):
     self._is_streaming = True
-    if self._generic:
-      stream_callable = self._stub.stream_stream(
-          'grpc.testing.BenchmarkService', 'StreamingCall')
-    else:
-      stream_callable = self._stub.StreamingCall
-
-    response_stream = stream_callable(self._request_generator(), _TIMEOUT)
+    response_stream = self._stub.StreamingCall(
+        self._request_generator(), _TIMEOUT)
     for _ in response_stream:
       self._handle_response(
           self, time.time() - self._send_time_queue.get_nowait())
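
The hunks above move benchmark_client.py off grpc.beta.implementations and
onto the top-level grpc API: the channel is built from a single 'host:port'
target, the generated stub is constructed directly from the channel, and
generic byte-buffer calls become multi-callables hung off the channel (which
is all the GenericStub helper wraps). A condensed sketch of that pattern,
with the target string illustrative rather than taken from this diff:

    import grpc
    from src.proto.grpc.testing import services_pb2

    # GA API: one target string instead of separate host and port.
    channel = grpc.insecure_channel('localhost:50051')
    stub = services_pb2.BenchmarkServiceStub(channel)

    # Generic (serializer-free) calls, as used for bytebuf payloads.
    streaming_call = channel.stream_stream(
        '/grpc.testing.BenchmarkService/StreamingCall')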
diff --git a/src/python/grpcio/tests/qps/benchmark_server.py b/src/python/grpcio_tests/tests/qps/benchmark_server.py
similarity index 94%
rename from src/python/grpcio/tests/qps/benchmark_server.py
rename to src/python/grpcio_tests/tests/qps/benchmark_server.py
index 8cbf480d58bb8798cfd0ed740414f44e66fdd041..2b76b810cdd869dff0f221c50b60a7d42867c27a 100644
--- a/src/python/grpcio/tests/qps/benchmark_server.py
+++ b/src/python/grpcio_tests/tests/qps/benchmark_server.py
@@ -31,7 +31,7 @@ from src.proto.grpc.testing import messages_pb2
 from src.proto.grpc.testing import services_pb2
 
 
-class BenchmarkServer(services_pb2.BetaBenchmarkServiceServicer):
+class BenchmarkServer(services_pb2.BenchmarkServiceServicer):
   """Synchronous Server implementation for the Benchmark service."""
 
   def UnaryCall(self, request, context):
@@ -44,7 +44,7 @@ class BenchmarkServer(services_pb2.BetaBenchmarkServiceServicer):
       yield messages_pb2.SimpleResponse(payload=payload)
 
 
-class GenericBenchmarkServer(services_pb2.BetaBenchmarkServiceServicer):
+class GenericBenchmarkServer(services_pb2.BenchmarkServiceServicer):
   """Generic Server implementation for the Benchmark service."""
 
   def __init__(self, resp_size):
diff --git a/src/python/grpcio/tests/qps/client_runner.py b/src/python/grpcio_tests/tests/qps/client_runner.py
similarity index 100%
rename from src/python/grpcio/tests/qps/client_runner.py
rename to src/python/grpcio_tests/tests/qps/client_runner.py
diff --git a/src/python/grpcio/tests/qps/histogram.py b/src/python/grpcio_tests/tests/qps/histogram.py
similarity index 100%
rename from src/python/grpcio/tests/qps/histogram.py
rename to src/python/grpcio_tests/tests/qps/histogram.py
diff --git a/src/python/grpcio/tests/qps/qps_worker.py b/src/python/grpcio_tests/tests/qps/qps_worker.py
similarity index 91%
rename from src/python/grpcio/tests/qps/qps_worker.py
rename to src/python/grpcio_tests/tests/qps/qps_worker.py
index 16926379a5bca47a5553456a47018af9b784f337..2371ff095627cf19e96cec2618a3c3b8e4092559 100644
--- a/src/python/grpcio/tests/qps/qps_worker.py
+++ b/src/python/grpcio_tests/tests/qps/qps_worker.py
@@ -32,18 +32,21 @@
 import argparse
 import time
 
+from concurrent import futures
+import grpc
 from src.proto.grpc.testing import services_pb2
 
 from tests.qps import worker_server
 
 
 def run_worker_server(port):
+  server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
   servicer = worker_server.WorkerServer()
-  server = services_pb2.beta_create_WorkerService_server(servicer)
+  services_pb2.add_WorkerServiceServicer_to_server(servicer, server)
   server.add_insecure_port('[::]:{}'.format(port))
   server.start()
   servicer.wait_for_quit()
-  server.stop(2)
+  server.stop(0)
 
 
 if __name__ == '__main__':
diff --git a/src/python/grpcio/tests/qps/worker_server.py b/src/python/grpcio_tests/tests/qps/worker_server.py
similarity index 86%
rename from src/python/grpcio/tests/qps/worker_server.py
rename to src/python/grpcio_tests/tests/qps/worker_server.py
index d41f8377c2a118202707550587212a74e15e760e..46d542940f1079b049ddb783f2e48d2a6cb8d519 100644
--- a/src/python/grpcio/tests/qps/worker_server.py
+++ b/src/python/grpcio_tests/tests/qps/worker_server.py
@@ -32,8 +32,8 @@ import random
 import threading
 import time
 
-from grpc.beta import implementations
-from grpc.framework.interfaces.face import utilities
+from concurrent import futures
+import grpc
 from src.proto.grpc.testing import control_pb2
 from src.proto.grpc.testing import services_pb2
 from src.proto.grpc.testing import stats_pb2
@@ -45,7 +45,7 @@ from tests.qps import histogram
 from tests.unit import resources
 
 
-class WorkerServer(services_pb2.BetaWorkerServiceServicer):
+class WorkerServer(services_pb2.WorkerServiceServicer):
   """Python Worker Server implementation."""
 
   def __init__(self):
@@ -65,7 +65,7 @@ class WorkerServer(services_pb2.BetaWorkerServiceServicer):
       if request.mark.reset:
         start_time = end_time
       yield status
-    server.stop(0)
+    server.stop(None)
 
   def _get_server_status(self, start_time, end_time, port, cores):
     end_time = time.time()
@@ -76,25 +76,35 @@ class WorkerServer(services_pb2.BetaWorkerServiceServicer):
     return control_pb2.ServerStatus(stats=stats, port=port, cores=cores)
 
   def _create_server(self, config):
-    if config.server_type == control_pb2.SYNC_SERVER:
+    if config.async_server_threads == 0:
+      # This is the default concurrent.futures thread pool size, but
+      # None doesn't seem to work
+      server_threads = multiprocessing.cpu_count() * 5
+    else:
+      server_threads = config.async_server_threads
+    server = grpc.server(futures.ThreadPoolExecutor(
+        max_workers=server_threads))
+    if config.server_type == control_pb2.ASYNC_SERVER:
       servicer = benchmark_server.BenchmarkServer()
-      server = services_pb2.beta_create_BenchmarkService_server(servicer)
+      services_pb2.add_BenchmarkServiceServicer_to_server(servicer, server)
     elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER:
       resp_size = config.payload_config.bytebuf_params.resp_size
       servicer = benchmark_server.GenericBenchmarkServer(resp_size)
       method_implementations = {
-          ('grpc.testing.BenchmarkService', 'StreamingCall'):
-          utilities.stream_stream_inline(servicer.StreamingCall),
-          ('grpc.testing.BenchmarkService', 'UnaryCall'):
-          utilities.unary_unary_inline(servicer.UnaryCall),
+          'StreamingCall':
+          grpc.stream_stream_rpc_method_handler(servicer.StreamingCall),
+          'UnaryCall':
+          grpc.unary_unary_rpc_method_handler(servicer.UnaryCall),
       }
-      server = implementations.server(method_implementations)
+      handler = grpc.method_handlers_generic_handler(
+          'grpc.testing.BenchmarkService', method_implementations)
+      server.add_generic_rpc_handlers((handler,))
     else:
       raise Exception('Unsupported server type {}'.format(config.server_type))
 
     if config.HasField('security_params'):  # Use SSL
-      server_creds = implementations.ssl_server_credentials([(
-          resources.private_key(), resources.certificate_chain())])
+      server_creds = grpc.ssl_server_credentials(
+          ((resources.private_key(), resources.certificate_chain()),))
       port = server.add_secure_port('[::]:{}'.format(config.port), server_creds)
     else:
       port = server.add_insecure_port('[::]:{}'.format(config.port))
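
The generic-server branch above replaces implementations.server(...) with
handler registration on a caller-owned grpc.server, and SSL credentials now
take an iterable of (private key, certificate chain) pairs. A condensed
sketch of that registration pattern (the echo behavior, pool size, and port
are illustrative, not taken from this diff):

    from concurrent import futures
    import grpc

    def _echo_stream(request_iterator, servicer_context):
      # Illustrative generic handler: echo each request back unchanged.
      for request in request_iterator:
        yield request

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    handler = grpc.method_handlers_generic_handler(
        'grpc.testing.BenchmarkService',
        {'StreamingCall': grpc.stream_stream_rpc_method_handler(_echo_stream)})
    server.add_generic_rpc_handlers((handler,))
    server.add_insecure_port('[::]:0')
    server.start()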
diff --git a/src/python/grpcio/tests/stress/__init__.py b/src/python/grpcio_tests/tests/stress/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/stress/__init__.py
rename to src/python/grpcio_tests/tests/stress/__init__.py
diff --git a/src/python/grpcio/tests/stress/client.py b/src/python/grpcio_tests/tests/stress/client.py
similarity index 100%
rename from src/python/grpcio/tests/stress/client.py
rename to src/python/grpcio_tests/tests/stress/client.py
diff --git a/src/python/grpcio/tests/stress/metrics_server.py b/src/python/grpcio_tests/tests/stress/metrics_server.py
similarity index 100%
rename from src/python/grpcio/tests/stress/metrics_server.py
rename to src/python/grpcio_tests/tests/stress/metrics_server.py
diff --git a/src/python/grpcio/tests/stress/test_runner.py b/src/python/grpcio_tests/tests/stress/test_runner.py
similarity index 100%
rename from src/python/grpcio/tests/stress/test_runner.py
rename to src/python/grpcio_tests/tests/stress/test_runner.py
diff --git a/src/python/grpcio/tests/tests.json b/src/python/grpcio_tests/tests/tests.json
similarity index 92%
rename from src/python/grpcio/tests/tests.json
rename to src/python/grpcio_tests/tests/tests.json
index 8e509621a84029ad60f50f9eb59fb1521db29750..dcaef0db1fabb4c36bf996854b9817f30d2b987f 100644
--- a/src/python/grpcio/tests/tests.json
+++ b/src/python/grpcio_tests/tests/tests.json
@@ -10,9 +10,11 @@
   "_channel_connectivity_test.ChannelConnectivityTest",
   "_channel_ready_future_test.ChannelReadyFutureTest",
   "_channel_test.ChannelTest", 
-  "_connectivity_channel_test.ChannelConnectivityTest", 
+  "_compression_test.CompressionTest",
   "_connectivity_channel_test.ConnectivityStatesTest",
+  "_credentials_test.CredentialsTest",
   "_empty_message_test.EmptyMessageTest",
+  "_exit_test.ExitTest",
   "_face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest", 
   "_face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest", 
   "_face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest", 
@@ -24,6 +26,7 @@
   "_implementations_test.ChannelCredentialsTest", 
   "_insecure_interop_test.InsecureInteropTest", 
   "_logging_pool_test.LoggingPoolTest", 
+  "_metadata_code_details_test.MetadataCodeDetailsTest",
   "_metadata_test.MetadataTest",
   "_not_found_test.NotFoundTest", 
   "_python_plugin_test.PythonPluginTest",
diff --git a/src/python/grpcio/tests/unit/_adapter/__init__.py b/src/python/grpcio_tests/tests/unit/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_adapter/__init__.py
rename to src/python/grpcio_tests/tests/unit/__init__.py
diff --git a/src/python/grpcio/tests/unit/_api_test.py b/src/python/grpcio_tests/tests/unit/_api_test.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_api_test.py
rename to src/python/grpcio_tests/tests/unit/_api_test.py
diff --git a/src/python/grpcio/tests/unit/_auth_test.py b/src/python/grpcio_tests/tests/unit/_auth_test.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_auth_test.py
rename to src/python/grpcio_tests/tests/unit/_auth_test.py
diff --git a/src/python/grpcio/tests/unit/_channel_connectivity_test.py b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
similarity index 95%
rename from src/python/grpcio/tests/unit/_channel_connectivity_test.py
rename to src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
index a1575efada794f330859174c05dda42a122d7f8d..3c00f686cec53b72e77ee6513ee62d5b557ea932 100644
--- a/src/python/grpcio/tests/unit/_channel_connectivity_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
@@ -104,7 +104,7 @@ class ChannelConnectivityTest(unittest.TestCase):
         grpc.ChannelConnectivity.READY, fifth_connectivities)
 
   def test_immediately_connectable_channel_connectivity(self):
-    server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0))
+    server = _server.Server(futures.ThreadPoolExecutor(max_workers=0), ())
     port = server.add_insecure_port('[::]:0')
     server.start()
     first_callback = _Callback()
@@ -135,15 +135,15 @@ class ChannelConnectivityTest(unittest.TestCase):
     self.assertNotIn(
         grpc.ChannelConnectivity.TRANSIENT_FAILURE, third_connectivities)
     self.assertNotIn(
-        grpc.ChannelConnectivity.FATAL_FAILURE, third_connectivities)
+        grpc.ChannelConnectivity.SHUTDOWN, third_connectivities)
     self.assertNotIn(
         grpc.ChannelConnectivity.TRANSIENT_FAILURE,
         fourth_connectivities)
     self.assertNotIn(
-        grpc.ChannelConnectivity.FATAL_FAILURE, fourth_connectivities)
+        grpc.ChannelConnectivity.SHUTDOWN, fourth_connectivities)
 
   def test_reachable_then_unreachable_channel_connectivity(self):
-    server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0))
+    server = _server.Server(futures.ThreadPoolExecutor(max_workers=0), ())
     port = server.add_insecure_port('[::]:0')
     server.start()
     callback = _Callback()
diff --git a/src/python/grpcio/tests/unit/_channel_ready_future_test.py b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
similarity index 98%
rename from src/python/grpcio/tests/unit/_channel_ready_future_test.py
rename to src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
index b84bc0197a92cec171320df8f34dbfdf1db351fe..e8982ed2ded9889491dacb38d2927ad8b4c42cff 100644
--- a/src/python/grpcio/tests/unit/_channel_ready_future_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
@@ -78,7 +78,7 @@ class ChannelReadyFutureTest(unittest.TestCase):
     self.assertFalse(ready_future.running())
 
   def test_immediately_connectable_channel_connectivity(self):
-    server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0))
+    server = _server.Server(futures.ThreadPoolExecutor(max_workers=0), ())
     port = server.add_insecure_port('[::]:0')
     server.start()
     channel = grpc.insecure_channel('localhost:{}'.format(port))
diff --git a/src/python/grpcio_tests/tests/unit/_compression_test.py b/src/python/grpcio_tests/tests/unit/_compression_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..83b91094666cd5a0be5e879cf0f3309b622c4532
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_compression_test.py
@@ -0,0 +1,134 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""Tests server and client side compression."""
+
+import unittest
+
+import grpc
+from grpc import _grpcio_metadata
+from grpc.framework.foundation import logging_pool
+
+from tests.unit import test_common
+from tests.unit.framework.common import test_constants
+
+_UNARY_UNARY = '/test/UnaryUnary'
+_STREAM_STREAM = '/test/StreamStream'
+
+
+def handle_unary(request, servicer_context):
+  servicer_context.send_initial_metadata([
+    ('grpc-internal-encoding-request', 'gzip')])
+  return request
+
+
+def handle_stream(request_iterator, servicer_context):
+  # TODO(issue:#6891) We should be able to remove this loop,
+  # and replace with return; yield
+  servicer_context.send_initial_metadata([
+    ('grpc-internal-encoding-request', 'gzip')])
+  for request in request_iterator:
+    yield request
+
+
+class _MethodHandler(grpc.RpcMethodHandler):
+
+  def __init__(self, request_streaming, response_streaming):
+    self.request_streaming = request_streaming
+    self.response_streaming = response_streaming
+    self.request_deserializer = None
+    self.response_serializer = None
+    self.unary_unary = None
+    self.unary_stream = None
+    self.stream_unary = None
+    self.stream_stream = None
+    if self.request_streaming and self.response_streaming:
+      self.stream_stream = lambda x, y: handle_stream(x, y)
+    elif not self.request_streaming and not self.response_streaming:
+      self.unary_unary = lambda x, y: handle_unary(x, y)
+
+
+class _GenericHandler(grpc.GenericRpcHandler):
+
+  def service(self, handler_call_details):
+    if handler_call_details.method == _UNARY_UNARY:
+      return _MethodHandler(False, False)
+    elif handler_call_details.method == _STREAM_STREAM:
+      return _MethodHandler(True, True)
+    else:
+      return None
+
+
+class CompressionTest(unittest.TestCase):
+
+  def setUp(self):
+    self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+    self._server = grpc.server(
+        self._server_pool, handlers=(_GenericHandler(),))
+    self._port = self._server.add_insecure_port('[::]:0')
+    self._server.start()
+
+  def testUnary(self):
+    request = b'\x00' * 100
+
+    # Client -> server compressed through default client channel compression
+    # settings. Server -> client compressed via server-side metadata setting.
+    # TODO(https://github.com/grpc/grpc/issues/4078): replace the "1" integer
+    # literal with proper use of the public API.
+    compressed_channel = grpc.insecure_channel('localhost:%d' % self._port,
+        options=[('grpc.default_compression_algorithm', 1)])
+    multi_callable = compressed_channel.unary_unary(_UNARY_UNARY)
+    response = multi_callable(request)
+    self.assertEqual(request, response)
+
+    # Client -> server compressed through client metadata setting. Server ->
+    # client compressed via server-side metadata setting.
+    # TODO(https://github.com/grpc/grpc/issues/4078): replace the "0" integer
+    # literal with proper use of the public API.
+    uncompressed_channel = grpc.insecure_channel('localhost:%d' % self._port,
+        options=[('grpc.default_compression_algorithm', 0)])
+    multi_callable = uncompressed_channel.unary_unary(_UNARY_UNARY)
+    response = multi_callable(request, metadata=[
+      ('grpc-internal-encoding-request', 'gzip')])
+    self.assertEqual(request, response)
+
+  def testStreaming(self):
+    request = b'\x00' * 100
+
+    # TODO(https://github.com/grpc/grpc/issues/4078): replace the "1" integer
+    # literal with proper use of the public API.
+    compressed_channel = grpc.insecure_channel('localhost:%d' % self._port,
+        options=[('grpc.default_compression_algorithm', 1)])
+    multi_callable = compressed_channel.stream_stream(_STREAM_STREAM)
+    call = multi_callable([request] * test_constants.STREAM_LENGTH)
+    for response in call:
+      self.assertEqual(request, response)
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
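
The new test above exercises the two client-side compression mechanisms: a
per-channel default set through the grpc.default_compression_algorithm
channel option (the bare integer literal is a placeholder for a public
constant, per the TODO referencing issue #4078), and a per-call override
requested through the grpc-internal-encoding-request metadata key. A minimal
client-side sketch, with the target illustrative:

    import grpc

    # Per-channel: requests on this channel use the configured algorithm.
    compressed = grpc.insecure_channel(
        'localhost:50051',
        options=[('grpc.default_compression_algorithm', 1)])

    # Per-call: leave the channel alone and request gzip via metadata.
    plain = grpc.insecure_channel('localhost:50051')
    unary = plain.unary_unary('/test/UnaryUnary')
    response = unary(b'\x00' * 100,
                     metadata=[('grpc-internal-encoding-request', 'gzip')])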
diff --git a/src/python/grpcio_tests/tests/unit/_credentials_test.py b/src/python/grpcio_tests/tests/unit/_credentials_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..87af85a0b9b01f6ce9bf906c5a1199462d72cf27
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_credentials_test.py
@@ -0,0 +1,72 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests of credentials."""
+
+import unittest
+
+import grpc
+
+
+class CredentialsTest(unittest.TestCase):
+
+  def test_call_credentials_composition(self):
+    first = grpc.access_token_call_credentials('abc')
+    second = grpc.access_token_call_credentials('def')
+    third = grpc.access_token_call_credentials('ghi')
+
+    first_and_second = grpc.composite_call_credentials(first, second)
+    first_second_and_third = grpc.composite_call_credentials(
+        first, second, third)
+    
+    self.assertIsInstance(first_and_second, grpc.CallCredentials)
+    self.assertIsInstance(first_second_and_third, grpc.CallCredentials)
+
+  def test_channel_credentials_composition(self):
+    first_call_credentials = grpc.access_token_call_credentials('abc')
+    second_call_credentials = grpc.access_token_call_credentials('def')
+    third_call_credentials = grpc.access_token_call_credentials('ghi')
+    channel_credentials = grpc.ssl_channel_credentials()
+
+    channel_and_first = grpc.composite_channel_credentials(
+        channel_credentials, first_call_credentials)
+    channel_first_and_second = grpc.composite_channel_credentials(
+        channel_credentials, first_call_credentials, second_call_credentials)
+    channel_first_second_and_third = grpc.composite_channel_credentials(
+        channel_credentials, first_call_credentials, second_call_credentials,
+        third_call_credentials)
+
+    self.assertIsInstance(channel_and_first, grpc.ChannelCredentials)
+    self.assertIsInstance(channel_first_and_second, grpc.ChannelCredentials)
+    self.assertIsInstance(
+        channel_first_second_and_third, grpc.ChannelCredentials)
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
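
The assertions above only verify that composition yields CallCredentials and
ChannelCredentials objects. For context, a hedged sketch of how such composed
credentials are typically consumed, with the target and token values
illustrative:

    import grpc

    # Stack SSL channel credentials with a per-call access token.
    channel_credentials = grpc.composite_channel_credentials(
        grpc.ssl_channel_credentials(),
        grpc.access_token_call_credentials('abc'))
    channel = grpc.secure_channel('localhost:50051', channel_credentials)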
diff --git a/src/python/grpcio/tests/unit/_cython/.gitignore b/src/python/grpcio_tests/tests/unit/_cython/.gitignore
similarity index 100%
rename from src/python/grpcio/tests/unit/_cython/.gitignore
rename to src/python/grpcio_tests/tests/unit/_cython/.gitignore
diff --git a/src/python/grpcio/tests/unit/_cython/__init__.py b/src/python/grpcio_tests/tests/unit/_cython/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_cython/__init__.py
rename to src/python/grpcio_tests/tests/unit/_cython/__init__.py
diff --git a/src/python/grpcio/tests/unit/_cython/_cancel_many_calls_test.py b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
similarity index 96%
rename from src/python/grpcio/tests/unit/_cython/_cancel_many_calls_test.py
rename to src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
index c1de7790145bf4b36324716ce2f5d7a94b9fa6fd..cf212c56539a9f69ae58b37c9d55ea1830e21d3e 100644
--- a/src/python/grpcio/tests/unit/_cython/_cancel_many_calls_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
@@ -81,11 +81,11 @@ class _Handler(object):
         self._state.condition.wait()
 
     with self._lock:
-      self._call.start_batch(
+      self._call.start_server_batch(
           cygrpc.Operations(
               (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
           _RECEIVE_CLOSE_ON_SERVER_TAG)
-      self._call.start_batch(
+      self._call.start_server_batch(
           cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
           _RECEIVE_MESSAGE_TAG)
     first_event = self._completion_queue.poll()
@@ -101,7 +101,7 @@ class _Handler(object):
                 _EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
                 _EMPTY_FLAGS),
         )
-        self._call.start_batch(
+        self._call.start_server_batch(
             cygrpc.Operations(operations), _SERVER_COMPLETE_CALL_TAG)
       self._completion_queue.poll()
       self._completion_queue.poll()
@@ -159,9 +159,9 @@ class CancelManyCallsTest(unittest.TestCase):
     server_completion_queue = cygrpc.CompletionQueue()
     server = cygrpc.Server()
     server.register_completion_queue(server_completion_queue)
-    port = server.add_http2_port('[::]:0')
+    port = server.add_http2_port(b'[::]:0')
     server.start()
-    channel = cygrpc.Channel('localhost:{}'.format(port))
+    channel = cygrpc.Channel('localhost:{}'.format(port).encode())
 
     state = _State()
 
@@ -193,7 +193,7 @@ class CancelManyCallsTest(unittest.TestCase):
             cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
         )
         tag = 'client_complete_call_{0:04d}_tag'.format(index)
-        client_call.start_batch(cygrpc.Operations(operations), tag)
+        client_call.start_client_batch(cygrpc.Operations(operations), tag)
         client_due.add(tag)
         client_calls.append(client_call)
 
diff --git a/src/python/grpcio/tests/unit/_cython/_channel_test.py b/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py
similarity index 97%
rename from src/python/grpcio/tests/unit/_cython/_channel_test.py
rename to src/python/grpcio_tests/tests/unit/_cython/_channel_test.py
index 3dc7a246aeeb7fe7af41a248d1a571909c5b0c1b..f9c8a3ac62bb8401531a5adcac7143a9bb809697 100644
--- a/src/python/grpcio/tests/unit/_cython/_channel_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py
@@ -37,7 +37,7 @@ from tests.unit.framework.common import test_constants
 
 
 def _channel_and_completion_queue():
-  channel = cygrpc.Channel('localhost:54321', cygrpc.ChannelArgs(()))
+  channel = cygrpc.Channel(b'localhost:54321', cygrpc.ChannelArgs(()))
   completion_queue = cygrpc.CompletionQueue()
   return channel, completion_queue
 
diff --git a/src/python/grpcio/tests/unit/_cython/_read_some_but_not_all_responses_test.py b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
similarity index 92%
rename from src/python/grpcio/tests/unit/_cython/_read_some_but_not_all_responses_test.py
rename to src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
index 6ae7a90fbed27ce247d14ecad384b76bb1832e91..152d8edde3b49aa438d04178ae7dac0d61307d91 100644
--- a/src/python/grpcio/tests/unit/_cython/_read_some_but_not_all_responses_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
@@ -126,9 +126,9 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase):
     server_completion_queue = cygrpc.CompletionQueue()
     server = cygrpc.Server()
     server.register_completion_queue(server_completion_queue)
-    port = server.add_http2_port('[::]:0')
+    port = server.add_http2_port(b'[::]:0')
     server.start()
-    channel = cygrpc.Channel('localhost:{}'.format(port))
+    channel = cygrpc.Channel('localhost:{}'.format(port).encode())
 
     server_shutdown_tag = 'server_shutdown_tag'
     server_driver = _ServerDriver(server_completion_queue, server_shutdown_tag)
@@ -168,12 +168,12 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase):
     client_complete_rpc_tag = 'client_complete_rpc_tag'
     with client_condition:
       client_receive_initial_metadata_start_batch_result = (
-          client_call.start_batch(cygrpc.Operations([
+          client_call.start_client_batch(cygrpc.Operations([
               cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
           ]), client_receive_initial_metadata_tag))
       client_due.add(client_receive_initial_metadata_tag)
       client_complete_rpc_start_batch_result = (
-          client_call.start_batch(cygrpc.Operations([
+          client_call.start_client_batch(cygrpc.Operations([
               cygrpc.operation_send_initial_metadata(
                   _EMPTY_METADATA, _EMPTY_FLAGS),
               cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
@@ -185,30 +185,30 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase):
 
     with server_call_condition:
       server_send_initial_metadata_start_batch_result = (
-          server_rpc_event.operation_call.start_batch(cygrpc.Operations([
+          server_rpc_event.operation_call.start_server_batch([
               cygrpc.operation_send_initial_metadata(
                   _EMPTY_METADATA, _EMPTY_FLAGS),
-          ]), server_send_initial_metadata_tag))
+          ], server_send_initial_metadata_tag))
       server_send_first_message_start_batch_result = (
-          server_rpc_event.operation_call.start_batch(cygrpc.Operations([
+          server_rpc_event.operation_call.start_server_batch([
               cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
-          ]), server_send_first_message_tag))
+          ], server_send_first_message_tag))
     server_send_initial_metadata_event = server_call_driver.event_with_tag(
         server_send_initial_metadata_tag)
     server_send_first_message_event = server_call_driver.event_with_tag(
         server_send_first_message_tag)
     with server_call_condition:
       server_send_second_message_start_batch_result = (
-          server_rpc_event.operation_call.start_batch(cygrpc.Operations([
+          server_rpc_event.operation_call.start_server_batch([
               cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
-          ]), server_send_second_message_tag))
+          ], server_send_second_message_tag))
       server_complete_rpc_start_batch_result = (
-          server_rpc_event.operation_call.start_batch(cygrpc.Operations([
+          server_rpc_event.operation_call.start_server_batch([
               cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
               cygrpc.operation_send_status_from_server(
                   cygrpc.Metadata(()), cygrpc.StatusCode.ok, b'test details',
                   _EMPTY_FLAGS),
-          ]), server_complete_rpc_tag))
+          ], server_complete_rpc_tag))
     server_send_second_message_event = server_call_driver.event_with_tag(
         server_send_second_message_tag)
     server_complete_rpc_event = server_call_driver.event_with_tag(
@@ -218,7 +218,7 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase):
     with client_condition:
       client_receive_first_message_tag = 'client_receive_first_message_tag'
       client_receive_first_message_start_batch_result = (
-          client_call.start_batch(cygrpc.Operations([
+          client_call.start_client_batch(cygrpc.Operations([
               cygrpc.operation_receive_message(_EMPTY_FLAGS),
           ]), client_receive_first_message_tag))
       client_due.add(client_receive_first_message_tag)
diff --git a/src/python/grpcio/tests/unit/_cython/cygrpc_test.py b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
similarity index 93%
rename from src/python/grpcio/tests/unit/_cython/cygrpc_test.py
rename to src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
index a006a20ce3f669774ac1e2fc4562fe362c634345..f9a8e2401bbb8feb0e6d3b696a6b486bfd66e8d5 100644
--- a/src/python/grpcio/tests/unit/_cython/cygrpc_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
@@ -46,38 +46,38 @@ def _metadata_plugin_callback(context, callback):
   callback(cygrpc.Metadata(
       [cygrpc.Metadatum(_CALL_CREDENTIALS_METADATA_KEY,
                         _CALL_CREDENTIALS_METADATA_VALUE)]),
-      cygrpc.StatusCode.ok, '')
+      cygrpc.StatusCode.ok, b'')
 
 
 class TypeSmokeTest(unittest.TestCase):
 
   def testStringsInUtilitiesUpDown(self):
     self.assertEqual(0, cygrpc.StatusCode.ok)
-    metadatum = cygrpc.Metadatum('a', 'b')
-    self.assertEqual('a'.encode(), metadatum.key)
-    self.assertEqual('b'.encode(), metadatum.value)
+    metadatum = cygrpc.Metadatum(b'a', b'b')
+    self.assertEqual(b'a', metadatum.key)
+    self.assertEqual(b'b', metadatum.value)
     metadata = cygrpc.Metadata([metadatum])
     self.assertEqual(1, len(metadata))
     self.assertEqual(metadatum.key, metadata[0].key)
 
   def testMetadataIteration(self):
     metadata = cygrpc.Metadata([
-        cygrpc.Metadatum('a', 'b'), cygrpc.Metadatum('c', 'd')])
+        cygrpc.Metadatum(b'a', b'b'), cygrpc.Metadatum(b'c', b'd')])
     iterator = iter(metadata)
     metadatum = next(iterator)
     self.assertIsInstance(metadatum, cygrpc.Metadatum)
-    self.assertEqual(metadatum.key, 'a'.encode())
-    self.assertEqual(metadatum.value, 'b'.encode())
+    self.assertEqual(metadatum.key, b'a')
+    self.assertEqual(metadatum.value, b'b')
     metadatum = next(iterator)
     self.assertIsInstance(metadatum, cygrpc.Metadatum)
-    self.assertEqual(metadatum.key, 'c'.encode())
-    self.assertEqual(metadatum.value, 'd'.encode())
+    self.assertEqual(metadatum.key, b'c')
+    self.assertEqual(metadatum.value, b'd')
     with self.assertRaises(StopIteration):
       next(iterator)
 
   def testOperationsIteration(self):
     operations = cygrpc.Operations([
-        cygrpc.operation_send_message('asdf', _EMPTY_FLAGS)])
+        cygrpc.operation_send_message(b'asdf', _EMPTY_FLAGS)])
     iterator = iter(operations)
     operation = next(iterator)
     self.assertIsInstance(operation, cygrpc.Operation)
@@ -87,7 +87,7 @@ class TypeSmokeTest(unittest.TestCase):
       next(iterator)
 
   def testOperationFlags(self):
-    operation = cygrpc.operation_send_message('asdf',
+    operation = cygrpc.operation_send_message(b'asdf',
                                               cygrpc.WriteFlag.no_compress)
     self.assertEqual(cygrpc.WriteFlag.no_compress, operation.flags)
 
@@ -105,16 +105,16 @@ class TypeSmokeTest(unittest.TestCase):
     del server
 
   def testChannelUpDown(self):
-    channel = cygrpc.Channel('[::]:0', cygrpc.ChannelArgs([]))
+    channel = cygrpc.Channel(b'[::]:0', cygrpc.ChannelArgs([]))
     del channel
 
   def testCredentialsMetadataPluginUpDown(self):
     plugin = cygrpc.CredentialsMetadataPlugin(
-        lambda ignored_a, ignored_b: None, '')
+        lambda ignored_a, ignored_b: None, b'')
     del plugin
 
   def testCallCredentialsFromPluginUpDown(self):
-    plugin = cygrpc.CredentialsMetadataPlugin(_metadata_plugin_callback, '')
+    plugin = cygrpc.CredentialsMetadataPlugin(_metadata_plugin_callback, b'')
     call_credentials = cygrpc.call_credentials_metadata_plugin(plugin)
     del plugin
     del call_credentials
@@ -123,7 +123,7 @@ class TypeSmokeTest(unittest.TestCase):
     server = cygrpc.Server()
     completion_queue = cygrpc.CompletionQueue()
     server.register_completion_queue(completion_queue)
-    port = server.add_http2_port('[::]:0')
+    port = server.add_http2_port(b'[::]:0')
     self.assertIsInstance(port, int)
     server.start()
     del server
@@ -131,7 +131,7 @@ class TypeSmokeTest(unittest.TestCase):
   def testServerStartShutdown(self):
     completion_queue = cygrpc.CompletionQueue()
     server = cygrpc.Server()
-    server.add_http2_port('[::]:0')
+    server.add_http2_port(b'[::]:0')
     server.register_completion_queue(completion_queue)
     server.start()
     shutdown_tag = object()
@@ -150,9 +150,9 @@ class ServerClientMixin(object):
     self.server = cygrpc.Server()
     self.server.register_completion_queue(self.server_completion_queue)
     if server_credentials:
-      self.port = self.server.add_http2_port('[::]:0', server_credentials)
+      self.port = self.server.add_http2_port(b'[::]:0', server_credentials)
     else:
-      self.port = self.server.add_http2_port('[::]:0')
+      self.port = self.server.add_http2_port(b'[::]:0')
     self.server.start()
     self.client_completion_queue = cygrpc.CompletionQueue()
     if client_credentials:
@@ -160,10 +160,10 @@ class ServerClientMixin(object):
           cygrpc.ChannelArg(cygrpc.ChannelArgKey.ssl_target_name_override,
                             host_override)])
       self.client_channel = cygrpc.Channel(
-          'localhost:{}'.format(self.port), client_channel_arguments,
+          'localhost:{}'.format(self.port).encode(), client_channel_arguments,
           client_credentials)
     else:
-      self.client_channel = cygrpc.Channel('localhost:{}'.format(self.port))
+      self.client_channel = cygrpc.Channel('localhost:{}'.format(self.port).encode())
     if host_override:
       self.host_argument = None  # default host
       self.expected_host = host_override
@@ -186,7 +186,8 @@ class ServerClientMixin(object):
     def performer():
       tag = object()
       try:
-        call_result = call.start_batch(cygrpc.Operations(operations), tag)
+        call_result = call.start_client_batch(
+            cygrpc.Operations(operations), tag)
         self.assertEqual(cygrpc.CallError.ok, call_result)
         event = queue.poll(deadline)
         self.assertEqual(cygrpc.CompletionType.operation_complete, event.type)
@@ -231,7 +232,7 @@ class ServerClientMixin(object):
         cygrpc.Metadatum(CLIENT_METADATA_ASCII_KEY,
                          CLIENT_METADATA_ASCII_VALUE),
         cygrpc.Metadatum(CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE)])
-    client_start_batch_result = client_call.start_batch(cygrpc.Operations([
+    client_start_batch_result = client_call.start_client_batch([
         cygrpc.operation_send_initial_metadata(client_initial_metadata,
                                                _EMPTY_FLAGS),
         cygrpc.operation_send_message(REQUEST, _EMPTY_FLAGS),
@@ -239,7 +240,7 @@ class ServerClientMixin(object):
         cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
         cygrpc.operation_receive_message(_EMPTY_FLAGS),
         cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS)
-    ]), client_call_tag)
+    ], client_call_tag)
     self.assertEqual(cygrpc.CallError.ok, client_start_batch_result)
     client_event_future = test_utilities.CompletionQueuePollFuture(
         self.client_completion_queue, cygrpc_deadline)
@@ -268,7 +269,7 @@ class ServerClientMixin(object):
     server_trailing_metadata = cygrpc.Metadata([
         cygrpc.Metadatum(SERVER_TRAILING_METADATA_KEY,
                          SERVER_TRAILING_METADATA_VALUE)])
-    server_start_batch_result = server_call.start_batch([
+    server_start_batch_result = server_call.start_server_batch([
         cygrpc.operation_send_initial_metadata(server_initial_metadata,
                                                _EMPTY_FLAGS),
         cygrpc.operation_receive_message(_EMPTY_FLAGS),
@@ -280,8 +281,8 @@ class ServerClientMixin(object):
     ], server_call_tag)
     self.assertEqual(cygrpc.CallError.ok, server_start_batch_result)
 
-    client_event = client_event_future.result()
     server_event = self.server_completion_queue.poll(cygrpc_deadline)
+    client_event = client_event_future.result()
 
     self.assertEqual(6, len(client_event.batch_operations))
     found_client_op_types = set()
diff --git a/src/python/grpcio/tests/unit/_cython/test_utilities.py b/src/python/grpcio_tests/tests/unit/_cython/test_utilities.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_cython/test_utilities.py
rename to src/python/grpcio_tests/tests/unit/_cython/test_utilities.py
diff --git a/src/python/grpcio/tests/unit/_empty_message_test.py b/src/python/grpcio_tests/tests/unit/_empty_message_test.py
similarity index 95%
rename from src/python/grpcio/tests/unit/_empty_message_test.py
rename to src/python/grpcio_tests/tests/unit/_empty_message_test.py
index f324f6216b74ee5df964ab8e6b28ca2740b97d1e..131f6e945253acae867894f34cedf1c6c60c5705 100644
--- a/src/python/grpcio/tests/unit/_empty_message_test.py
+++ b/src/python/grpcio_tests/tests/unit/_empty_message_test.py
@@ -37,10 +37,10 @@ from tests.unit.framework.common import test_constants
 _REQUEST = b''
 _RESPONSE = b''
 
-_UNARY_UNARY = b'/test/UnaryUnary'
-_UNARY_STREAM = b'/test/UnaryStream'
-_STREAM_UNARY = b'/test/StreamUnary'
-_STREAM_STREAM = b'/test/StreamStream'
+_UNARY_UNARY = '/test/UnaryUnary'
+_UNARY_STREAM = '/test/UnaryStream'
+_STREAM_UNARY = '/test/StreamUnary'
+_STREAM_STREAM = '/test/StreamStream'
 
 
 def handle_unary_unary(request, servicer_context):
@@ -103,7 +103,8 @@ class EmptyMessageTest(unittest.TestCase):
 
   def setUp(self):
     self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-    self._server = grpc.server((_GenericHandler(),), self._server_pool)
+    self._server = grpc.server(
+        self._server_pool, handlers=(_GenericHandler(),))
     port = self._server.add_insecure_port('[::]:0')
     self._server.start()
     self._channel = grpc.insecure_channel('localhost:%d' % port)
diff --git a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
new file mode 100644
index 0000000000000000000000000000000000000000..b33802bf57294d7e45edbc5cf739cee43ad65b4d
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
@@ -0,0 +1,249 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Defines a number of module-scope gRPC scenarios to test clean exit."""
+
+import argparse
+import threading
+import time
+
+import grpc
+
+from tests.unit.framework.common import test_constants
+
+WAIT_TIME = 1000
+
+REQUEST = b'request'
+
+UNSTARTED_SERVER = 'unstarted_server'
+RUNNING_SERVER = 'running_server'
+POLL_CONNECTIVITY_NO_SERVER = 'poll_connectivity_no_server'
+POLL_CONNECTIVITY = 'poll_connectivity'
+IN_FLIGHT_UNARY_UNARY_CALL = 'in_flight_unary_unary_call'
+IN_FLIGHT_UNARY_STREAM_CALL = 'in_flight_unary_stream_call'
+IN_FLIGHT_STREAM_UNARY_CALL = 'in_flight_stream_unary_call'
+IN_FLIGHT_STREAM_STREAM_CALL = 'in_flight_stream_stream_call'
+IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL = 'in_flight_partial_unary_stream_call'
+IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL = 'in_flight_partial_stream_unary_call'
+IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL = 'in_flight_partial_stream_stream_call'
+
+UNARY_UNARY = b'/test/UnaryUnary'
+UNARY_STREAM = b'/test/UnaryStream'
+STREAM_UNARY = b'/test/StreamUnary'
+STREAM_STREAM = b'/test/StreamStream'
+PARTIAL_UNARY_STREAM = b'/test/PartialUnaryStream'
+PARTIAL_STREAM_UNARY = b'/test/PartialStreamUnary'
+PARTIAL_STREAM_STREAM = b'/test/PartialStreamStream'
+
+TEST_TO_METHOD = {
+    IN_FLIGHT_UNARY_UNARY_CALL: UNARY_UNARY,
+    IN_FLIGHT_UNARY_STREAM_CALL: UNARY_STREAM,
+    IN_FLIGHT_STREAM_UNARY_CALL: STREAM_UNARY,
+    IN_FLIGHT_STREAM_STREAM_CALL: STREAM_STREAM,
+    IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL: PARTIAL_UNARY_STREAM,
+    IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL: PARTIAL_STREAM_UNARY,
+    IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL: PARTIAL_STREAM_STREAM,
+}
+
+
+def hang_unary_unary(request, servicer_context):
+  time.sleep(WAIT_TIME)
+
+
+def hang_unary_stream(request, servicer_context):
+  time.sleep(WAIT_TIME)
+
+
+def hang_partial_unary_stream(request, servicer_context):
+  for _ in range(test_constants.STREAM_LENGTH // 2):
+    yield request
+  time.sleep(WAIT_TIME)
+
+
+def hang_stream_unary(request_iterator, servicer_context):
+  time.sleep(WAIT_TIME)
+
+
+def hang_partial_stream_unary(request_iterator, servicer_context):
+  for _ in range(test_constants.STREAM_LENGTH // 2):
+    next(request_iterator)
+  time.sleep(WAIT_TIME)
+
+
+def hang_stream_stream(request_iterator, servicer_context):
+  time.sleep(WAIT_TIME)
+
+
+def hang_partial_stream_stream(request_iterator, servicer_context):
+  for _ in range(test_constants.STREAM_LENGTH // 2):
+    yield next(request_iterator)
+  time.sleep(WAIT_TIME)
+
+
+class MethodHandler(grpc.RpcMethodHandler):
+
+  def __init__(self, request_streaming, response_streaming, partial_hang):
+    self.request_streaming = request_streaming
+    self.response_streaming = response_streaming
+    self.request_deserializer = None
+    self.response_serializer = None
+    self.unary_unary = None
+    self.unary_stream = None
+    self.stream_unary = None
+    self.stream_stream = None
+    if self.request_streaming and self.response_streaming:
+      if partial_hang:
+        self.stream_stream = hang_partial_stream_stream
+      else:
+        self.stream_stream = hang_stream_stream
+    elif self.request_streaming:
+      if partial_hang:
+        self.stream_unary = hang_partial_stream_unary
+      else:
+        self.stream_unary = hang_stream_unary
+    elif self.response_streaming:
+      if partial_hang:
+        self.unary_stream = hang_partial_unary_stream
+      else:
+        self.unary_stream = hang_unary_stream
+    else:
+      self.unary_unary = hang_unary_unary
+
+
+class GenericHandler(grpc.GenericRpcHandler):
+
+  def service(self, handler_call_details):
+    if handler_call_details.method == UNARY_UNARY:
+      return MethodHandler(False, False, False)
+    elif handler_call_details.method == UNARY_STREAM:
+      return MethodHandler(False, True, False)
+    elif handler_call_details.method == STREAM_UNARY:
+      return MethodHandler(True, False, False)
+    elif handler_call_details.method == STREAM_STREAM:
+      return MethodHandler(True, True, False)
+    elif handler_call_details.method == PARTIAL_UNARY_STREAM:
+      return MethodHandler(False, True, True)
+    elif handler_call_details.method == PARTIAL_STREAM_UNARY:
+      return MethodHandler(True, False, True)
+    elif handler_call_details.method == PARTIAL_STREAM_STREAM:
+      return MethodHandler(True, True, True)
+    else:
+      return None
+
+
+# Traditional executors will not exit until all their
+# current jobs complete.  Because we submit jobs that will
+# never finish, we don't want to block exit on these jobs.
+class DaemonPool(object):
+
+  def submit(self, fn, *args, **kwargs):
+    thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
+    thread.daemon = True
+    thread.start()
+
+  def shutdown(self, wait=True):
+    pass
+
+
+def infinite_request_iterator():
+  while True:
+    yield REQUEST
+
+
+if __name__ == '__main__':
+  parser = argparse.ArgumentParser()
+  parser.add_argument('scenario', type=str)
+  parser.add_argument(
+      '--wait_for_interrupt', dest='wait_for_interrupt', action='store_true')
+  args = parser.parse_args()
+
+  if args.scenario == UNSTARTED_SERVER:
+    server = grpc.server(DaemonPool())
+    if args.wait_for_interrupt:
+      time.sleep(WAIT_TIME)
+  elif args.scenario == RUNNING_SERVER:
+    server = grpc.server(DaemonPool())
+    port = server.add_insecure_port('[::]:0')
+    server.start()
+    if args.wait_for_interrupt:
+      time.sleep(WAIT_TIME)
+  elif args.scenario == POLL_CONNECTIVITY_NO_SERVER:
+    channel = grpc.insecure_channel('localhost:12345')
+
+    def connectivity_callback(connectivity):
+      pass
+
+    channel.subscribe(connectivity_callback, try_to_connect=True)
+    if args.wait_for_interrupt:
+      time.sleep(WAIT_TIME)
+  elif args.scenario == POLL_CONNECTIVITY:
+    server = grpc.server(DaemonPool())
+    port = server.add_insecure_port('[::]:0')
+    server.start()
+    channel = grpc.insecure_channel('localhost:%d' % port)
+
+    def connectivity_callback(connectivity):
+      pass
+
+    channel.subscribe(connectivity_callback, try_to_connect=True)
+    if args.wait_for_interrupt:
+      time.sleep(WAIT_TIME)
+
+  else:
+    handler = GenericHandler()
+    server = grpc.server(DaemonPool())
+    port = server.add_insecure_port('[::]:0')
+    server.add_generic_rpc_handlers((handler,))
+    server.start()
+    channel = grpc.insecure_channel('localhost:%d' % port)
+
+    method = TEST_TO_METHOD[args.scenario]
+
+    if args.scenario == IN_FLIGHT_UNARY_UNARY_CALL:
+      multi_callable = channel.unary_unary(method)
+      future = multi_callable.future(REQUEST)
+      result, call = multi_callable.with_call(REQUEST)
+    elif (args.scenario == IN_FLIGHT_UNARY_STREAM_CALL or
+          args.scenario == IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL):
+      multi_callable = channel.unary_stream(method)
+      response_iterator = multi_callable(REQUEST)
+      for response in response_iterator:
+        pass
+    elif (args.scenario == IN_FLIGHT_STREAM_UNARY_CALL or
+          args.scenario == IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL):
+      multi_callable = channel.stream_unary(method)
+      future = multi_callable.future(infinite_request_iterator())
+      result, call = multi_callable.with_call(
+          [REQUEST] * test_constants.STREAM_LENGTH)
+    elif (args.scenario == IN_FLIGHT_STREAM_STREAM_CALL or
+          args.scenario == IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL):
+      multi_callable = channel.stream_stream(method)
+      response_iterator = multi_callable(infinite_request_iterator())
+      for response in response_iterator:
+        pass
diff --git a/src/python/grpcio_tests/tests/unit/_exit_test.py b/src/python/grpcio_tests/tests/unit/_exit_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a4a32887c3634cdd263b65831c8e65fa88bb65b
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_exit_test.py
@@ -0,0 +1,186 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests clean exit of server/client on Python Interpreter exit/sigint.
+
+The tests in this module spawn a subprocess for each test case, the
+test is considered successful if it doesn't hang/timeout.
+"""
+
+import atexit
+import os
+import signal
+import six
+import subprocess
+import sys
+import threading
+import time
+import unittest
+
+from tests.unit import _exit_scenarios
+
+SCENARIO_FILE = os.path.abspath(os.path.join(
+    os.path.dirname(os.path.realpath(__file__)), '_exit_scenarios.py'))
+INTERPRETER = sys.executable
+BASE_COMMAND = [INTERPRETER, SCENARIO_FILE]
+BASE_SIGTERM_COMMAND = BASE_COMMAND + ['--wait_for_interrupt']
+
+INIT_TIME = 1.0
+
+
+processes = []
+process_lock = threading.Lock()
+
+
+# Make sure we attempt to clean up any
+# processes we may have left running
+def cleanup_processes():
+  with process_lock:
+    for process in processes:
+      try:
+        process.kill()
+      except Exception:
+        pass
+atexit.register(cleanup_processes)
+
+
+def interrupt_and_wait(process):
+  with process_lock:
+    processes.append(process)
+  time.sleep(INIT_TIME)
+  os.kill(process.pid, signal.SIGINT)
+  process.wait()
+
+
+def wait(process):
+  with process_lock:
+    processes.append(process)
+  process.wait()
+
+
+@unittest.skip('https://github.com/grpc/grpc/issues/7311')
+class ExitTest(unittest.TestCase):
+
+  def test_unstarted_server(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.UNSTARTED_SERVER],
+        stdout=sys.stdout, stderr=sys.stderr)
+    wait(process)
+
+  def test_unstarted_server_terminate(self):
+    process = subprocess.Popen(
+        BASE_SIGTERM_COMMAND + [_exit_scenarios.UNSTARTED_SERVER],
+        stdout=sys.stdout)
+    interrupt_and_wait(process)
+
+  def test_running_server(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.RUNNING_SERVER],
+        stdout=sys.stdout, stderr=sys.stderr)
+    wait(process)
+
+  def test_running_server_terminate(self):
+    process = subprocess.Popen(
+        BASE_SIGTERM_COMMAND + [_exit_scenarios.RUNNING_SERVER],
+        stdout=sys.stdout, stderr=sys.stderr)
+    interrupt_and_wait(process)
+
+  def test_poll_connectivity_no_server(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY_NO_SERVER],
+        stdout=sys.stdout, stderr=sys.stderr)
+    wait(process)
+
+  def test_poll_connectivity_no_server_terminate(self):
+    process = subprocess.Popen(
+        BASE_SIGTERM_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY_NO_SERVER],
+        stdout=sys.stdout, stderr=sys.stderr)
+    interrupt_and_wait(process)
+
+  def test_poll_connectivity(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY],
+        stdout=sys.stdout, stderr=sys.stderr)
+    wait(process)
+
+  def test_poll_connectivity_terminate(self):
+    process = subprocess.Popen(
+        BASE_SIGTERM_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY],
+        stdout=sys.stdout, stderr=sys.stderr)
+    interrupt_and_wait(process)
+
+  def test_in_flight_unary_unary_call(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_UNARY_UNARY_CALL],
+        stdout=sys.stdout, stderr=sys.stderr)
+    interrupt_and_wait(process)
+
+  @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
+  def test_in_flight_unary_stream_call(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_UNARY_STREAM_CALL],
+        stdout=sys.stdout, stderr=sys.stderr)
+    interrupt_and_wait(process)
+
+  def test_in_flight_stream_unary_call(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_STREAM_UNARY_CALL],
+        stdout=sys.stdout, stderr=sys.stderr)
+    interrupt_and_wait(process)
+
+  @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
+  def test_in_flight_stream_stream_call(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_STREAM_STREAM_CALL],
+        stdout=sys.stdout, stderr=sys.stderr)
+    interrupt_and_wait(process)
+
+  @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
+  def test_in_flight_partial_unary_stream_call(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL],
+        stdout=sys.stdout, stderr=sys.stderr)
+    interrupt_and_wait(process)
+
+  def test_in_flight_partial_stream_unary_call(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL],
+        stdout=sys.stdout, stderr=sys.stderr)
+    interrupt_and_wait(process)
+
+  @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999')
+  def test_in_flight_partial_stream_stream_call(self):
+    process = subprocess.Popen(
+        BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL],
+        stdout=sys.stdout, stderr=sys.stderr)
+    interrupt_and_wait(process)
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
diff --git a/src/python/grpcio/tests/unit/_from_grpc_import_star.py b/src/python/grpcio_tests/tests/unit/_from_grpc_import_star.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_from_grpc_import_star.py
rename to src/python/grpcio_tests/tests/unit/_from_grpc_import_star.py
diff --git a/src/python/grpcio/tests/unit/_junkdrawer/__init__.py b/src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_junkdrawer/__init__.py
rename to src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py
diff --git a/src/python/grpcio/tests/unit/_junkdrawer/stock_pb2.py b/src/python/grpcio_tests/tests/unit/_junkdrawer/stock_pb2.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_junkdrawer/stock_pb2.py
rename to src/python/grpcio_tests/tests/unit/_junkdrawer/stock_pb2.py
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb3e5477815777f0968be96553b7901e1129c106
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
@@ -0,0 +1,523 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests application-provided metadata, status code, and details."""
+
+import threading
+import unittest
+
+import grpc
+from grpc.framework.foundation import logging_pool
+
+from tests.unit import test_common
+from tests.unit.framework.common import test_constants
+from tests.unit.framework.common import test_control
+
+_SERIALIZED_REQUEST = b'\x46\x47\x48'
+_SERIALIZED_RESPONSE = b'\x49\x50\x51'
+
+_REQUEST_SERIALIZER = lambda unused_request: _SERIALIZED_REQUEST
+_REQUEST_DESERIALIZER = lambda unused_serialized_request: object()
+_RESPONSE_SERIALIZER = lambda unused_response: _SERIALIZED_RESPONSE
+_RESPONSE_DESERIALIZER = lambda unused_serialized_response: object()
+
+_SERVICE = 'test.TestService'
+_UNARY_UNARY = 'UnaryUnary'
+_UNARY_STREAM = 'UnaryStream'
+_STREAM_UNARY = 'StreamUnary'
+_STREAM_STREAM = 'StreamStream'
+
+_CLIENT_METADATA = (
+    ('client-md-key', 'client-md-key'),
+    ('client-md-key-bin', b'\x00\x01')
+)
+
+_SERVER_INITIAL_METADATA = (
+    ('server-initial-md-key', 'server-initial-md-value'),
+    ('server-initial-md-key-bin', b'\x00\x02')
+)
+
+_SERVER_TRAILING_METADATA = (
+    ('server-trailing-md-key', 'server-trailing-md-value'),
+    ('server-trailing-md-key-bin', b'\x00\x03')
+)
+
+_NON_OK_CODE = grpc.StatusCode.NOT_FOUND
+_DETAILS = 'Test details!'
+
+
+class _Servicer(object):
+
+  def __init__(self):
+    self._lock = threading.Lock()
+    self._code = None
+    self._details = None
+    self._exception = False
+    self._return_none = False
+    self._received_client_metadata = None
+
+  def unary_unary(self, request, context):
+    with self._lock:
+      self._received_client_metadata = context.invocation_metadata()
+      context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+      context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+      if self._code is not None:
+        context.set_code(self._code)
+      if self._details is not None:
+        context.set_details(self._details)
+      if self._exception:
+        raise test_control.Defect()
+      else:
+        return None if self._return_none else object()
+
+  def unary_stream(self, request, context):
+    with self._lock:
+      self._received_client_metadata = context.invocation_metadata()
+      context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+      context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+      if self._code is not None:
+        context.set_code(self._code)
+      if self._details is not None:
+        context.set_details(self._details)
+      for _ in range(test_constants.STREAM_LENGTH // 2):
+        yield _SERIALIZED_RESPONSE
+      if self._exception:
+        raise test_control.Defect()
+
+  def stream_unary(self, request_iterator, context):
+    with self._lock:
+      self._received_client_metadata = context.invocation_metadata()
+      context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+      context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+      if self._code is not None:
+        context.set_code(self._code)
+      if self._details is not None:
+        context.set_details(self._details)
+      # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
+      # request iterator.
+      for ignored_request in request_iterator:
+        pass
+      if self._exception:
+        raise test_control.Defect()
+      else:
+        return None if self._return_none else _SERIALIZED_RESPONSE
+
+  def stream_stream(self, request_iterator, context):
+    with self._lock:
+      self._received_client_metadata = context.invocation_metadata()
+      context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+      context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+      if self._code is not None:
+        context.set_code(self._code)
+      if self._details is not None:
+        context.set_details(self._details)
+      # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
+      # request iterator.
+      for ignored_request in request_iterator:
+        pass
+      for _ in range(test_constants.STREAM_LENGTH // 3):
+        yield object()
+      if self._exception:
+        raise test_control.Defect()
+
+  def set_code(self, code):
+    with self._lock:
+      self._code = code
+
+  def set_details(self, details):
+    with self._lock:
+      self._details = details
+
+  def set_exception(self):
+    with self._lock:
+      self._exception = True
+
+  def set_return_none(self):
+    with self._lock:
+      self._return_none = True
+
+  def received_client_metadata(self):
+    with self._lock:
+      return self._received_client_metadata
+
+
+def _generic_handler(servicer):
+  method_handlers = {
+      _UNARY_UNARY: grpc.unary_unary_rpc_method_handler(
+          servicer.unary_unary, request_deserializer=_REQUEST_DESERIALIZER,
+          response_serializer=_RESPONSE_SERIALIZER),
+      _UNARY_STREAM: grpc.unary_stream_rpc_method_handler(
+          servicer.unary_stream),
+      _STREAM_UNARY: grpc.stream_unary_rpc_method_handler(
+          servicer.stream_unary),
+      _STREAM_STREAM: grpc.stream_stream_rpc_method_handler(
+          servicer.stream_stream, request_deserializer=_REQUEST_DESERIALIZER,
+          response_serializer=_RESPONSE_SERIALIZER),
+  }
+  return grpc.method_handlers_generic_handler(_SERVICE, method_handlers)
+
+
+class MetadataCodeDetailsTest(unittest.TestCase):
+
+  def setUp(self):
+    self._servicer = _Servicer()
+    self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+    self._server = grpc.server(
+        self._server_pool, handlers=(_generic_handler(self._servicer),))
+    port = self._server.add_insecure_port('[::]:0')
+    self._server.start()
+
+    channel = grpc.insecure_channel('localhost:{}'.format(port))
+    self._unary_unary = channel.unary_unary(
+        '/'.join(('', _SERVICE, _UNARY_UNARY,)),
+        request_serializer=_REQUEST_SERIALIZER,
+        response_deserializer=_RESPONSE_DESERIALIZER,)
+    self._unary_stream = channel.unary_stream(
+        '/'.join(('', _SERVICE, _UNARY_STREAM,)),)
+    self._stream_unary = channel.stream_unary(
+        '/'.join(('', _SERVICE, _STREAM_UNARY,)),)
+    self._stream_stream = channel.stream_stream(
+        '/'.join(('', _SERVICE, _STREAM_STREAM,)),
+        request_serializer=_REQUEST_SERIALIZER,
+        response_deserializer=_RESPONSE_DESERIALIZER,)
+
+
+  def testSuccessfulUnaryUnary(self):
+    self._servicer.set_details(_DETAILS)
+
+    unused_response, call = self._unary_unary.with_call(
+        object(), metadata=_CLIENT_METADATA)
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA, call.initial_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+    self.assertIs(grpc.StatusCode.OK, call.code())
+    self.assertEqual(_DETAILS, call.details())
+
+  def testSuccessfulUnaryStream(self):
+    self._servicer.set_details(_DETAILS)
+
+    call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
+    received_initial_metadata = call.initial_metadata()
+    for _ in call:
+      pass
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA, received_initial_metadata))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+    self.assertIs(grpc.StatusCode.OK, call.code())
+    self.assertEqual(_DETAILS, call.details())
+
+  def testSuccessfulStreamUnary(self):
+    self._servicer.set_details(_DETAILS)
+
+    unused_response, call = self._stream_unary.with_call(
+        iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
+        metadata=_CLIENT_METADATA)
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA, call.initial_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+    self.assertIs(grpc.StatusCode.OK, call.code())
+    self.assertEqual(_DETAILS, call.details())
+
+  def testSuccessfulStreamStream(self):
+    self._servicer.set_details(_DETAILS)
+
+    call = self._stream_stream(
+        iter([object()] * test_constants.STREAM_LENGTH),
+        metadata=_CLIENT_METADATA)
+    received_initial_metadata = call.initial_metadata()
+    for _ in call:
+      pass
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA, received_initial_metadata))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+    self.assertIs(grpc.StatusCode.OK, call.code())
+    self.assertEqual(_DETAILS, call.details())
+
+  def testCustomCodeUnaryUnary(self):
+    self._servicer.set_code(_NON_OK_CODE)
+    self._servicer.set_details(_DETAILS)
+
+    with self.assertRaises(grpc.RpcError) as exception_context:
+      self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA,
+            exception_context.exception.initial_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA,
+            exception_context.exception.trailing_metadata()))
+    self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+    self.assertEqual(_DETAILS, exception_context.exception.details())
+
+  def testCustomCodeUnaryStream(self):
+    self._servicer.set_code(_NON_OK_CODE)
+    self._servicer.set_details(_DETAILS)
+
+    call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
+    received_initial_metadata = call.initial_metadata()
+    with self.assertRaises(grpc.RpcError):
+      for _ in call:
+        pass
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA, received_initial_metadata))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+    self.assertIs(_NON_OK_CODE, call.code())
+    self.assertEqual(_DETAILS, call.details())
+
+  def testCustomCodeStreamUnary(self):
+    self._servicer.set_code(_NON_OK_CODE)
+    self._servicer.set_details(_DETAILS)
+
+    with self.assertRaises(grpc.RpcError) as exception_context:
+      self._stream_unary.with_call(
+          iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
+          metadata=_CLIENT_METADATA)
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA,
+            exception_context.exception.initial_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA,
+            exception_context.exception.trailing_metadata()))
+    self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+    self.assertEqual(_DETAILS, exception_context.exception.details())
+
+  def testCustomCodeStreamStream(self):
+    self._servicer.set_code(_NON_OK_CODE)
+    self._servicer.set_details(_DETAILS)
+
+    call = self._stream_stream(
+        iter([object()] * test_constants.STREAM_LENGTH),
+        metadata=_CLIENT_METADATA)
+    received_initial_metadata = call.initial_metadata()
+    with self.assertRaises(grpc.RpcError) as exception_context:
+      for _ in call:
+        pass
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA, received_initial_metadata))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA,
+            exception_context.exception.trailing_metadata()))
+    self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+    self.assertEqual(_DETAILS, exception_context.exception.details())
+
+  def testCustomCodeExceptionUnaryUnary(self):
+    self._servicer.set_code(_NON_OK_CODE)
+    self._servicer.set_details(_DETAILS)
+    self._servicer.set_exception()
+
+    with self.assertRaises(grpc.RpcError) as exception_context:
+      self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA,
+            exception_context.exception.initial_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA,
+            exception_context.exception.trailing_metadata()))
+    self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+    self.assertEqual(_DETAILS, exception_context.exception.details())
+
+  def testCustomCodeExceptionUnaryStream(self):
+    self._servicer.set_code(_NON_OK_CODE)
+    self._servicer.set_details(_DETAILS)
+    self._servicer.set_exception()
+
+    call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
+    received_initial_metadata = call.initial_metadata()
+    with self.assertRaises(grpc.RpcError):
+      for _ in call:
+        pass
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA, received_initial_metadata))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+    self.assertIs(_NON_OK_CODE, call.code())
+    self.assertEqual(_DETAILS, call.details())
+
+  def testCustomCodeExceptionStreamUnary(self):
+    self._servicer.set_code(_NON_OK_CODE)
+    self._servicer.set_details(_DETAILS)
+    self._servicer.set_exception()
+
+    with self.assertRaises(grpc.RpcError) as exception_context:
+      self._stream_unary.with_call(
+          iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
+          metadata=_CLIENT_METADATA)
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA,
+            exception_context.exception.initial_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA,
+            exception_context.exception.trailing_metadata()))
+    self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+    self.assertEqual(_DETAILS, exception_context.exception.details())
+
+  def testCustomCodeExceptionStreamStream(self):
+    self._servicer.set_code(_NON_OK_CODE)
+    self._servicer.set_details(_DETAILS)
+    self._servicer.set_exception()
+
+    call = self._stream_stream(
+        iter([object()] * test_constants.STREAM_LENGTH),
+        metadata=_CLIENT_METADATA)
+    received_initial_metadata = call.initial_metadata()
+    with self.assertRaises(grpc.RpcError):
+      for _ in call:
+        pass
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA, received_initial_metadata))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+    self.assertIs(_NON_OK_CODE, call.code())
+    self.assertEqual(_DETAILS, call.details())
+
+  def testCustomCodeReturnNoneUnaryUnary(self):
+    self._servicer.set_code(_NON_OK_CODE)
+    self._servicer.set_details(_DETAILS)
+    self._servicer.set_return_none()
+
+    with self.assertRaises(grpc.RpcError) as exception_context:
+      self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA,
+            exception_context.exception.initial_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA,
+            exception_context.exception.trailing_metadata()))
+    self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+    self.assertEqual(_DETAILS, exception_context.exception.details())
+
+  def testCustomCodeReturnNoneStreamUnary(self):
+    self._servicer.set_code(_NON_OK_CODE)
+    self._servicer.set_details(_DETAILS)
+    self._servicer.set_return_none()
+
+    with self.assertRaises(grpc.RpcError) as exception_context:
+      self._stream_unary.with_call(
+          iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
+          metadata=_CLIENT_METADATA)
+
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _CLIENT_METADATA, self._servicer.received_client_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_INITIAL_METADATA,
+            exception_context.exception.initial_metadata()))
+    self.assertTrue(
+        test_common.metadata_transmitted(
+            _SERVER_TRAILING_METADATA,
+            exception_context.exception.trailing_metadata()))
+    self.assertIs(_NON_OK_CODE, exception_context.exception.code())
+    self.assertEqual(_DETAILS, exception_context.exception.details())
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
diff --git a/src/python/grpcio/tests/unit/_metadata_test.py b/src/python/grpcio_tests/tests/unit/_metadata_test.py
similarity index 89%
rename from src/python/grpcio/tests/unit/_metadata_test.py
rename to src/python/grpcio_tests/tests/unit/_metadata_test.py
index 77b39012619a16d94fb983c44fe98c5b3215e055..da7347692998d8e0fa35b5a04db790e23cde8850 100644
--- a/src/python/grpcio/tests/unit/_metadata_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_test.py
@@ -44,33 +44,33 @@ _CHANNEL_ARGS = (('grpc.primary_user_agent', 'primary-agent'),
 _REQUEST = b'\x00\x00\x00'
 _RESPONSE = b'\x00\x00\x00'
 
-_UNARY_UNARY = b'/test/UnaryUnary'
-_UNARY_STREAM = b'/test/UnaryStream'
-_STREAM_UNARY = b'/test/StreamUnary'
-_STREAM_STREAM = b'/test/StreamStream'
+_UNARY_UNARY = '/test/UnaryUnary'
+_UNARY_STREAM = '/test/UnaryStream'
+_STREAM_UNARY = '/test/StreamUnary'
+_STREAM_STREAM = '/test/StreamStream'
 
 _USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
 
 _CLIENT_METADATA = (
-    (b'client-md-key', b'client-md-key'),
-    (b'client-md-key-bin', b'\x00\x01')
+    ('client-md-key', 'client-md-key'),
+    ('client-md-key-bin', b'\x00\x01')
 )
 
 _SERVER_INITIAL_METADATA = (
-    (b'server-initial-md-key', b'server-initial-md-value'),
-    (b'server-initial-md-key-bin', b'\x00\x02')
+    ('server-initial-md-key', 'server-initial-md-value'),
+    ('server-initial-md-key-bin', b'\x00\x02')
 )
 
 _SERVER_TRAILING_METADATA = (
-    (b'server-trailing-md-key', b'server-trailing-md-value'),
-    (b'server-trailing-md-key-bin', b'\x00\x03')
+    ('server-trailing-md-key', 'server-trailing-md-value'),
+    ('server-trailing-md-key-bin', b'\x00\x03')
 )
 
 
 def user_agent(metadata):
   for key, val in metadata:
-    if key == b'user-agent':
-      return val.decode('ascii')
+    if key == 'user-agent':
+      return val
   raise KeyError('No user agent!')
 
 
@@ -161,8 +161,8 @@ class MetadataTest(unittest.TestCase):
 
   def setUp(self):
     self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-    self._server = grpc.server((_GenericHandler(weakref.proxy(self)),),
-                               self._server_pool)
+    self._server = grpc.server(
+        self._server_pool, handlers=(_GenericHandler(weakref.proxy(self)),))
     port = self._server.add_insecure_port('[::]:0')
     self._server.start()
     self._channel = grpc.insecure_channel('localhost:%d' % port,
@@ -173,8 +173,8 @@ class MetadataTest(unittest.TestCase):
 
   def testUnaryUnary(self):
     multi_callable = self._channel.unary_unary(_UNARY_UNARY)
-    unused_response, call = multi_callable(
-        _REQUEST, metadata=_CLIENT_METADATA, with_call=True)
+    unused_response, call = multi_callable.with_call(
+        _REQUEST, metadata=_CLIENT_METADATA)
     self.assertTrue(test_common.metadata_transmitted(
         _SERVER_INITIAL_METADATA, call.initial_metadata()))
     self.assertTrue(test_common.metadata_transmitted(
@@ -192,9 +192,9 @@ class MetadataTest(unittest.TestCase):
 
   def testStreamUnary(self):
     multi_callable = self._channel.stream_unary(_STREAM_UNARY)
-    unused_response, call = multi_callable(
+    unused_response, call = multi_callable.with_call(
         [_REQUEST] * test_constants.STREAM_LENGTH,
-        metadata=_CLIENT_METADATA, with_call=True)
+        metadata=_CLIENT_METADATA)
     self.assertTrue(test_common.metadata_transmitted(
         _SERVER_INITIAL_METADATA, call.initial_metadata()))
     self.assertTrue(test_common.metadata_transmitted(
diff --git a/src/python/grpcio/tests/unit/_rpc_test.py b/src/python/grpcio_tests/tests/unit/_rpc_test.py
similarity index 88%
rename from src/python/grpcio/tests/unit/_rpc_test.py
rename to src/python/grpcio_tests/tests/unit/_rpc_test.py
index 8407593c86d37c3365ed4013f0437addc4ed4318..59bf240d286bc522053a2e6fa843f68d61d83372 100644
--- a/src/python/grpcio/tests/unit/_rpc_test.py
+++ b/src/python/grpcio_tests/tests/unit/_rpc_test.py
@@ -27,7 +27,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""Test of gRPC Python's application-layer API."""
+"""Test of RPCs made against gRPC Python's application-layer API."""
 
 import itertools
 import threading
@@ -45,10 +45,10 @@ _DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
 _SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
 _DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
 
-_UNARY_UNARY = b'/test/UnaryUnary'
-_UNARY_STREAM = b'/test/UnaryStream'
-_STREAM_UNARY = b'/test/StreamUnary'
-_STREAM_STREAM = b'/test/StreamStream'
+_UNARY_UNARY = '/test/UnaryUnary'
+_UNARY_STREAM = '/test/UnaryStream'
+_STREAM_UNARY = '/test/StreamUnary'
+_STREAM_STREAM = '/test/StreamStream'
 
 
 class _Callback(object):
@@ -79,7 +79,7 @@ class _Handler(object):
   def handle_unary_unary(self, request, servicer_context):
     self._control.control()
     if servicer_context is not None:
-      servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
+      servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
     return request
 
   def handle_unary_stream(self, request, servicer_context):
@@ -88,7 +88,7 @@ class _Handler(object):
       yield request
     self._control.control()
     if servicer_context is not None:
-      servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
+      servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
 
   def handle_stream_unary(self, request_iterator, servicer_context):
     if servicer_context is not None:
@@ -100,13 +100,13 @@ class _Handler(object):
       response_elements.append(request)
     self._control.control()
     if servicer_context is not None:
-      servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
+      servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
     return b''.join(response_elements)
 
   def handle_stream_stream(self, request_iterator, servicer_context):
     self._control.control()
     if servicer_context is not None:
-      servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
+      servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
     for request in request_iterator:
       self._control.control()
       yield request
@@ -184,8 +184,8 @@ class RPCTest(unittest.TestCase):
     self._handler = _Handler(self._control)
     self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
 
-    self._server = grpc.server((), self._server_pool)
-    port = self._server.add_insecure_port(b'[::]:0')
+    self._server = grpc.server(self._server_pool)
+    port = self._server.add_insecure_port('[::]:0')
     self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
     self._server.start()
 
@@ -195,7 +195,7 @@ class RPCTest(unittest.TestCase):
     request = b'abc'
 
     with self.assertRaises(grpc.RpcError) as exception_context:
-      self._channel.unary_unary(b'NoSuchMethod')(request)
+      self._channel.unary_unary('NoSuchMethod')(request)
 
     self.assertEqual(
         grpc.StatusCode.UNIMPLEMENTED, exception_context.exception.code())
@@ -207,7 +207,7 @@ class RPCTest(unittest.TestCase):
     multi_callable = _unary_unary_multi_callable(self._channel)
     response = multi_callable(
         request, metadata=(
-            (b'test', b'SuccessfulUnaryRequestBlockingUnaryResponse'),))
+            ('test', 'SuccessfulUnaryRequestBlockingUnaryResponse'),))
 
     self.assertEqual(expected_response, response)
 
@@ -216,10 +216,9 @@ class RPCTest(unittest.TestCase):
     expected_response = self._handler.handle_unary_unary(request, None)
 
     multi_callable = _unary_unary_multi_callable(self._channel)
-    response, call = multi_callable(
+    response, call = multi_callable.with_call(
         request, metadata=(
-            (b'test', b'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),),
-        with_call=True)
+            ('test', 'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),))
 
     self.assertEqual(expected_response, response)
     self.assertIs(grpc.StatusCode.OK, call.code())
@@ -231,7 +230,7 @@ class RPCTest(unittest.TestCase):
     multi_callable = _unary_unary_multi_callable(self._channel)
     response_future = multi_callable.future(
         request, metadata=(
-            (b'test', b'SuccessfulUnaryRequestFutureUnaryResponse'),))
+            ('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),))
     response = response_future.result()
 
     self.assertEqual(expected_response, response)
@@ -243,7 +242,7 @@ class RPCTest(unittest.TestCase):
     multi_callable = _unary_stream_multi_callable(self._channel)
     response_iterator = multi_callable(
         request,
-        metadata=((b'test', b'SuccessfulUnaryRequestStreamResponse'),))
+        metadata=(('test', 'SuccessfulUnaryRequestStreamResponse'),))
     responses = tuple(response_iterator)
 
     self.assertSequenceEqual(expected_responses, responses)
@@ -256,7 +255,7 @@ class RPCTest(unittest.TestCase):
     multi_callable = _stream_unary_multi_callable(self._channel)
     response = multi_callable(
         request_iterator,
-        metadata=((b'test', b'SuccessfulStreamRequestBlockingUnaryResponse'),))
+        metadata=(('test', 'SuccessfulStreamRequestBlockingUnaryResponse'),))
 
     self.assertEqual(expected_response, response)
 
@@ -266,11 +265,11 @@ class RPCTest(unittest.TestCase):
     request_iterator = iter(requests)
 
     multi_callable = _stream_unary_multi_callable(self._channel)
-    response, call = multi_callable(
+    response, call = multi_callable.with_call(
         request_iterator,
         metadata=(
-            (b'test', b'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),
-        ), with_call=True)
+            ('test', 'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),
+        ))
 
     self.assertEqual(expected_response, response)
     self.assertIs(grpc.StatusCode.OK, call.code())
@@ -284,7 +283,7 @@ class RPCTest(unittest.TestCase):
     response_future = multi_callable.future(
         request_iterator,
         metadata=(
-            (b'test', b'SuccessfulStreamRequestFutureUnaryResponse'),))
+            ('test', 'SuccessfulStreamRequestFutureUnaryResponse'),))
     response = response_future.result()
 
     self.assertEqual(expected_response, response)
@@ -298,7 +297,7 @@ class RPCTest(unittest.TestCase):
     multi_callable = _stream_stream_multi_callable(self._channel)
     response_iterator = multi_callable(
         request_iterator,
-        metadata=((b'test', b'SuccessfulStreamRequestStreamResponse'),))
+        metadata=(('test', 'SuccessfulStreamRequestStreamResponse'),))
     responses = tuple(response_iterator)
 
     self.assertSequenceEqual(expected_responses, responses)
@@ -313,9 +312,9 @@ class RPCTest(unittest.TestCase):
 
     multi_callable = _unary_unary_multi_callable(self._channel)
     first_response = multi_callable(
-        first_request, metadata=((b'test', b'SequentialInvocations'),))
+        first_request, metadata=(('test', 'SequentialInvocations'),))
     second_response = multi_callable(
-        second_request, metadata=((b'test', b'SequentialInvocations'),))
+        second_request, metadata=(('test', 'SequentialInvocations'),))
 
     self.assertEqual(expected_first_response, first_response)
     self.assertEqual(expected_second_response, second_response)
@@ -332,7 +331,7 @@ class RPCTest(unittest.TestCase):
       request_iterator = iter(requests)
       response_future = pool.submit(
           multi_callable, request_iterator,
-          metadata=((b'test', b'ConcurrentBlockingInvocations'),))
+          metadata=(('test', 'ConcurrentBlockingInvocations'),))
       response_futures[index] = response_future
     responses = tuple(
         response_future.result() for response_future in response_futures)
@@ -351,7 +350,7 @@ class RPCTest(unittest.TestCase):
       request_iterator = iter(requests)
       response_future = multi_callable.future(
           request_iterator,
-          metadata=((b'test', b'ConcurrentFutureInvocations'),))
+          metadata=(('test', 'ConcurrentFutureInvocations'),))
       response_futures[index] = response_future
     responses = tuple(
         response_future.result() for response_future in response_futures)
@@ -381,8 +380,8 @@ class RPCTest(unittest.TestCase):
       inner_response_future = multi_callable.future(
           request,
           metadata=(
-              (b'test',
-               b'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
+              ('test',
+               'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
       outer_response_future = pool.submit(wrap_future(inner_response_future))
       response_futures[index] = outer_response_future
 
@@ -401,7 +400,7 @@ class RPCTest(unittest.TestCase):
     response_iterator = multi_callable(
         request,
         metadata=(
-            (b'test', b'ConsumingOneStreamResponseUnaryRequest'),))
+            ('test', 'ConsumingOneStreamResponseUnaryRequest'),))
     next(response_iterator)
 
   def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self):
@@ -411,7 +410,7 @@ class RPCTest(unittest.TestCase):
     response_iterator = multi_callable(
         request,
         metadata=(
-            (b'test', b'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
+            ('test', 'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
     for _ in range(test_constants.STREAM_LENGTH // 2):
       next(response_iterator)
 
@@ -423,7 +422,7 @@ class RPCTest(unittest.TestCase):
     response_iterator = multi_callable(
         request_iterator,
         metadata=(
-            (b'test', b'ConsumingSomeButNotAllStreamResponsesStreamRequest'),))
+            ('test', 'ConsumingSomeButNotAllStreamResponsesStreamRequest'),))
     for _ in range(test_constants.STREAM_LENGTH // 2):
       next(response_iterator)
 
@@ -435,7 +434,7 @@ class RPCTest(unittest.TestCase):
     response_iterator = multi_callable(
         request_iterator,
         metadata=(
-            (b'test', b'ConsumingTooManyStreamResponsesStreamRequest'),))
+            ('test', 'ConsumingTooManyStreamResponsesStreamRequest'),))
     for _ in range(test_constants.STREAM_LENGTH):
       next(response_iterator)
     for _ in range(test_constants.STREAM_LENGTH):
@@ -454,7 +453,7 @@ class RPCTest(unittest.TestCase):
     with self._control.pause():
       response_future = multi_callable.future(
           request,
-          metadata=((b'test', b'CancelledUnaryRequestUnaryResponse'),))
+          metadata=(('test', 'CancelledUnaryRequestUnaryResponse'),))
       response_future.cancel()
 
     self.assertTrue(response_future.cancelled())
@@ -469,7 +468,7 @@ class RPCTest(unittest.TestCase):
     with self._control.pause():
       response_iterator = multi_callable(
           request,
-          metadata=((b'test', b'CancelledUnaryRequestStreamResponse'),))
+          metadata=(('test', 'CancelledUnaryRequestStreamResponse'),))
       self._control.block_until_paused()
       response_iterator.cancel()
 
@@ -489,7 +488,7 @@ class RPCTest(unittest.TestCase):
     with self._control.pause():
       response_future = multi_callable.future(
           request_iterator,
-          metadata=((b'test', b'CancelledStreamRequestUnaryResponse'),))
+          metadata=(('test', 'CancelledStreamRequestUnaryResponse'),))
       self._control.block_until_paused()
       response_future.cancel()
 
@@ -509,7 +508,7 @@ class RPCTest(unittest.TestCase):
     with self._control.pause():
       response_iterator = multi_callable(
           request_iterator,
-          metadata=((b'test', b'CancelledStreamRequestStreamResponse'),))
+          metadata=(('test', 'CancelledStreamRequestStreamResponse'),))
       response_iterator.cancel()
 
     with self.assertRaises(grpc.RpcError):
@@ -525,10 +524,9 @@ class RPCTest(unittest.TestCase):
     multi_callable = _unary_unary_multi_callable(self._channel)
     with self._control.pause():
       with self.assertRaises(grpc.RpcError) as exception_context:
-        multi_callable(
+        multi_callable.with_call(
             request, timeout=test_constants.SHORT_TIMEOUT,
-            metadata=((b'test', b'ExpiredUnaryRequestBlockingUnaryResponse'),),
-            with_call=True)
+            metadata=(('test', 'ExpiredUnaryRequestBlockingUnaryResponse'),))
 
     self.assertIsNotNone(exception_context.exception.initial_metadata())
     self.assertIs(
@@ -544,7 +542,7 @@ class RPCTest(unittest.TestCase):
     with self._control.pause():
       response_future = multi_callable.future(
           request, timeout=test_constants.SHORT_TIMEOUT,
-          metadata=((b'test', b'ExpiredUnaryRequestFutureUnaryResponse'),))
+          metadata=(('test', 'ExpiredUnaryRequestFutureUnaryResponse'),))
       response_future.add_done_callback(callback)
       value_passed_to_callback = callback.value()
 
@@ -569,7 +567,7 @@ class RPCTest(unittest.TestCase):
       with self.assertRaises(grpc.RpcError) as exception_context:
         response_iterator = multi_callable(
             request, timeout=test_constants.SHORT_TIMEOUT,
-            metadata=((b'test', b'ExpiredUnaryRequestStreamResponse'),))
+            metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),))
         next(response_iterator)
 
     self.assertIs(
@@ -585,7 +583,7 @@ class RPCTest(unittest.TestCase):
       with self.assertRaises(grpc.RpcError) as exception_context:
         multi_callable(
             request_iterator, timeout=test_constants.SHORT_TIMEOUT,
-            metadata=((b'test', b'ExpiredStreamRequestBlockingUnaryResponse'),))
+            metadata=(('test', 'ExpiredStreamRequestBlockingUnaryResponse'),))
 
     self.assertIsNotNone(exception_context.exception.initial_metadata())
     self.assertIs(
@@ -602,7 +600,7 @@ class RPCTest(unittest.TestCase):
     with self._control.pause():
       response_future = multi_callable.future(
           request_iterator, timeout=test_constants.SHORT_TIMEOUT,
-          metadata=((b'test', b'ExpiredStreamRequestFutureUnaryResponse'),))
+          metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),))
       response_future.add_done_callback(callback)
       value_passed_to_callback = callback.value()
 
@@ -627,7 +625,7 @@ class RPCTest(unittest.TestCase):
       with self.assertRaises(grpc.RpcError) as exception_context:
         response_iterator = multi_callable(
             request_iterator, timeout=test_constants.SHORT_TIMEOUT,
-            metadata=((b'test', b'ExpiredStreamRequestStreamResponse'),))
+            metadata=(('test', 'ExpiredStreamRequestStreamResponse'),))
         next(response_iterator)
 
     self.assertIs(
@@ -640,10 +638,9 @@ class RPCTest(unittest.TestCase):
     multi_callable = _unary_unary_multi_callable(self._channel)
     with self._control.fail():
       with self.assertRaises(grpc.RpcError) as exception_context:
-        multi_callable(
+        multi_callable.with_call(
             request,
-            metadata=((b'test', b'FailedUnaryRequestBlockingUnaryResponse'),),
-            with_call=True)
+            metadata=(('test', 'FailedUnaryRequestBlockingUnaryResponse'),))
 
     self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
 
@@ -655,7 +652,7 @@ class RPCTest(unittest.TestCase):
     with self._control.fail():
       response_future = multi_callable.future(
           request,
-          metadata=((b'test', b'FailedUnaryRequestFutureUnaryResponse'),))
+          metadata=(('test', 'FailedUnaryRequestFutureUnaryResponse'),))
       response_future.add_done_callback(callback)
       value_passed_to_callback = callback.value()
 
@@ -675,7 +672,7 @@ class RPCTest(unittest.TestCase):
       with self._control.fail():
         response_iterator = multi_callable(
             request,
-            metadata=((b'test', b'FailedUnaryRequestStreamResponse'),))
+            metadata=(('test', 'FailedUnaryRequestStreamResponse'),))
         next(response_iterator)
 
     self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
@@ -689,7 +686,7 @@ class RPCTest(unittest.TestCase):
       with self.assertRaises(grpc.RpcError) as exception_context:
         multi_callable(
             request_iterator,
-            metadata=((b'test', b'FailedStreamRequestBlockingUnaryResponse'),))
+            metadata=(('test', 'FailedStreamRequestBlockingUnaryResponse'),))
 
     self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
 
@@ -702,7 +699,7 @@ class RPCTest(unittest.TestCase):
     with self._control.fail():
       response_future = multi_callable.future(
           request_iterator,
-          metadata=((b'test', b'FailedStreamRequestFutureUnaryResponse'),))
+          metadata=(('test', 'FailedStreamRequestFutureUnaryResponse'),))
       response_future.add_done_callback(callback)
       value_passed_to_callback = callback.value()
 
@@ -723,7 +720,7 @@ class RPCTest(unittest.TestCase):
       with self.assertRaises(grpc.RpcError) as exception_context:
         response_iterator = multi_callable(
             request_iterator,
-            metadata=((b'test', b'FailedStreamRequestStreamResponse'),))
+            metadata=(('test', 'FailedStreamRequestStreamResponse'),))
         tuple(response_iterator)
 
     self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
@@ -735,7 +732,7 @@ class RPCTest(unittest.TestCase):
     multi_callable = _unary_unary_multi_callable(self._channel)
     multi_callable.future(
         request,
-        metadata=((b'test', b'IgnoredUnaryRequestFutureUnaryResponse'),))
+        metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),))
 
   def testIgnoredUnaryRequestStreamResponse(self):
     request = b'\x37\x17'
@@ -743,7 +740,7 @@ class RPCTest(unittest.TestCase):
     multi_callable = _unary_stream_multi_callable(self._channel)
     multi_callable(
         request,
-        metadata=((b'test', b'IgnoredUnaryRequestStreamResponse'),))
+        metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),))
 
   def testIgnoredStreamRequestFutureUnaryResponse(self):
     requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
@@ -752,7 +749,7 @@ class RPCTest(unittest.TestCase):
     multi_callable = _stream_unary_multi_callable(self._channel)
     multi_callable.future(
         request_iterator,
-        metadata=((b'test', b'IgnoredStreamRequestFutureUnaryResponse'),))
+        metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),))
 
   def testIgnoredStreamRequestStreamResponse(self):
     requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
@@ -761,7 +758,7 @@ class RPCTest(unittest.TestCase):
     multi_callable = _stream_stream_multi_callable(self._channel)
     multi_callable(
         request_iterator,
-        metadata=((b'test', b'IgnoredStreamRequestStreamResponse'),))
+        metadata=(('test', 'IgnoredStreamRequestStreamResponse'),))
 
 
 if __name__ == '__main__':
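
The hunks above migrate these unit tests to the 1.0-style surface shown in the added lines: text (str) metadata instead of bytes, grpc.server(thread_pool) instead of grpc.server((), thread_pool), and a separate with_call method instead of the with_call=True flag. As a point of reference, here is a minimal, self-contained sketch of that invocation pattern; no handler is registered, so the call is expected to fail with UNIMPLEMENTED, mirroring the unrecognized-method test above.

    from concurrent import futures

    import grpc

    # The server now takes only the thread pool; handlers are added separately.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    port = server.add_insecure_port('[::]:0')  # text address, not bytes
    server.start()

    channel = grpc.insecure_channel('localhost:%d' % port)
    multi_callable = channel.unary_unary('NoSuchMethod')  # nothing serves this
    try:
      # with_call is now a method on the multi-callable, and metadata keys and
      # values are text strings.
      response, call = multi_callable.with_call(
          b'\x07\x08', metadata=(('test', 'Example'),))
    except grpc.RpcError as error:
      assert error.code() is grpc.StatusCode.UNIMPLEMENTED
    finally:
      server.stop(0)
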
diff --git a/src/python/grpcio/tests/unit/_sanity/__init__.py b/src/python/grpcio_tests/tests/unit/_sanity/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_sanity/__init__.py
rename to src/python/grpcio_tests/tests/unit/_sanity/__init__.py
diff --git a/src/python/grpcio/tests/unit/_sanity/_sanity_test.py b/src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py
similarity index 90%
rename from src/python/grpcio/tests/unit/_sanity/_sanity_test.py
rename to src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py
index 0a5a715c0e1c66bfcff7a05cbcb9238941e2e4aa..e9fdf217aef02f6eac1b9234b6ab8df91b02ecf6 100644
--- a/src/python/grpcio/tests/unit/_sanity/_sanity_test.py
+++ b/src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py
@@ -30,6 +30,9 @@
 import json
 import unittest
 
+import pkg_resources
+import six
+
 import tests
 
 
@@ -44,8 +47,10 @@ class Sanity(unittest.TestCase):
         for test_case_class in tests._loader.iterate_suite_cases(loader.suite)]
     test_suite_names = sorted(set(test_suite_names))
 
-    with open('src/python/grpcio/tests/tests.json') as tests_json_file:
-      tests_json = json.load(tests_json_file)
+    tests_json_string = pkg_resources.resource_string('tests', 'tests.json')
+    if six.PY3:
+      tests_json_string = tests_json_string.decode()
+    tests_json = json.loads(tests_json_string)
     self.assertListEqual(test_suite_names, tests_json)
 
 
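
Switching the sanity test to pkg_resources means tests.json is resolved from the installed tests package rather than from a path relative to the source tree, so the check also works when the test package is installed (including from an egg). The pattern used above, in isolation:

    import json

    import pkg_resources
    import six

    # resource_string returns bytes under Python 3 and str under Python 2.
    raw = pkg_resources.resource_string('tests', 'tests.json')
    if six.PY3:
      raw = raw.decode()
    expected_suites = json.loads(raw)
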
diff --git a/src/python/grpcio/tests/unit/_thread_cleanup_test.py b/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_thread_cleanup_test.py
rename to src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
diff --git a/src/python/grpcio/tests/unit/_links/__init__.py b/src/python/grpcio_tests/tests/unit/beta/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/_links/__init__.py
rename to src/python/grpcio_tests/tests/unit/beta/__init__.py
diff --git a/src/python/grpcio/tests/unit/beta/_beta_features_test.py b/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py
similarity index 100%
rename from src/python/grpcio/tests/unit/beta/_beta_features_test.py
rename to src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py
diff --git a/src/python/grpcio/grpc/framework/interfaces/links/utilities.py b/src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py
similarity index 72%
rename from src/python/grpcio/grpc/framework/interfaces/links/utilities.py
rename to src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py
index 6e4fd76d9325263b5fc7e81d257cbb20a140347c..5d826a269db073cf7241b3ecf1a41b1157524821 100644
--- a/src/python/grpcio/grpc/framework/interfaces/links/utilities.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py
@@ -27,18 +27,22 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""Utilities provided as part of the links interface."""
+"""Tests of grpc.beta._connectivity_channel."""
 
-from grpc.framework.interfaces.links import links
+import unittest
 
+from grpc.beta import interfaces
 
-class _NullLink(links.Link):
-  """A do-nothing links.Link."""
 
-  def accept_ticket(self, ticket):
-    pass
+class ConnectivityStatesTest(unittest.TestCase):
 
-  def join_link(self, link):
-    pass
+  def testBetaConnectivityStates(self):
+    self.assertIsNotNone(interfaces.ChannelConnectivity.IDLE)
+    self.assertIsNotNone(interfaces.ChannelConnectivity.CONNECTING)
+    self.assertIsNotNone(interfaces.ChannelConnectivity.READY)
+    self.assertIsNotNone(interfaces.ChannelConnectivity.TRANSIENT_FAILURE)
+    self.assertIsNotNone(interfaces.ChannelConnectivity.FATAL_FAILURE)
 
-NULL_LINK = _NullLink()
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
diff --git a/src/python/grpcio/tests/unit/beta/_face_interface_test.py b/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py
similarity index 100%
rename from src/python/grpcio/tests/unit/beta/_face_interface_test.py
rename to src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py
diff --git a/src/python/grpcio/tests/unit/beta/_implementations_test.py b/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py
similarity index 100%
rename from src/python/grpcio/tests/unit/beta/_implementations_test.py
rename to src/python/grpcio_tests/tests/unit/beta/_implementations_test.py
diff --git a/src/python/grpcio/tests/unit/beta/_not_found_test.py b/src/python/grpcio_tests/tests/unit/beta/_not_found_test.py
similarity index 100%
rename from src/python/grpcio/tests/unit/beta/_not_found_test.py
rename to src/python/grpcio_tests/tests/unit/beta/_not_found_test.py
diff --git a/src/python/grpcio/tests/unit/beta/_utilities_test.py b/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py
similarity index 85%
rename from src/python/grpcio/tests/unit/beta/_utilities_test.py
rename to src/python/grpcio_tests/tests/unit/beta/_utilities_test.py
index 08ce98e75169149073bbf530cf645ce26a0ec2c5..90fe10c77ce8d4df6e3bdf8b727a6dbf492cfe76 100644
--- a/src/python/grpcio/tests/unit/beta/_utilities_test.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py
@@ -33,21 +33,12 @@ import threading
 import time
 import unittest
 
-from grpc._adapter import _low
-from grpc._adapter import _types
 from grpc.beta import implementations
 from grpc.beta import utilities
 from grpc.framework.foundation import future
 from tests.unit.framework.common import test_constants
 
 
-def _drive_completion_queue(completion_queue):
-  while True:
-    event = completion_queue.next(time.time() + 24 * 60 * 60)
-    if event.type == _types.EventType.QUEUE_SHUTDOWN:
-      break
-
-
 class _Callback(object):
 
   def __init__(self):
@@ -87,13 +78,9 @@ class ChannelConnectivityTest(unittest.TestCase):
     self.assertFalse(ready_future.running())
 
   def test_immediately_connectable_channel_connectivity(self):
-    server_completion_queue = _low.CompletionQueue()
-    server = _low.Server(server_completion_queue, [])
-    port = server.add_http2_port('[::]:0')
+    server = implementations.server({})
+    port = server.add_insecure_port('[::]:0')
     server.start()
-    server_completion_queue_thread = threading.Thread(
-        target=_drive_completion_queue, args=(server_completion_queue,))
-    server_completion_queue_thread.start()
     channel = implementations.insecure_channel('localhost', port)
     callback = _Callback()
 
@@ -114,9 +101,7 @@ class ChannelConnectivityTest(unittest.TestCase):
       self.assertFalse(ready_future.running())
     finally:
       ready_future.cancel()
-      server.shutdown()
-      server_completion_queue.shutdown()
-      server_completion_queue_thread.join()
+      server.stop(0)
 
 
 if __name__ == '__main__':
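
The rewritten connectivity test above drops the _low completion-queue plumbing in favour of the beta server API. A sketch of the lifecycle it relies on; an empty implementations dict is enough because no RPCs are actually issued:

    from grpc.beta import implementations

    server = implementations.server({})          # no RPC method implementations needed
    port = server.add_insecure_port('[::]:0')    # bind an ephemeral port
    server.start()
    try:
      channel = implementations.insecure_channel('localhost', port)
      # ... subscribe to connectivity updates on the channel here ...
    finally:
      server.stop(0)
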
diff --git a/src/python/grpcio/tests/unit/beta/test_utilities.py b/src/python/grpcio_tests/tests/unit/beta/test_utilities.py
similarity index 97%
rename from src/python/grpcio/tests/unit/beta/test_utilities.py
rename to src/python/grpcio_tests/tests/unit/beta/test_utilities.py
index 8ccad04e056fd34ba49c0f8f2cedcb248782f08c..692da9c97d78866411a4be9d1c74b0059782d704 100644
--- a/src/python/grpcio/tests/unit/beta/test_utilities.py
+++ b/src/python/grpcio_tests/tests/unit/beta/test_utilities.py
@@ -51,5 +51,5 @@ def not_really_secure_channel(
   target = '%s:%d' % (host, port)
   channel = grpc.secure_channel(
       target, channel_credentials,
-      ((b'grpc.ssl_target_name_override', server_host_override,),))
+      (('grpc.ssl_target_name_override', server_host_override,),))
   return implementations.Channel(channel)
diff --git a/src/python/grpcio/tests/unit/credentials/README b/src/python/grpcio_tests/tests/unit/credentials/README
similarity index 100%
rename from src/python/grpcio/tests/unit/credentials/README
rename to src/python/grpcio_tests/tests/unit/credentials/README
diff --git a/src/python/grpcio/tests/unit/credentials/ca.pem b/src/python/grpcio_tests/tests/unit/credentials/ca.pem
similarity index 100%
rename from src/python/grpcio/tests/unit/credentials/ca.pem
rename to src/python/grpcio_tests/tests/unit/credentials/ca.pem
diff --git a/src/python/grpcio/tests/unit/credentials/server1.key b/src/python/grpcio_tests/tests/unit/credentials/server1.key
similarity index 100%
rename from src/python/grpcio/tests/unit/credentials/server1.key
rename to src/python/grpcio_tests/tests/unit/credentials/server1.key
diff --git a/src/python/grpcio/tests/unit/credentials/server1.pem b/src/python/grpcio_tests/tests/unit/credentials/server1.pem
similarity index 100%
rename from src/python/grpcio/tests/unit/credentials/server1.pem
rename to src/python/grpcio_tests/tests/unit/credentials/server1.pem
diff --git a/src/python/grpcio/tests/unit/beta/__init__.py b/src/python/grpcio_tests/tests/unit/framework/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/beta/__init__.py
rename to src/python/grpcio_tests/tests/unit/framework/__init__.py
diff --git a/src/python/grpcio/tests/unit/framework/__init__.py b/src/python/grpcio_tests/tests/unit/framework/common/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/__init__.py
rename to src/python/grpcio_tests/tests/unit/framework/common/__init__.py
diff --git a/src/python/grpcio/tests/unit/framework/common/test_constants.py b/src/python/grpcio_tests/tests/unit/framework/common/test_constants.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/common/test_constants.py
rename to src/python/grpcio_tests/tests/unit/framework/common/test_constants.py
diff --git a/src/python/grpcio/tests/unit/framework/common/test_control.py b/src/python/grpcio_tests/tests/unit/framework/common/test_control.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/common/test_control.py
rename to src/python/grpcio_tests/tests/unit/framework/common/test_control.py
diff --git a/src/python/grpcio/tests/unit/framework/common/test_coverage.py b/src/python/grpcio_tests/tests/unit/framework/common/test_coverage.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/common/test_coverage.py
rename to src/python/grpcio_tests/tests/unit/framework/common/test_coverage.py
diff --git a/src/python/grpcio/tests/unit/framework/common/__init__.py b/src/python/grpcio_tests/tests/unit/framework/foundation/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/common/__init__.py
rename to src/python/grpcio_tests/tests/unit/framework/foundation/__init__.py
diff --git a/src/python/grpcio/tests/unit/framework/foundation/_logging_pool_test.py b/src/python/grpcio_tests/tests/unit/framework/foundation/_logging_pool_test.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/foundation/_logging_pool_test.py
rename to src/python/grpcio_tests/tests/unit/framework/foundation/_logging_pool_test.py
diff --git a/src/python/grpcio/tests/unit/framework/foundation/stream_testing.py b/src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/foundation/stream_testing.py
rename to src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py
diff --git a/src/python/grpcio/tests/unit/framework/core/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/core/__init__.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_3069_test_constant.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/interfaces/face/_3069_test_constant.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py
diff --git a/src/python/grpcio/tests/unit/framework/foundation/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/foundation/__init__.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_digest.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/interfaces/face/_digest.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
similarity index 94%
rename from src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
index 791620307b506c8c1f1b2529311bcb38ed095aa9..d32208f9eb05990847d843ae7d0ebca01777a232 100644
--- a/src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
@@ -41,6 +41,7 @@ from concurrent import futures
 import six
 
 # test_interfaces is referenced from specification in this module.
+from grpc.framework.foundation import future
 from grpc.framework.foundation import logging_pool
 from grpc.framework.interfaces.face import face
 from tests.unit.framework.common import test_constants
@@ -159,6 +160,8 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.
 
         test_messages.verify(request, response, self)
         self.assertIs(callback.future(), response_future)
+        self.assertIsNone(response_future.exception())
+        self.assertIsNone(response_future.traceback())
 
   def testSuccessfulUnaryRequestStreamResponse(self):
     for (group, method), test_messages_sequence in (
@@ -191,6 +194,8 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.
 
         test_messages.verify(requests, response, self)
         self.assertIs(future_passed_to_callback, response_future)
+        self.assertIsNone(response_future.exception())
+        self.assertIsNone(response_future.traceback())
 
   def testSuccessfulStreamRequestStreamResponse(self):
     for (group, method), test_messages_sequence in (
@@ -301,6 +306,12 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.
         self.assertIs(callback.future(), response_future)
         self.assertFalse(cancel_method_return_value)
         self.assertTrue(response_future.cancelled())
+        with self.assertRaises(future.CancelledError):
+          response_future.result()
+        with self.assertRaises(future.CancelledError):
+          response_future.exception()
+        with self.assertRaises(future.CancelledError):
+          response_future.traceback()
 
   def testCancelledUnaryRequestStreamResponse(self):
     for (group, method), test_messages_sequence in (
@@ -332,6 +343,12 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.
         self.assertIs(callback.future(), response_future)
         self.assertFalse(cancel_method_return_value)
         self.assertTrue(response_future.cancelled())
+        with self.assertRaises(future.CancelledError):
+          response_future.result()
+        with self.assertRaises(future.CancelledError):
+          response_future.exception()
+        with self.assertRaises(future.CancelledError):
+          response_future.traceback()
 
   def testCancelledStreamRequestStreamResponse(self):
     for (group, method), test_messages_sequence in (
@@ -363,6 +380,9 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.
               response_future.exception(), face.ExpirationError)
           with self.assertRaises(face.ExpirationError):
             response_future.result()
+          self.assertIsInstance(
+              response_future.exception(), face.AbortionError)
+          self.assertIsNotNone(response_future.traceback())
 
   def testExpiredUnaryRequestStreamResponse(self):
     for (group, method), test_messages_sequence in (
@@ -392,6 +412,9 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.
               response_future.exception(), face.ExpirationError)
           with self.assertRaises(face.ExpirationError):
             response_future.result()
+          self.assertIsInstance(
+              response_future.exception(), face.AbortionError)
+          self.assertIsNotNone(response_future.traceback())
 
   def testExpiredStreamRequestStreamResponse(self):
     for (group, method), test_messages_sequence in (
@@ -426,6 +449,7 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.
               response_future.exception(), face.ExpirationError)
           with self.assertRaises(face.ExpirationError):
             response_future.result()
+          self.assertIsNotNone(response_future.traceback())
 
   def testFailedUnaryRequestStreamResponse(self):
     for (group, method), test_messages_sequence in (
@@ -463,6 +487,7 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.
               response_future.exception(), face.ExpirationError)
           with self.assertRaises(face.ExpirationError):
             response_future.result()
+          self.assertIsNotNone(response_future.traceback())
 
   def testFailedStreamRequestStreamResponse(self):
     for (group, method), test_messages_sequence in (
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_invocation.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/interfaces/face/_invocation.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/interfaces/face/_service.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_stock_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/interfaces/face/_stock_service.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/test_cases.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/interfaces/face/test_cases.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/test_interfaces.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py
similarity index 100%
rename from src/python/grpcio/tests/unit/framework/interfaces/face/test_interfaces.py
rename to src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py
diff --git a/src/python/grpcio/tests/unit/resources.py b/src/python/grpcio_tests/tests/unit/resources.py
similarity index 100%
rename from src/python/grpcio/tests/unit/resources.py
rename to src/python/grpcio_tests/tests/unit/resources.py
diff --git a/src/python/grpcio/tests/unit/test_common.py b/src/python/grpcio_tests/tests/unit/test_common.py
similarity index 74%
rename from src/python/grpcio/tests/unit/test_common.py
rename to src/python/grpcio_tests/tests/unit/test_common.py
index b779f65e7e9c121580beee76be7489cc4d7f8603..cd71bd80d75462ad6c14f8bd0280106bd591f93a 100644
--- a/src/python/grpcio/tests/unit/test_common.py
+++ b/src/python/grpcio_tests/tests/unit/test_common.py
@@ -31,12 +31,13 @@
 
 import collections
 
+import grpc
 import six
 
-INVOCATION_INITIAL_METADATA = ((b'0', b'abc'), (b'1', b'def'), (b'2', b'ghi'),)
-SERVICE_INITIAL_METADATA = ((b'3', b'jkl'), (b'4', b'mno'), (b'5', b'pqr'),)
-SERVICE_TERMINAL_METADATA = ((b'6', b'stu'), (b'7', b'vwx'), (b'8', b'yza'),)
-DETAILS = b'test details'
+INVOCATION_INITIAL_METADATA = (('0', 'abc'), ('1', 'def'), ('2', 'ghi'),)
+SERVICE_INITIAL_METADATA = (('3', 'jkl'), ('4', 'mno'), ('5', 'pqr'),)
+SERVICE_TERMINAL_METADATA = (('6', 'stu'), ('7', 'vwx'), ('8', 'yza'),)
+DETAILS = 'test details'
 
 
 def metadata_transmitted(original_metadata, transmitted_metadata):
@@ -59,16 +60,10 @@ def metadata_transmitted(original_metadata, transmitted_metadata):
       original_metadata after having been transmitted via gRPC.
   """
   original = collections.defaultdict(list)
-  for key_value_pair in original_metadata:
-    key, value = tuple(key_value_pair)
-    if not isinstance(key, bytes):
-      key = key.encode()
-    if not isinstance(value, bytes):
-      value = value.encode()
+  for key, value in original_metadata:
     original[key].append(value)
   transmitted = collections.defaultdict(list)
-  for key_value_pair in transmitted_metadata:
-    key, value = tuple(key_value_pair)
+  for key, value in transmitted_metadata:
     transmitted[key].append(value)
 
   for key, values in six.iteritems(original):
@@ -84,3 +79,24 @@ def metadata_transmitted(original_metadata, transmitted_metadata):
       return False
   else:
     return True
+
+
+def test_secure_channel(
+    target, channel_credentials, server_host_override):
+  """Creates an insecure Channel to a remote host.
+
+  Args:
+    host: The name of the remote host to which to connect.
+    port: The port of the remote host to which to connect.
+    channel_credentials: The implementations.ChannelCredentials with which to
+      connect.
+    server_host_override: The target name used for SSL host name checking.
+
+  Returns:
+    An implementations.Channel to the remote host through which RPCs may be
+      conducted.
+  """
+  channel = grpc.secure_channel(
+      target, channel_credentials,
+      (('grpc.ssl_target_name_override', server_host_override,),))
+  return channel
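
The test_secure_channel helper added above pins the SSL target name through the grpc.ssl_target_name_override channel argument, which is what lets tests present the bundled test certificates while actually connecting to localhost. A sketch of a caller, under stated assumptions: the port is illustrative, and 'foo.test.google.fr' is assumed to be the host name the gRPC test certificates are issued for.

    import grpc

    from tests.unit import test_common

    # The ca.pem path matches the renamed credentials directory above.
    with open('src/python/grpcio_tests/tests/unit/credentials/ca.pem',
              'rb') as ca_file:
      credentials = grpc.ssl_channel_credentials(ca_file.read())
    channel = test_common.test_secure_channel(
        'localhost:50051', credentials, 'foo.test.google.fr')
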
diff --git a/src/ruby/ext/grpc/rb_byte_buffer.c b/src/ruby/ext/grpc/rb_byte_buffer.c
index 1172691116c82b12a2ef1abfc01239cccf130f3c..61b7c30315da882161c15ef52e110c97ad2a10cd 100644
--- a/src/ruby/ext/grpc/rb_byte_buffer.c
+++ b/src/ruby/ext/grpc/rb_byte_buffer.c
@@ -56,7 +56,10 @@ VALUE grpc_rb_byte_buffer_to_s(grpc_byte_buffer *buffer) {
     return Qnil;
   }
   rb_string = rb_str_buf_new(grpc_byte_buffer_length(buffer));
-  grpc_byte_buffer_reader_init(&reader, buffer);
+  if (!grpc_byte_buffer_reader_init(&reader, buffer)) {
+    rb_raise(rb_eRuntimeError, "Error initializing byte buffer reader.");
+    return Qnil;
+  }
   while (grpc_byte_buffer_reader_next(&reader, &next) != 0) {
     rb_str_cat(rb_string, (const char *) GPR_SLICE_START_PTR(next),
                GPR_SLICE_LENGTH(next));
diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c
index b436057c16353ac90551eec2bd3283b029117fd0..212612444368d78f611844d3de40d7cc8215b35f 100644
--- a/src/ruby/ext/grpc/rb_call.c
+++ b/src/ruby/ext/grpc/rb_call.c
@@ -63,27 +63,18 @@ static VALUE grpc_rb_sBatchResult;
  * grpc_metadata_array. */
 static VALUE grpc_rb_cMdAry;
 
-/* id_cq is the name of the hidden ivar that preserves a reference to a
- * completion queue */
-static ID id_cq;
-
-/* id_flags is the name of the hidden ivar that preserves the value of
- * the flags used to create metadata from a Hash */
-static ID id_flags;
-
 /* id_credentials is the name of the hidden ivar that preserves the value
  * of the credentials added to the call */
 static ID id_credentials;
 
-/* id_input_md is the name of the hidden ivar that preserves the hash used to
- * create metadata, so that references to the strings it contains last as long
- * as the call the metadata is added to. */
-static ID id_input_md;
-
 /* id_metadata is name of the attribute used to access the metadata hash
  * received by the call and subsequently saved on it. */
 static ID id_metadata;
 
+/* id_trailing_metadata is the name of the attribute used to access the trailing
+ * metadata hash received by the call and subsequently saved on it. */
+static ID id_trailing_metadata;
+
 /* id_status is name of the attribute used to access the status object
  * received by the call and subsequently saved on it. */
 static ID id_status;
@@ -101,14 +92,27 @@ static VALUE sym_message;
 static VALUE sym_status;
 static VALUE sym_cancelled;
 
+typedef struct grpc_rb_call {
+  grpc_call *wrapped;
+  grpc_completion_queue *queue;
+} grpc_rb_call;
+
+static void destroy_call(grpc_rb_call *call) {
+  /* Ensure that we only try to destroy the call once */
+  if (call->wrapped != NULL) {
+    grpc_call_destroy(call->wrapped);
+    call->wrapped = NULL;
+    grpc_rb_completion_queue_destroy(call->queue);
+    call->queue = NULL;
+  }
+}
+
 /* Destroys a Call. */
 static void grpc_rb_call_destroy(void *p) {
-  grpc_call* call = NULL;
   if (p == NULL) {
     return;
   }
-  call = (grpc_call *)p;
-  grpc_call_destroy(call);
+  destroy_call((grpc_rb_call*)p);
 }
 
 static size_t md_ary_datasize(const void *p) {
@@ -167,15 +171,15 @@ const char *grpc_call_error_detail_of(grpc_call_error err) {
 /* Called by clients to cancel an RPC on the server.
    Can be called multiple times, from any thread. */
 static VALUE grpc_rb_call_cancel(VALUE self) {
-  grpc_call *call = NULL;
+  grpc_rb_call *call = NULL;
   grpc_call_error err;
   if (RTYPEDDATA_DATA(self) == NULL) {
     //This call has been closed
     return Qnil;
   }
 
-  TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
-  err = grpc_call_cancel(call, NULL);
+  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
+  err = grpc_call_cancel(call->wrapped, NULL);
   if (err != GRPC_CALL_OK) {
     rb_raise(grpc_rb_eCallError, "cancel failed: %s (code=%d)",
              grpc_call_error_detail_of(err), err);
@@ -189,10 +193,10 @@ static VALUE grpc_rb_call_cancel(VALUE self) {
    processed.
 */
 static VALUE grpc_rb_call_close(VALUE self) {
-  grpc_call *call = NULL;
-  TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
+  grpc_rb_call *call = NULL;
+  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
   if(call != NULL) {
-    grpc_call_destroy(call);
+    destroy_call(call);
     RTYPEDDATA_DATA(self) = NULL;
   }
   return Qnil;
@@ -201,14 +205,14 @@ static VALUE grpc_rb_call_close(VALUE self) {
 /* Called to obtain the peer that this call is connected to. */
 static VALUE grpc_rb_call_get_peer(VALUE self) {
   VALUE res = Qnil;
-  grpc_call *call = NULL;
+  grpc_rb_call *call = NULL;
   char *peer = NULL;
   if (RTYPEDDATA_DATA(self) == NULL) {
     rb_raise(grpc_rb_eCallError, "Cannot get peer value on closed call");
     return Qnil;
   }
-  TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
-  peer = grpc_call_get_peer(call);
+  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
+  peer = grpc_call_get_peer(call->wrapped);
   res = rb_str_new2(peer);
   gpr_free(peer);
 
@@ -217,16 +221,16 @@ static VALUE grpc_rb_call_get_peer(VALUE self) {
 
 /* Called to obtain the x509 cert of an authenticated peer. */
 static VALUE grpc_rb_call_get_peer_cert(VALUE self) {
-  grpc_call *call = NULL;
+  grpc_rb_call *call = NULL;
   VALUE res = Qnil;
   grpc_auth_context *ctx = NULL;
   if (RTYPEDDATA_DATA(self) == NULL) {
     rb_raise(grpc_rb_eCallError, "Cannot get peer cert on closed call");
     return Qnil;
   }
-  TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
+  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
 
-  ctx = grpc_call_auth_context(call);
+  ctx = grpc_call_auth_context(call->wrapped);
 
   if (!ctx || !grpc_auth_context_peer_is_authenticated(ctx)) {
     return Qnil;
@@ -296,6 +300,30 @@ static VALUE grpc_rb_call_set_metadata(VALUE self, VALUE metadata) {
   return rb_ivar_set(self, id_metadata, metadata);
 }
 
+/*
+  call-seq:
+  trailing_metadata = call.trailing_metadata
+
+  Gets the trailing metadata object saved on the call */
+static VALUE grpc_rb_call_get_trailing_metadata(VALUE self) {
+  return rb_ivar_get(self, id_trailing_metadata);
+}
+
+/*
+  call-seq:
+  call.trailing_metadata = trailing_metadata
+
+  Saves the trailing metadata hash on the call. */
+static VALUE grpc_rb_call_set_trailing_metadata(VALUE self, VALUE metadata) {
+  if (!NIL_P(metadata) && TYPE(metadata) != T_HASH) {
+    rb_raise(rb_eTypeError, "bad metadata: got:<%s> want: <Hash>",
+             rb_obj_classname(metadata));
+    return Qnil;
+  }
+
+  return rb_ivar_set(self, id_trailing_metadata, metadata);
+}
+
 /*
   call-seq:
   write_flag = call.write_flag
@@ -326,21 +354,23 @@ static VALUE grpc_rb_call_set_write_flag(VALUE self, VALUE write_flag) {
 
   Sets credentials on a call */
 static VALUE grpc_rb_call_set_credentials(VALUE self, VALUE credentials) {
-  grpc_call *call = NULL;
+  grpc_rb_call *call = NULL;
   grpc_call_credentials *creds;
   grpc_call_error err;
   if (RTYPEDDATA_DATA(self) == NULL) {
     rb_raise(grpc_rb_eCallError, "Cannot set credentials of closed call");
     return Qnil;
   }
-  TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
+  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
   creds = grpc_rb_get_wrapped_call_credentials(credentials);
-  err = grpc_call_set_credentials(call, creds);
+  err = grpc_call_set_credentials(call->wrapped, creds);
   if (err != GRPC_CALL_OK) {
     rb_raise(grpc_rb_eCallError,
              "grpc_call_set_credentials failed with %s (code=%d)",
              grpc_call_error_detail_of(err), err);
   }
+  /* We need the credentials to be alive for as long as the call is alive,
+     but we don't care about destruction order. */
   rb_ivar_set(self, id_credentials, credentials);
   return Qnil;
 }
@@ -733,7 +763,6 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack *st) {
 }
 
 /* call-seq:
-   cq = CompletionQueue.new
    ops = {
      GRPC::Core::CallOps::SEND_INITIAL_METADATA => <op_value>,
      GRPC::Core::CallOps::SEND_MESSAGE => <op_value>,
@@ -741,7 +770,7 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack *st) {
    }
    tag = Object.new
    timeout = 10
-   call.start_batch(cq, tag, timeout, ops)
+   call.run_batch(ops)
 
    Start a batch of operations defined in the array ops; when complete, post a
    completion of type 'tag' to the completion queue bound to the call.
@@ -750,20 +779,20 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack *st) {
    The order of ops specified in the batch has no significance.
    Only one operation of each type can be active at once in any given
    batch */
-static VALUE grpc_rb_call_run_batch(VALUE self, VALUE cqueue, VALUE tag,
-                                    VALUE timeout, VALUE ops_hash) {
+static VALUE grpc_rb_call_run_batch(VALUE self, VALUE ops_hash) {
   run_batch_stack st;
-  grpc_call *call = NULL;
+  grpc_rb_call *call = NULL;
   grpc_event ev;
   grpc_call_error err;
   VALUE result = Qnil;
   VALUE rb_write_flag = rb_ivar_get(self, id_write_flag);
   unsigned write_flag = 0;
+  void *tag = (void*)&st;
   if (RTYPEDDATA_DATA(self) == NULL) {
     rb_raise(grpc_rb_eCallError, "Cannot run batch on closed call");
     return Qnil;
   }
-  TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
+  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
 
   /* Validate the ops args, adding them to a ruby array */
   if (TYPE(ops_hash) != T_HASH) {
@@ -778,7 +807,7 @@ static VALUE grpc_rb_call_run_batch(VALUE self, VALUE cqueue, VALUE tag,
 
   /* call grpc_call_start_batch, then wait for it to complete using
    * pluck_event */
-  err = grpc_call_start_batch(call, st.ops, st.op_num, ROBJECT(tag), NULL);
+  err = grpc_call_start_batch(call->wrapped, st.ops, st.op_num, tag, NULL);
   if (err != GRPC_CALL_OK) {
     grpc_run_batch_stack_cleanup(&st);
     rb_raise(grpc_rb_eCallError,
@@ -786,13 +815,11 @@ static VALUE grpc_rb_call_run_batch(VALUE self, VALUE cqueue, VALUE tag,
              grpc_call_error_detail_of(err), err);
     return Qnil;
   }
-  ev = grpc_rb_completion_queue_pluck_event(cqueue, tag, timeout);
-  if (ev.type == GRPC_QUEUE_TIMEOUT) {
-    grpc_run_batch_stack_cleanup(&st);
-    rb_raise(grpc_rb_eOutOfTime, "grpc_call_start_batch timed out");
-    return Qnil;
+  ev = rb_completion_queue_pluck(call->queue, tag,
+                                 gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+  if (!ev.success) {
+    rb_raise(grpc_rb_eCallError, "call#run_batch failed somehow");
   }
-
   /* Build and return the BatchResult struct result,
      if there is an error, it's reflected in the status */
   result = grpc_run_batch_stack_build_result(&st);
@@ -900,7 +927,7 @@ void Init_grpc_call() {
                    1);
 
   /* Add ruby analogues of the Call methods. */
-  rb_define_method(grpc_rb_cCall, "run_batch", grpc_rb_call_run_batch, 4);
+  rb_define_method(grpc_rb_cCall, "run_batch", grpc_rb_call_run_batch, 1);
   rb_define_method(grpc_rb_cCall, "cancel", grpc_rb_call_cancel, 0);
   rb_define_method(grpc_rb_cCall, "close", grpc_rb_call_close, 0);
   rb_define_method(grpc_rb_cCall, "peer", grpc_rb_call_get_peer, 0);
@@ -909,6 +936,10 @@ void Init_grpc_call() {
   rb_define_method(grpc_rb_cCall, "status=", grpc_rb_call_set_status, 1);
   rb_define_method(grpc_rb_cCall, "metadata", grpc_rb_call_get_metadata, 0);
   rb_define_method(grpc_rb_cCall, "metadata=", grpc_rb_call_set_metadata, 1);
+  rb_define_method(grpc_rb_cCall, "trailing_metadata",
+                   grpc_rb_call_get_trailing_metadata, 0);
+  rb_define_method(grpc_rb_cCall, "trailing_metadata=",
+                   grpc_rb_call_set_trailing_metadata, 1);
   rb_define_method(grpc_rb_cCall, "write_flag", grpc_rb_call_get_write_flag, 0);
   rb_define_method(grpc_rb_cCall, "write_flag=", grpc_rb_call_set_write_flag,
                    1);
@@ -917,13 +948,11 @@ void Init_grpc_call() {
 
   /* Ids used to support call attributes */
   id_metadata = rb_intern("metadata");
+  id_trailing_metadata = rb_intern("trailing_metadata");
   id_status = rb_intern("status");
   id_write_flag = rb_intern("write_flag");
 
   /* Ids used by the c wrapping internals. */
-  id_cq = rb_intern("__cq");
-  id_flags = rb_intern("__flags");
-  id_input_md = rb_intern("__input_md");
   id_credentials = rb_intern("__credentials");
 
   /* Ids used in constructing the batch result. */
@@ -947,15 +976,19 @@ void Init_grpc_call() {
 
 /* Gets the call from the ruby object */
 grpc_call *grpc_rb_get_wrapped_call(VALUE v) {
-  grpc_call *c = NULL;
-  TypedData_Get_Struct(v, grpc_call, &grpc_call_data_type, c);
-  return c;
+  grpc_rb_call *call = NULL;
+  TypedData_Get_Struct(v, grpc_rb_call, &grpc_call_data_type, call);
+  return call->wrapped;
 }
 
 /* Obtains the wrapped object for a given call */
-VALUE grpc_rb_wrap_call(grpc_call *c) {
-  if (c == NULL) {
+VALUE grpc_rb_wrap_call(grpc_call *c, grpc_completion_queue *q) {
+  grpc_rb_call *wrapper;
+  if (c == NULL || q == NULL) {
     return Qnil;
   }
-  return TypedData_Wrap_Struct(grpc_rb_cCall, &grpc_call_data_type, c);
+  wrapper = ALLOC(grpc_rb_call);
+  wrapper->wrapped = c;
+  wrapper->queue = q;
+  return TypedData_Wrap_Struct(grpc_rb_cCall, &grpc_call_data_type, wrapper);
 }
diff --git a/src/ruby/ext/grpc/rb_call.h b/src/ruby/ext/grpc/rb_call.h
index 24adb3477ba26b00620d37a2f142b33c52340c90..56becdc5a4edfaf6101d17bb531314e6fd914d6b 100644
--- a/src/ruby/ext/grpc/rb_call.h
+++ b/src/ruby/ext/grpc/rb_call.h
@@ -42,7 +42,7 @@
 grpc_call* grpc_rb_get_wrapped_call(VALUE v);
 
 /* Gets the VALUE corresponding to given grpc_call. */
-VALUE grpc_rb_wrap_call(grpc_call* c);
+VALUE grpc_rb_wrap_call(grpc_call *c, grpc_completion_queue *q);
 
 /* Provides the details of a call error */
 const char* grpc_call_error_detail_of(grpc_call_error err);
diff --git a/src/ruby/ext/grpc/rb_call_credentials.c b/src/ruby/ext/grpc/rb_call_credentials.c
index 79ca5b32cedaaba41887bd3ae5ff4e5e25a1666d..9b6675da8465d8119a1020d7abbd7100f35fd87b 100644
--- a/src/ruby/ext/grpc/rb_call_credentials.c
+++ b/src/ruby/ext/grpc/rb_call_credentials.c
@@ -211,35 +211,6 @@ VALUE grpc_rb_wrap_call_credentials(grpc_call_credentials *c, VALUE mark) {
   return rb_wrapper;
 }
 
-/* Clones CallCredentials instances.
-   Gives CallCredentials a consistent implementation of Ruby's object copy/dup
-   protocol. */
-static VALUE grpc_rb_call_credentials_init_copy(VALUE copy, VALUE orig) {
-  grpc_rb_call_credentials *orig_cred = NULL;
-  grpc_rb_call_credentials *copy_cred = NULL;
-
-  if (copy == orig) {
-    return copy;
-  }
-
-  /* Raise an error if orig is not a credentials object or a subclass. */
-  if (TYPE(orig) != T_DATA ||
-      RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_call_credentials_free) {
-    rb_raise(rb_eTypeError, "not a %s",
-             rb_obj_classname(grpc_rb_cCallCredentials));
-  }
-
-  TypedData_Get_Struct(orig, grpc_rb_call_credentials,
-                       &grpc_rb_call_credentials_data_type, orig_cred);
-  TypedData_Get_Struct(copy, grpc_rb_call_credentials,
-                       &grpc_rb_call_credentials_data_type, copy_cred);
-
-  /* use ruby's MEMCPY to make a byte-for-byte copy of the credentials
-   * wrapper object. */
-  MEMCPY(copy_cred, orig_cred, grpc_rb_call_credentials, 1);
-  return copy;
-}
-
 /* The attribute used on the mark object to hold the callback */
 static ID id_callback;
 
@@ -308,7 +279,7 @@ void Init_grpc_call_credentials() {
   rb_define_method(grpc_rb_cCallCredentials, "initialize",
                    grpc_rb_call_credentials_init, 1);
   rb_define_method(grpc_rb_cCallCredentials, "initialize_copy",
-                   grpc_rb_call_credentials_init_copy, 1);
+                   grpc_rb_cannot_init_copy, 1);
   rb_define_method(grpc_rb_cCallCredentials, "compose",
                    grpc_rb_call_credentials_compose, -1);
 
diff --git a/src/ruby/ext/grpc/rb_channel.c b/src/ruby/ext/grpc/rb_channel.c
index 6943c93d4aad0aa3a7b9003f9e30c8bbbf0f3d67..18a15d01252267445b32d7ae017fae2ad8105ad5 100644
--- a/src/ruby/ext/grpc/rb_channel.c
+++ b/src/ruby/ext/grpc/rb_channel.c
@@ -39,6 +39,7 @@
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
 #include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
 #include "rb_grpc.h"
 #include "rb_call.h"
 #include "rb_channel_args.h"
@@ -55,11 +56,6 @@ static ID id_channel;
  * GCed before the channel */
 static ID id_target;
 
-/* id_cqueue is the name of the hidden ivar that preserves a reference to the
- * completion queue used to create the call, preserved so that it does not get
- * GCed before the channel */
-static ID id_cqueue;
-
 /* id_insecure_channel is used to indicate that a channel is insecure */
 static VALUE id_insecure_channel;
 
@@ -231,40 +227,11 @@ static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
   return Qnil;
 }
 
-/* Clones Channel instances.
-
-   Gives Channel a consistent implementation of Ruby's object copy/dup
-   protocol. */
-static VALUE grpc_rb_channel_init_copy(VALUE copy, VALUE orig) {
-  grpc_rb_channel *orig_ch = NULL;
-  grpc_rb_channel *copy_ch = NULL;
-
-  if (copy == orig) {
-    return copy;
-  }
-
-  /* Raise an error if orig is not a channel object or a subclass. */
-  if (TYPE(orig) != T_DATA ||
-      RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_channel_free) {
-    rb_raise(rb_eTypeError, "not a %s", rb_obj_classname(grpc_rb_cChannel));
-    return Qnil;
-  }
-
-  TypedData_Get_Struct(orig, grpc_rb_channel, &grpc_channel_data_type, orig_ch);
-  TypedData_Get_Struct(copy, grpc_rb_channel, &grpc_channel_data_type, copy_ch);
-
-  /* use ruby's MEMCPY to make a byte-for-byte copy of the channel wrapper
-   * object. */
-  MEMCPY(copy_ch, orig_ch, grpc_rb_channel, 1);
-  return copy;
-}
-
 /* Create a call given a grpc_channel, in order to call method. The request
    is not sent until grpc_call_invoke is called. */
-static VALUE grpc_rb_channel_create_call(VALUE self, VALUE cqueue,
-                                         VALUE parent, VALUE mask,
-                                         VALUE method, VALUE host,
-                                         VALUE deadline) {
+static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent,
+                                         VALUE mask, VALUE method,
+                                         VALUE host, VALUE deadline) {
   VALUE res = Qnil;
   grpc_rb_channel *wrapper = NULL;
   grpc_call *call = NULL;
@@ -284,7 +251,7 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE cqueue,
     parent_call = grpc_rb_get_wrapped_call(parent);
   }
 
-  cq = grpc_rb_get_wrapped_completion_queue(cqueue);
+  cq = grpc_completion_queue_create(NULL);
   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
   ch = wrapper->wrapped;
   if (ch == NULL) {
@@ -301,15 +268,11 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE cqueue,
              method_chars);
     return Qnil;
   }
-  res = grpc_rb_wrap_call(call);
+  res = grpc_rb_wrap_call(call, cq);
 
   /* Make this channel an instance attribute of the call so that it is not GCed
    * before the call. */
   rb_ivar_set(res, id_channel, self);
-
-  /* Make the completion queue an instance attribute of the call so that it is
-   * not GCed before the call. */
-  rb_ivar_set(res, id_cqueue, cqueue);
   return res;
 }
 
@@ -387,7 +350,7 @@ void Init_grpc_channel() {
   /* Provides a ruby constructor and support for dup/clone. */
   rb_define_method(grpc_rb_cChannel, "initialize", grpc_rb_channel_init, -1);
   rb_define_method(grpc_rb_cChannel, "initialize_copy",
-                   grpc_rb_channel_init_copy, 1);
+                   grpc_rb_cannot_init_copy, 1);
 
   /* Add ruby analogues of the Channel methods. */
   rb_define_method(grpc_rb_cChannel, "connectivity_state",
@@ -396,13 +359,12 @@ void Init_grpc_channel() {
   rb_define_method(grpc_rb_cChannel, "watch_connectivity_state",
                    grpc_rb_channel_watch_connectivity_state, 4);
   rb_define_method(grpc_rb_cChannel, "create_call",
-                   grpc_rb_channel_create_call, 6);
+                   grpc_rb_channel_create_call, 5);
   rb_define_method(grpc_rb_cChannel, "target", grpc_rb_channel_get_target, 0);
   rb_define_method(grpc_rb_cChannel, "destroy", grpc_rb_channel_destroy, 0);
   rb_define_alias(grpc_rb_cChannel, "close", "destroy");
 
   id_channel = rb_intern("__channel");
-  id_cqueue = rb_intern("__cqueue");
   id_target = rb_intern("__target");
   rb_define_const(grpc_rb_cChannel, "SSL_TARGET",
                   ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)));
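
With the queue argument removed, Channel#create_call takes five arguments: parent call, propagation mask, method, host override, and deadline. A minimal Ruby sketch of the new call site, assuming the gem built from this revision; the target address and method path are placeholders.

```ruby
require 'grpc'

# Placeholder target and method path; any reachable address shape works here.
ch = GRPC::Core::Channel.new('localhost:50051', nil, :this_channel_is_insecure)

deadline = Time.now + 5
# New 5-argument form: parent call, propagation mask, method, host override,
# and deadline. The completion queue is now created inside the extension.
call = ch.create_call(nil, nil, '/some.Service/SomeMethod', nil, deadline)
```
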
diff --git a/src/ruby/ext/grpc/rb_channel_credentials.c b/src/ruby/ext/grpc/rb_channel_credentials.c
index cbb23885aa60bbd8151a8c0728572269615a2773..5b7aa3417e6ce300a2bd561eec43d0266ab8b44e 100644
--- a/src/ruby/ext/grpc/rb_channel_credentials.c
+++ b/src/ruby/ext/grpc/rb_channel_credentials.c
@@ -126,36 +126,6 @@ VALUE grpc_rb_wrap_channel_credentials(grpc_channel_credentials *c, VALUE mark)
   return rb_wrapper;
 }
 
-/* Clones ChannelCredentials instances.
-   Gives ChannelCredentials a consistent implementation of Ruby's object copy/dup
-   protocol. */
-static VALUE grpc_rb_channel_credentials_init_copy(VALUE copy, VALUE orig) {
-  grpc_rb_channel_credentials *orig_cred = NULL;
-  grpc_rb_channel_credentials *copy_cred = NULL;
-
-  if (copy == orig) {
-    return copy;
-  }
-
-  /* Raise an error if orig is not a credentials object or a subclass. */
-  if (TYPE(orig) != T_DATA ||
-      RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_channel_credentials_free) {
-    rb_raise(rb_eTypeError, "not a %s",
-             rb_obj_classname(grpc_rb_cChannelCredentials));
-  }
-
-  TypedData_Get_Struct(orig, grpc_rb_channel_credentials,
-                       &grpc_rb_channel_credentials_data_type, orig_cred);
-  TypedData_Get_Struct(copy, grpc_rb_channel_credentials,
-                       &grpc_rb_channel_credentials_data_type, copy_cred);
-
-  /* use ruby's MEMCPY to make a byte-for-byte copy of the credentials
-   * wrapper object. */
-  MEMCPY(copy_cred, orig_cred, grpc_rb_channel_credentials, 1);
-  return copy;
-}
-
-
 /* The attribute used on the mark object to hold the pem_root_certs. */
 static ID id_pem_root_certs;
 
@@ -271,7 +241,7 @@ void Init_grpc_channel_credentials() {
   rb_define_method(grpc_rb_cChannelCredentials, "initialize",
                    grpc_rb_channel_credentials_init, -1);
   rb_define_method(grpc_rb_cChannelCredentials, "initialize_copy",
-                   grpc_rb_channel_credentials_init_copy, 1);
+                   grpc_rb_cannot_init_copy, 1);
   rb_define_method(grpc_rb_cChannelCredentials, "compose",
                    grpc_rb_channel_credentials_compose, -1);
   rb_define_module_function(grpc_rb_cChannelCredentials,
diff --git a/src/ruby/ext/grpc/rb_completion_queue.c b/src/ruby/ext/grpc/rb_completion_queue.c
index 9466402db0c6b4887698b704e8b7a60347a23f79..fd75d2f691f38039aca516885406990eda9da8aa 100644
--- a/src/ruby/ext/grpc/rb_completion_queue.c
+++ b/src/ruby/ext/grpc/rb_completion_queue.c
@@ -40,12 +40,9 @@
 
 #include <grpc/grpc.h>
 #include <grpc/support/time.h>
+#include <grpc/support/log.h>
 #include "rb_grpc.h"
 
-/* grpc_rb_cCompletionQueue is the ruby class that proxies
- * grpc_completion_queue. */
-static VALUE grpc_rb_cCompletionQueue = Qnil;
-
 /* Used to allow grpc_completion_queue_next call to release the GIL */
 typedef struct next_call_stack {
   grpc_completion_queue *cq;
@@ -55,23 +52,6 @@ typedef struct next_call_stack {
   volatile int interrupted;
 } next_call_stack;
 
-/* Calls grpc_completion_queue_next without holding the ruby GIL */
-static void *grpc_rb_completion_queue_next_no_gil(void *param) {
-  next_call_stack *const next_call = (next_call_stack*)param;
-  gpr_timespec increment = gpr_time_from_millis(20, GPR_TIMESPAN);
-  gpr_timespec deadline;
-  do {
-    deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), increment);
-    next_call->event = grpc_completion_queue_next(next_call->cq,
-                                                  deadline, NULL);
-    if (next_call->event.type != GRPC_QUEUE_TIMEOUT ||
-        gpr_time_cmp(deadline, next_call->timeout) > 0) {
-      break;
-    }
-  } while (!next_call->interrupted);
-  return NULL;
-}
-
 /* Calls grpc_completion_queue_pluck without holding the ruby GIL */
 static void *grpc_rb_completion_queue_pluck_no_gil(void *param) {
   next_call_stack *const next_call = (next_call_stack*)param;
@@ -90,107 +70,32 @@ static void *grpc_rb_completion_queue_pluck_no_gil(void *param) {
   return NULL;
 }
 
-/* Shuts down and drains the completion queue if necessary.
- *
- * This is done when the ruby completion queue object is about to be GCed.
- */
-static void grpc_rb_completion_queue_shutdown_drain(grpc_completion_queue *cq) {
-  next_call_stack next_call;
-  grpc_completion_type type;
-  int drained = 0;
-  MEMZERO(&next_call, next_call_stack, 1);
-
-  grpc_completion_queue_shutdown(cq);
-  next_call.cq = cq;
-  next_call.event.type = GRPC_QUEUE_TIMEOUT;
-  /* TODO: the timeout should be a module level constant that defaults
-   * to gpr_inf_future(GPR_CLOCK_REALTIME).
-   *
-   * - at the moment this does not work, it stalls.  Using a small timeout like
-   *   this one works, and leads to fast test run times; a longer timeout was
-   *   causing unnecessary delays in the test runs.
-   *
-   * - investigate further, this is probably another example of C-level cleanup
-   * not working consistently in all cases.
-   */
-  next_call.timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
-                                   gpr_time_from_micros(5e3, GPR_TIMESPAN));
-  do {
-    rb_thread_call_without_gvl(grpc_rb_completion_queue_next_no_gil,
-                               (void *)&next_call, NULL, NULL);
-    type = next_call.event.type;
-    if (type == GRPC_QUEUE_TIMEOUT) break;
-    if (type != GRPC_QUEUE_SHUTDOWN) {
-      ++drained;
-      rb_warning("completion queue shutdown: %d undrained events", drained);
-    }
-  } while (type != GRPC_QUEUE_SHUTDOWN);
-}
-
 /* Helper function to free a completion queue. */
-static void grpc_rb_completion_queue_destroy(void *p) {
-  grpc_completion_queue *cq = NULL;
-  if (p == NULL) {
-    return;
-  }
-  cq = (grpc_completion_queue *)p;
-  grpc_rb_completion_queue_shutdown_drain(cq);
+void grpc_rb_completion_queue_destroy(grpc_completion_queue *cq) {
+  /* Every function that adds an event to a queue also synchronously plucks
+     that event from the queue, and holds a reference to the Ruby object that
+     holds the queue, so we only get to this point if all of those functions
+     have completed, and the queue is empty */
+  grpc_completion_queue_shutdown(cq);
   grpc_completion_queue_destroy(cq);
 }
 
-static rb_data_type_t grpc_rb_completion_queue_data_type = {
-    "grpc_completion_queue",
-    {GRPC_RB_GC_NOT_MARKED, grpc_rb_completion_queue_destroy,
-     GRPC_RB_MEMSIZE_UNAVAILABLE, {NULL, NULL}},
-    NULL, NULL,
-#ifdef RUBY_TYPED_FREE_IMMEDIATELY
-    /* cannot immediately free because grpc_rb_completion_queue_shutdown_drain
-     * calls rb_thread_call_without_gvl. */
-    0,
-#endif
-};
-
-/* Releases the c-level resources associated with a completion queue */
-static VALUE grpc_rb_completion_queue_close(VALUE self) {
-  grpc_completion_queue* cq = grpc_rb_get_wrapped_completion_queue(self);
-  grpc_rb_completion_queue_destroy(cq);
-  RTYPEDDATA_DATA(self) = NULL;
-  return Qnil;
-}
-
-/* Allocates a completion queue. */
-static VALUE grpc_rb_completion_queue_alloc(VALUE cls) {
-  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
-  if (cq == NULL) {
-    rb_raise(rb_eArgError, "could not create a completion queue: not sure why");
-  }
-  return TypedData_Wrap_Struct(cls, &grpc_rb_completion_queue_data_type, cq);
-}
-
 static void unblock_func(void *param) {
   next_call_stack *const next_call = (next_call_stack*)param;
   next_call->interrupted = 1;
 }
 
-/* Blocks until the next event for given tag is available, and returns the
- * event. */
-grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
-                                                VALUE timeout) {
+/* Does the same thing as grpc_completion_queue_pluck, while properly releasing
+   the GVL and handling interrupts */
+grpc_event rb_completion_queue_pluck(grpc_completion_queue *queue, void *tag,
+                                     gpr_timespec deadline, void *reserved) {
   next_call_stack next_call;
   MEMZERO(&next_call, next_call_stack, 1);
-  TypedData_Get_Struct(self, grpc_completion_queue,
-                       &grpc_rb_completion_queue_data_type, next_call.cq);
-  if (TYPE(timeout) == T_NIL) {
-    next_call.timeout = gpr_inf_future(GPR_CLOCK_REALTIME);
-  } else {
-    next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
-  }
-  if (TYPE(tag) == T_NIL) {
-    next_call.tag = NULL;
-  } else {
-    next_call.tag = ROBJECT(tag);
-  }
+  next_call.cq = queue;
+  next_call.timeout = deadline;
+  next_call.tag = tag;
   next_call.event.type = GRPC_QUEUE_TIMEOUT;
+  (void)reserved;
   /* Loop until we finish a pluck without an interruption. The internal
      pluck function runs either until it is interrupted or it gets an
      event, or time runs out.
@@ -210,27 +115,3 @@ grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
            next_call.event.type == GRPC_QUEUE_TIMEOUT);
   return next_call.event;
 }
-
-void Init_grpc_completion_queue() {
-  grpc_rb_cCompletionQueue =
-      rb_define_class_under(grpc_rb_mGrpcCore, "CompletionQueue", rb_cObject);
-
-  /* constructor: uses an alloc func without an initializer. Using a simple
-     alloc func works here as the grpc header does not specify any args for
-     this func, so no separate initialization step is necessary. */
-  rb_define_alloc_func(grpc_rb_cCompletionQueue,
-                       grpc_rb_completion_queue_alloc);
-
-  /* close: Provides a way to close the underlying file descriptor without
-     waiting for ruby garbage collection. */
-  rb_define_method(grpc_rb_cCompletionQueue, "close",
-                   grpc_rb_completion_queue_close, 0);
-}
-
-/* Gets the wrapped completion queue from the ruby wrapper */
-grpc_completion_queue *grpc_rb_get_wrapped_completion_queue(VALUE v) {
-  grpc_completion_queue *cq = NULL;
-  TypedData_Get_Struct(v, grpc_completion_queue,
-                       &grpc_rb_completion_queue_data_type, cq);
-  return cq;
-}
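
Since CompletionQueue is no longer exposed to Ruby, every run_batch call site shrinks from (queue, tag, deadline, ops) to just the ops hash. A before/after sketch, assuming `call` is a Core::Call such as the one created in the earlier example; the old form is shown commented out.

```ruby
# Old form: the caller supplied a queue, a tag object and a deadline per batch.
# cq = GRPC::Core::CompletionQueue.new
# call.run_batch(cq, Object.new, GRPC::Core::TimeConsts::INFINITE_FUTURE,
#                GRPC::Core::CallOps::SEND_INITIAL_METADATA => {})

# New form: the call plucks from its own internal queue, so only the ops stay.
call.run_batch(GRPC::Core::CallOps::SEND_INITIAL_METADATA => {})
```
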
diff --git a/src/ruby/ext/grpc/rb_completion_queue.h b/src/ruby/ext/grpc/rb_completion_queue.h
index 42de43c3fbb3604809c2c8459b13fa07698993c0..9f8f6aa5fff67328fb22a86b8f46bb273c7ae012 100644
--- a/src/ruby/ext/grpc/rb_completion_queue.h
+++ b/src/ruby/ext/grpc/rb_completion_queue.h
@@ -41,15 +41,14 @@
 /* Gets the wrapped completion queue from the ruby wrapper */
 grpc_completion_queue *grpc_rb_get_wrapped_completion_queue(VALUE v);
 
+void grpc_rb_completion_queue_destroy(grpc_completion_queue *cq);
+
 /**
  * Makes the implementation of CompletionQueue#pluck available in other files
  *
  * This avoids having code that holds the GIL repeated at multiple sites.
  */
-grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
-                                                VALUE timeout);
-
-/* Initializes the CompletionQueue class. */
-void Init_grpc_completion_queue();
+grpc_event rb_completion_queue_pluck(grpc_completion_queue *queue, void *tag,
+                                     gpr_timespec deadline, void *reserved);
 
 #endif /* GRPC_RB_COMPLETION_QUEUE_H_ */
diff --git a/src/ruby/ext/grpc/rb_grpc.c b/src/ruby/ext/grpc/rb_grpc.c
index 9246893f9fb5aaff0d21805f9d3d10a825e16c33..188a62475d21690430e21cb400e0ab550b5d290f 100644
--- a/src/ruby/ext/grpc/rb_grpc.c
+++ b/src/ruby/ext/grpc/rb_grpc.c
@@ -46,7 +46,6 @@
 #include "rb_call_credentials.h"
 #include "rb_channel.h"
 #include "rb_channel_credentials.h"
-#include "rb_completion_queue.h"
 #include "rb_loader.h"
 #include "rb_server.h"
 #include "rb_server_credentials.h"
@@ -85,7 +84,7 @@ VALUE grpc_rb_cannot_init(VALUE self) {
 VALUE grpc_rb_cannot_init_copy(VALUE copy, VALUE self) {
   (void)self;
   rb_raise(rb_eTypeError,
-           "initialization of %s only allowed from the gRPC native layer",
+           "Copy initialization of %s is not supported",
            rb_obj_classname(copy));
   return Qnil;
 }
@@ -318,7 +317,7 @@ void Init_grpc_c() {
   grpc_rb_mGrpcCore = rb_define_module_under(grpc_rb_mGRPC, "Core");
   grpc_rb_sNewServerRpc =
       rb_struct_define("NewServerRpc", "method", "host",
-                       "deadline", "metadata", "call", "cq", NULL);
+                       "deadline", "metadata", "call", NULL);
   grpc_rb_sStatus =
       rb_struct_define("Status", "code", "details", "metadata", NULL);
   sym_code = ID2SYM(rb_intern("code"));
@@ -326,7 +325,6 @@ void Init_grpc_c() {
   sym_metadata = ID2SYM(rb_intern("metadata"));
 
   Init_grpc_channel();
-  Init_grpc_completion_queue();
   Init_grpc_call();
   Init_grpc_call_credentials();
   Init_grpc_channel_credentials();
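
All of the wrapper classes now route initialize_copy to grpc_rb_cannot_init_copy, so dup and clone raise instead of byte-copying the underlying C struct. A quick sketch of the observable behaviour, assuming a channel like the one above; the message text in the comment follows the new format string and is approximate.

```ruby
require 'grpc'

ch = GRPC::Core::Channel.new('localhost:50051', nil, :this_channel_is_insecure)
begin
  ch.dup  # initialize_copy now raises instead of MEMCPY-ing the wrapper
rescue TypeError => e
  # Expected to read roughly:
  #   "Copy initialization of GRPC::Core::Channel is not supported"
  puts e.message
end
```
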
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 4d9707638f18a3a9dd9927cd27165cef73229d89..9748cb576bada30eeb65a8d68d907f34a6e2c54d 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -128,6 +128,7 @@ grpc_is_binary_header_type grpc_is_binary_header_import;
 grpc_call_error_to_string_type grpc_call_error_to_string_import;
 grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
 grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
+grpc_use_signal_type grpc_use_signal_import;
 grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import;
 grpc_auth_context_property_iterator_type grpc_auth_context_property_iterator_import;
 grpc_auth_context_peer_identity_type grpc_auth_context_peer_identity_import;
@@ -399,6 +400,7 @@ void grpc_rb_load_imports(HMODULE library) {
   grpc_call_error_to_string_import = (grpc_call_error_to_string_type) GetProcAddress(library, "grpc_call_error_to_string");
   grpc_insecure_channel_create_from_fd_import = (grpc_insecure_channel_create_from_fd_type) GetProcAddress(library, "grpc_insecure_channel_create_from_fd");
   grpc_server_add_insecure_channel_from_fd_import = (grpc_server_add_insecure_channel_from_fd_type) GetProcAddress(library, "grpc_server_add_insecure_channel_from_fd");
+  grpc_use_signal_import = (grpc_use_signal_type) GetProcAddress(library, "grpc_use_signal");
   grpc_auth_property_iterator_next_import = (grpc_auth_property_iterator_next_type) GetProcAddress(library, "grpc_auth_property_iterator_next");
   grpc_auth_context_property_iterator_import = (grpc_auth_context_property_iterator_type) GetProcAddress(library, "grpc_auth_context_property_iterator");
   grpc_auth_context_peer_identity_import = (grpc_auth_context_peer_identity_type) GetProcAddress(library, "grpc_auth_context_peer_identity");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index 072e29c232792291dfeaa1650e52d78f98e8d0cc..6f0974e31b425e53bd5c9bb8eb8de03b23c4cad5 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -335,6 +335,9 @@ extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_fr
 typedef void(*grpc_server_add_insecure_channel_from_fd_type)(grpc_server *server, grpc_completion_queue *cq, int fd);
 extern grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
 #define grpc_server_add_insecure_channel_from_fd grpc_server_add_insecure_channel_from_fd_import
+typedef void(*grpc_use_signal_type)(int signum);
+extern grpc_use_signal_type grpc_use_signal_import;
+#define grpc_use_signal grpc_use_signal_import
 typedef const grpc_auth_property *(*grpc_auth_property_iterator_next_type)(grpc_auth_property_iterator *it);
 extern grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import;
 #define grpc_auth_property_iterator_next grpc_auth_property_iterator_next_import
@@ -467,7 +470,7 @@ extern grpc_byte_buffer_length_type grpc_byte_buffer_length_import;
 typedef void(*grpc_byte_buffer_destroy_type)(grpc_byte_buffer *byte_buffer);
 extern grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import;
 #define grpc_byte_buffer_destroy grpc_byte_buffer_destroy_import
-typedef void(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer);
+typedef int(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer);
 extern grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import;
 #define grpc_byte_buffer_reader_init grpc_byte_buffer_reader_init_import
 typedef void(*grpc_byte_buffer_reader_destroy_type)(grpc_byte_buffer_reader *reader);
diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c
index f108b8acfcdbc1160d7eafb43a3fddb1b50de1f0..bf26841fd226afb4dfe75b7fff996e6a1eec6d4c 100644
--- a/src/ruby/ext/grpc/rb_server.c
+++ b/src/ruby/ext/grpc/rb_server.c
@@ -38,6 +38,7 @@
 
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
+#include <grpc/support/log.h>
 #include "rb_call.h"
 #include "rb_channel_args.h"
 #include "rb_completion_queue.h"
@@ -53,53 +54,51 @@ static ID id_at;
 /* id_insecure_server is used to indicate that a server is insecure */
 static VALUE id_insecure_server;
 
-/* grpc_rb_server wraps a grpc_server.  It provides a peer ruby object,
-  'mark' to minimize copying when a server is created from ruby. */
+/* grpc_rb_server wraps a grpc_server. */
 typedef struct grpc_rb_server {
-  /* Holder of ruby objects involved in constructing the server */
-  VALUE mark;
   /* The actual server */
   grpc_server *wrapped;
   grpc_completion_queue *queue;
 } grpc_rb_server;
 
+static void destroy_server(grpc_rb_server *server, gpr_timespec deadline) {
+  grpc_event ev;
+  if (server->wrapped != NULL) {
+    grpc_server_shutdown_and_notify(server->wrapped, server->queue, NULL);
+    ev = rb_completion_queue_pluck(server->queue, NULL, deadline, NULL);
+    if (ev.type == GRPC_QUEUE_TIMEOUT) {
+      grpc_server_cancel_all_calls(server->wrapped);
+      rb_completion_queue_pluck(server->queue, NULL,
+                                gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+    }
+    grpc_server_destroy(server->wrapped);
+    grpc_rb_completion_queue_destroy(server->queue);
+    server->wrapped = NULL;
+    server->queue = NULL;
+  }
+}
+
 /* Destroys server instances. */
 static void grpc_rb_server_free(void *p) {
   grpc_rb_server *svr = NULL;
+  gpr_timespec deadline;
   if (p == NULL) {
     return;
   };
   svr = (grpc_rb_server *)p;
 
-  /* Deletes the wrapped object if the mark object is Qnil, which indicates
-     that no other object is the actual owner. */
-  /* grpc_server_shutdown does not exist. Change this to something that does
-     or delete it */
-  if (svr->wrapped != NULL && svr->mark == Qnil) {
-    // grpc_server_shutdown(svr->wrapped);
-    // Aborting to indicate a bug
-    abort();
-    grpc_server_destroy(svr->wrapped);
-  }
+  deadline = gpr_time_add(
+      gpr_now(GPR_CLOCK_REALTIME),
+      gpr_time_from_seconds(2, GPR_TIMESPAN));
 
-  xfree(p);
-}
+  destroy_server(svr, deadline);
 
-/* Protects the mark object from GC */
-static void grpc_rb_server_mark(void *p) {
-  grpc_rb_server *server = NULL;
-  if (p == NULL) {
-    return;
-  }
-  server = (grpc_rb_server *)p;
-  if (server->mark != Qnil) {
-    rb_gc_mark(server->mark);
-  }
+  xfree(p);
 }
 
 static const rb_data_type_t grpc_rb_server_data_type = {
     "grpc_server",
-    {grpc_rb_server_mark, grpc_rb_server_free, GRPC_RB_MEMSIZE_UNAVAILABLE,
+    {GRPC_RB_GC_NOT_MARKED, grpc_rb_server_free, GRPC_RB_MEMSIZE_UNAVAILABLE,
      {NULL, NULL}},
     NULL,
     NULL,
@@ -116,23 +115,20 @@ static const rb_data_type_t grpc_rb_server_data_type = {
 static VALUE grpc_rb_server_alloc(VALUE cls) {
   grpc_rb_server *wrapper = ALLOC(grpc_rb_server);
   wrapper->wrapped = NULL;
-  wrapper->mark = Qnil;
   return TypedData_Wrap_Struct(cls, &grpc_rb_server_data_type, wrapper);
 }
 
 /*
   call-seq:
-    cq = CompletionQueue.new
-    server = Server.new(cq, {'arg1': 'value1'})
+    server = Server.new({'arg1': 'value1'})
 
   Initializes server instances. */
-static VALUE grpc_rb_server_init(VALUE self, VALUE cqueue, VALUE channel_args) {
-  grpc_completion_queue *cq = NULL;
+static VALUE grpc_rb_server_init(VALUE self, VALUE channel_args) {
+  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
   grpc_rb_server *wrapper = NULL;
   grpc_server *srv = NULL;
   grpc_channel_args args;
   MEMZERO(&args, grpc_channel_args, 1);
-  cq = grpc_rb_get_wrapped_completion_queue(cqueue);
   TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type,
                        wrapper);
   grpc_rb_hash_convert_to_channel_args(channel_args, &args);
@@ -148,41 +144,9 @@ static VALUE grpc_rb_server_init(VALUE self, VALUE cqueue, VALUE channel_args) {
   wrapper->wrapped = srv;
   wrapper->queue = cq;
 
-  /* Add the cq as the server's mark object. This ensures the ruby cq can't be
-     GCed before the server */
-  wrapper->mark = cqueue;
   return self;
 }
 
-/* Clones Server instances.
-
-   Gives Server a consistent implementation of Ruby's object copy/dup
-   protocol. */
-static VALUE grpc_rb_server_init_copy(VALUE copy, VALUE orig) {
-  grpc_rb_server *orig_srv = NULL;
-  grpc_rb_server *copy_srv = NULL;
-
-  if (copy == orig) {
-    return copy;
-  }
-
-  /* Raise an error if orig is not a server object or a subclass. */
-  if (TYPE(orig) != T_DATA ||
-      RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_server_free) {
-    rb_raise(rb_eTypeError, "not a %s", rb_obj_classname(grpc_rb_cServer));
-  }
-
-  TypedData_Get_Struct(orig, grpc_rb_server, &grpc_rb_server_data_type,
-                       orig_srv);
-  TypedData_Get_Struct(copy, grpc_rb_server, &grpc_rb_server_data_type,
-                       copy_srv);
-
-  /* use ruby's MEMCPY to make a byte-for-byte copy of the server wrapper
-     object. */
-  MEMCPY(copy_srv, orig_srv, grpc_rb_server, 1);
-  return copy;
-}
-
 /* request_call_stack holds various values used by the
  * grpc_rb_server_request_call function */
 typedef struct request_call_stack {
@@ -208,65 +172,57 @@ static void grpc_request_call_stack_cleanup(request_call_stack* st) {
 }
 
 /* call-seq:
-   cq = CompletionQueue.new
-   tag = Object.new
-   timeout = 10
-   server.request_call(cqueue, tag, timeout)
+   server.request_call
 
    Requests notification of a new call on a server. */
-static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue,
-                                         VALUE tag_new, VALUE timeout) {
+static VALUE grpc_rb_server_request_call(VALUE self) {
   grpc_rb_server *s = NULL;
   grpc_call *call = NULL;
   grpc_event ev;
   grpc_call_error err;
   request_call_stack st;
   VALUE result;
+  void *tag = (void*)&st;
+  grpc_completion_queue *call_queue = grpc_completion_queue_create(NULL);
   gpr_timespec deadline;
   TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
   if (s->wrapped == NULL) {
     rb_raise(rb_eRuntimeError, "destroyed!");
     return Qnil;
-  } else {
-    grpc_request_call_stack_init(&st);
-    /* call grpc_server_request_call, then wait for it to complete using
-     * pluck_event */
-    err = grpc_server_request_call(
-        s->wrapped, &call, &st.details, &st.md_ary,
-        grpc_rb_get_wrapped_completion_queue(cqueue),
-        grpc_rb_get_wrapped_completion_queue(s->mark),
-        ROBJECT(tag_new));
-    if (err != GRPC_CALL_OK) {
-      grpc_request_call_stack_cleanup(&st);
-      rb_raise(grpc_rb_eCallError,
-              "grpc_server_request_call failed: %s (code=%d)",
-               grpc_call_error_detail_of(err), err);
-      return Qnil;
-    }
-
-    ev = grpc_rb_completion_queue_pluck_event(s->mark, tag_new, timeout);
-    if (ev.type == GRPC_QUEUE_TIMEOUT) {
-      grpc_request_call_stack_cleanup(&st);
-      return Qnil;
-    }
-    if (!ev.success) {
-      grpc_request_call_stack_cleanup(&st);
-      rb_raise(grpc_rb_eCallError, "request_call completion failed");
-      return Qnil;
-    }
+  }
+  grpc_request_call_stack_init(&st);
+  /* call grpc_server_request_call, then wait for it to complete using
+   * pluck_event */
+  err = grpc_server_request_call(
+      s->wrapped, &call, &st.details, &st.md_ary,
+      call_queue, s->queue, tag);
+  if (err != GRPC_CALL_OK) {
+    grpc_request_call_stack_cleanup(&st);
+    rb_raise(grpc_rb_eCallError,
+             "grpc_server_request_call failed: %s (code=%d)",
+             grpc_call_error_detail_of(err), err);
+    return Qnil;
+  }
 
-    /* build the NewServerRpc struct result */
-    deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME);
-    result = rb_struct_new(
-        grpc_rb_sNewServerRpc, rb_str_new2(st.details.method),
-        rb_str_new2(st.details.host),
-        rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
-                   INT2NUM(deadline.tv_nsec)),
-        grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call), cqueue, NULL);
+  ev = rb_completion_queue_pluck(s->queue, tag,
+                                 gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+  if (!ev.success) {
     grpc_request_call_stack_cleanup(&st);
-    return result;
+    rb_raise(grpc_rb_eCallError, "request_call completion failed");
+    return Qnil;
   }
-  return Qnil;
+
+  /* build the NewServerRpc struct result */
+  deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME);
+  result = rb_struct_new(
+      grpc_rb_sNewServerRpc, rb_str_new2(st.details.method),
+      rb_str_new2(st.details.host),
+      rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
+                 INT2NUM(deadline.tv_nsec)),
+      grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call, call_queue),
+      NULL);
+  grpc_request_call_stack_cleanup(&st);
+  return result;
 }
 
 static VALUE grpc_rb_server_start(VALUE self) {
@@ -282,41 +238,33 @@ static VALUE grpc_rb_server_start(VALUE self) {
 
 /*
   call-seq:
-    cq = CompletionQueue.new
-    server = Server.new(cq, {'arg1': 'value1'})
+    server = Server.new({'arg1': 'value1'})
     ... // do stuff with server
     ...
     ... // to shutdown the server
-    server.destroy(cq)
+    server.destroy()
 
     ... // to shutdown the server with a timeout
-    server.destroy(cq, timeout)
+    server.destroy(timeout)
 
   Destroys server instances. */
 static VALUE grpc_rb_server_destroy(int argc, VALUE *argv, VALUE self) {
-  VALUE cqueue = Qnil;
   VALUE timeout = Qnil;
-  grpc_completion_queue *cq = NULL;
-  grpc_event ev;
+  gpr_timespec deadline;
   grpc_rb_server *s = NULL;
 
-  /* "11" == 1 mandatory args, 1 (timeout) is optional */
-  rb_scan_args(argc, argv, "11", &cqueue, &timeout);
-  cq = grpc_rb_get_wrapped_completion_queue(cqueue);
+  /* "01" == 0 mandatory args, 1 (timeout) is optional */
+  rb_scan_args(argc, argv, "01", &timeout);
   TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
-
-  if (s->wrapped != NULL) {
-    grpc_server_shutdown_and_notify(s->wrapped, cq, NULL);
-    ev = grpc_rb_completion_queue_pluck_event(cqueue, Qnil, timeout);
-    if (!ev.success) {
-      rb_warn("server shutdown failed, cancelling the calls, objects may leak");
-      grpc_server_cancel_all_calls(s->wrapped);
-      return Qfalse;
-    }
-    grpc_server_destroy(s->wrapped);
-    s->wrapped = NULL;
+  if (TYPE(timeout) == T_NIL) {
+    deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+  } else {
+    deadline = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
   }
-  return Qtrue;
+
+  destroy_server(s, deadline);
+
+  return Qnil;
 }
 
 /*
@@ -376,13 +324,13 @@ void Init_grpc_server() {
   rb_define_alloc_func(grpc_rb_cServer, grpc_rb_server_alloc);
 
   /* Provides a ruby constructor and support for dup/clone. */
-  rb_define_method(grpc_rb_cServer, "initialize", grpc_rb_server_init, 2);
+  rb_define_method(grpc_rb_cServer, "initialize", grpc_rb_server_init, 1);
   rb_define_method(grpc_rb_cServer, "initialize_copy",
-                   grpc_rb_server_init_copy, 1);
+                   grpc_rb_cannot_init_copy, 1);
 
   /* Add the server methods. */
   rb_define_method(grpc_rb_cServer, "request_call",
-                   grpc_rb_server_request_call, 3);
+                   grpc_rb_server_request_call, 0);
   rb_define_method(grpc_rb_cServer, "start", grpc_rb_server_start, 0);
   rb_define_method(grpc_rb_cServer, "destroy", grpc_rb_server_destroy, -1);
   rb_define_alias(grpc_rb_cServer, "close", "destroy");
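
Core::Server loses its queue parameters as well: construction takes only channel args, request_call takes no arguments and blocks on an internal queue, and destroy/close accept just an optional timeout. A minimal sketch under those changes, assuming the usual :this_port_is_insecure symbol and a client that eventually connects.

```ruby
require 'grpc'

server = GRPC::Core::Server.new(nil)  # channel args only; no CompletionQueue
server.add_http2_port('0.0.0.0:50051', :this_port_is_insecure)
server.start

# Blocks until a client call arrives; the NewServerRpc struct now carries
# method, host, deadline, metadata and call (the cq member is gone).
new_rpc = server.request_call

# destroy/close take an optional timeout; with no argument they wait for the
# shutdown notification on the server's internal queue.
server.destroy
```
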
diff --git a/src/ruby/ext/grpc/rb_server_credentials.c b/src/ruby/ext/grpc/rb_server_credentials.c
index 3b0fb6c910df0bb069c579a97ea3441561d381f2..a44ce715ae850492ceccadadfb5c9870d0e4a4fa 100644
--- a/src/ruby/ext/grpc/rb_server_credentials.c
+++ b/src/ruby/ext/grpc/rb_server_credentials.c
@@ -38,6 +38,7 @@
 
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
+#include <grpc/support/log.h>
 
 #include "rb_grpc.h"
 
@@ -46,8 +47,8 @@
 static VALUE grpc_rb_cServerCredentials = Qnil;
 
 /* grpc_rb_server_credentials wraps a grpc_server_credentials.  It provides a
-   peer ruby object, 'mark' to minimize copying when a server credential is
-   created from ruby. */
+   peer ruby object, 'mark' to hold references to objects involved in
+   constructing the server credentials. */
 typedef struct grpc_rb_server_credentials {
   /* Holder of ruby objects involved in constructing the server credentials */
   VALUE mark;
@@ -111,36 +112,6 @@ static VALUE grpc_rb_server_credentials_alloc(VALUE cls) {
                                wrapper);
 }
 
-/* Clones ServerCredentials instances.
-
-   Gives ServerCredentials a consistent implementation of Ruby's object copy/dup
-   protocol. */
-static VALUE grpc_rb_server_credentials_init_copy(VALUE copy, VALUE orig) {
-  grpc_rb_server_credentials *orig_ch = NULL;
-  grpc_rb_server_credentials *copy_ch = NULL;
-
-  if (copy == orig) {
-    return copy;
-  }
-
-  /* Raise an error if orig is not a server_credentials object or a subclass. */
-  if (TYPE(orig) != T_DATA ||
-      RDATA(orig)->dfree != (RUBY_DATA_FUNC)grpc_rb_server_credentials_free) {
-    rb_raise(rb_eTypeError, "not a %s",
-             rb_obj_classname(grpc_rb_cServerCredentials));
-  }
-
-  TypedData_Get_Struct(orig, grpc_rb_server_credentials,
-                       &grpc_rb_server_credentials_data_type, orig_ch);
-  TypedData_Get_Struct(copy, grpc_rb_server_credentials,
-                       &grpc_rb_server_credentials_data_type, copy_ch);
-
-  /* use ruby's MEMCPY to make a byte-for-byte copy of the server_credentials
-     wrapper object. */
-  MEMCPY(copy_ch, orig_ch, grpc_rb_server_credentials, 1);
-  return copy;
-}
-
 /* The attribute used on the mark object to preserve the pem_root_certs. */
 static ID id_pem_root_certs;
 
@@ -270,7 +241,7 @@ void Init_grpc_server_credentials() {
   rb_define_method(grpc_rb_cServerCredentials, "initialize",
                    grpc_rb_server_credentials_init, 3);
   rb_define_method(grpc_rb_cServerCredentials, "initialize_copy",
-                   grpc_rb_server_credentials_init_copy, 1);
+                   grpc_rb_cannot_init_copy, 1);
 
   id_pem_key_certs = rb_intern("__pem_key_certs");
   id_pem_root_certs = rb_intern("__pem_root_certs");
diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb
index b03ddbc193c1ed904424b7689a07391bb42e8719..4260d854376b56884e68a8ce9cbbd51ee5890adc 100644
--- a/src/ruby/lib/grpc/generic/active_call.rb
+++ b/src/ruby/lib/grpc/generic/active_call.rb
@@ -43,8 +43,7 @@ class Struct
         GRPC.logger.debug("Failing with status #{status}")
         # raise BadStatus, propagating the metadata if present.
         md = status.metadata
-        with_sym_keys = Hash[md.each_pair.collect { |x, y| [x.to_sym, y] }]
-        fail GRPC::BadStatus.new(status.code, status.details, with_sym_keys)
+        fail GRPC::BadStatus.new(status.code, status.details, md)
       end
       status
     end
@@ -61,7 +60,7 @@ module GRPC
     extend Forwardable
     attr_reader(:deadline)
     def_delegators :@call, :cancel, :metadata, :write_flag, :write_flag=,
-                   :peer, :peer_cert
+                   :peer, :peer_cert, :trailing_metadata
 
     # client_invoke begins a client invocation.
     #
@@ -75,17 +74,10 @@ module GRPC
     # if a keyword value is a list, multiple metadata for it's key are sent
     #
     # @param call [Call] a call on which to start and invocation
-    # @param q [CompletionQueue] the completion queue
     # @param metadata [Hash] the metadata
-    def self.client_invoke(call, q, metadata = {})
+    def self.client_invoke(call, metadata = {})
       fail(TypeError, '!Core::Call') unless call.is_a? Core::Call
-      unless q.is_a? Core::CompletionQueue
-        fail(TypeError, '!Core::CompletionQueue')
-      end
-      metadata_tag = Object.new
-      call.run_batch(q, metadata_tag, INFINITE_FUTURE,
-                     SEND_INITIAL_METADATA => metadata)
-      metadata_tag
+      call.run_batch(SEND_INITIAL_METADATA => metadata)
     end
 
     # Creates an ActiveCall.
@@ -102,26 +94,21 @@ module GRPC
     # deadline is the absolute deadline for the call.
     #
     # @param call [Call] the call used by the ActiveCall
-    # @param q [CompletionQueue] the completion queue used to accept
-    #          the call.  This queue will be closed on call completion.
     # @param marshal [Function] f(obj)->string that marshal requests
     # @param unmarshal [Function] f(string)->obj that unmarshals responses
     # @param deadline [Fixnum] the deadline for the call to complete
-    # @param metadata_tag [Object] the object use obtain metadata for clients
-    # @param started [true|false] indicates if the call has begun
-    def initialize(call, q, marshal, unmarshal, deadline, started: true,
-                   metadata_tag: nil)
+    # @param started [true|false] indicates that metadata was sent
+    # @param metadata_received [true|false] indicates if metadata has already
+    #     been received. Should always be true for server calls
+    def initialize(call, marshal, unmarshal, deadline, started: true,
+                   metadata_received: false)
       fail(TypeError, '!Core::Call') unless call.is_a? Core::Call
-      unless q.is_a? Core::CompletionQueue
-        fail(TypeError, '!Core::CompletionQueue')
-      end
       @call = call
-      @cq = q
       @deadline = deadline
       @marshal = marshal
-      @started = started
       @unmarshal = unmarshal
-      @metadata_tag = metadata_tag
+      @metadata_received = metadata_received
+      @metadata_sent = started
       @op_notifier = nil
     end
 
@@ -132,7 +119,7 @@ module GRPC
     end
 
     # cancelled indicates if the call was cancelled
-    def cancelled
+    def cancelled?
       !@call.status.nil? && @call.status.code == Core::StatusCodes::CANCELLED
     end
 
@@ -168,8 +155,11 @@ module GRPC
         SEND_CLOSE_FROM_CLIENT => nil
       }
       ops[RECV_STATUS_ON_CLIENT] = nil if assert_finished
-      batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE, ops)
+      batch_result = @call.run_batch(ops)
       return unless assert_finished
+      unless batch_result.status.nil?
+        @call.trailing_metadata = batch_result.status.metadata
+      end
       @call.status = batch_result.status
       op_is_done
       batch_result.check_status
@@ -179,20 +169,14 @@ module GRPC
     #
     # It blocks until the remote endpoint acknowledges by sending a status.
     def finished
-      batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE,
-                                     RECV_STATUS_ON_CLIENT => nil)
+      batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
       unless batch_result.status.nil?
-        if @call.metadata.nil?
-          @call.metadata = batch_result.status.metadata
-        else
-          @call.metadata.merge!(batch_result.status.metadata)
-        end
+        @call.trailing_metadata = batch_result.status.metadata
       end
       @call.status = batch_result.status
       op_is_done
       batch_result.check_status
       @call.close
-      @cq.close
     end
 
     # remote_send sends a request to the remote endpoint.
@@ -203,9 +187,10 @@ module GRPC
     # @param marshalled [false, true] indicates if the object is already
     # marshalled.
     def remote_send(req, marshalled = false)
+      # TODO(murgatroid99): ensure metadata was sent
       GRPC.logger.debug("sending #{req}, marshalled? #{marshalled}")
       payload = marshalled ? req : @marshal.call(req)
-      @call.run_batch(@cq, self, INFINITE_FUTURE, SEND_MESSAGE => payload)
+      @call.run_batch(SEND_MESSAGE => payload)
     end
 
     # send_status sends a status to the remote endpoint.
@@ -222,7 +207,7 @@ module GRPC
         SEND_STATUS_FROM_SERVER => Struct::Status.new(code, details, metadata)
       }
       ops[RECV_CLOSE_ON_SERVER] = nil if assert_finished
-      @call.run_batch(@cq, self, INFINITE_FUTURE, ops)
+      @call.run_batch(ops)
       nil
     end
 
@@ -234,11 +219,11 @@ module GRPC
     # raising BadStatus
     def remote_read
       ops = { RECV_MESSAGE => nil }
-      ops[RECV_INITIAL_METADATA] = nil unless @metadata_tag.nil?
-      batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE, ops)
-      unless @metadata_tag.nil?
+      ops[RECV_INITIAL_METADATA] = nil unless @metadata_received
+      batch_result = @call.run_batch(ops)
+      unless @metadata_received
         @call.metadata = batch_result.metadata
-        @metadata_tag = nil
+        @metadata_received = true
       end
       GRPC.logger.debug("received req: #{batch_result}")
       unless batch_result.nil? || batch_result.message.nil?
@@ -318,7 +303,7 @@ module GRPC
     # a list, multiple metadata for its key are sent
     # @return [Object] the response received from the server
     def request_response(req, metadata: {})
-      start_call(metadata) unless @started
+      start_call(metadata)
       remote_send(req)
       writes_done(false)
       response = remote_read
@@ -342,7 +327,7 @@ module GRPC
     # a list, multiple metadata for its key are sent
     # @return [Object] the response received from the server
     def client_streamer(requests, metadata: {})
-      start_call(metadata) unless @started
+      start_call(metadata)
       requests.each { |r| remote_send(r) }
       writes_done(false)
       response = remote_read
@@ -368,7 +353,7 @@ module GRPC
     # a list, multiple metadata for its key are sent
     # @return [Enumerator|nil] a response Enumerator
     def server_streamer(req, metadata: {})
-      start_call(metadata) unless @started
+      start_call(metadata)
       remote_send(req)
       writes_done(false)
       replies = enum_for(:each_remote_read_then_finish)
@@ -407,10 +392,9 @@ module GRPC
     # a list, multiple metadata for its key are sent
     # @return [Enumerator, nil] a response Enumerator
     def bidi_streamer(requests, metadata: {}, &blk)
-      start_call(metadata) unless @started
-      bd = BidiCall.new(@call, @cq, @marshal, @unmarshal,
-                        metadata_tag: @metadata_tag)
-      @metadata_tag = nil  # run_on_client ensures metadata is read
+      start_call(metadata)
+      bd = BidiCall.new(@call, @marshal, @unmarshal,
+                        metadata_received: @metadata_received)
       bd.run_on_client(requests, @op_notifier, &blk)
     end
 
@@ -426,7 +410,8 @@ module GRPC
     #
     # @param gen_each_reply [Proc] generates the BiDi stream replies
     def run_server_bidi(gen_each_reply)
-      bd = BidiCall.new(@call, @cq, @marshal, @unmarshal)
+      bd = BidiCall.new(@call, @marshal, @unmarshal,
+                        metadata_received: @metadata_received)
       bd.run_on_server(gen_each_reply)
     end
 
@@ -449,9 +434,9 @@ module GRPC
     # @param metadata [Hash] metadata to be sent to the server. If a value is
     # a list, multiple metadata for its key are sent
     def start_call(metadata = {})
-      return if @started
-      @metadata_tag = ActiveCall.client_invoke(@call, @cq, metadata)
-      @started = true
+      return if @metadata_sent
+      @metadata_tag = ActiveCall.client_invoke(@call, metadata)
+      @metadata_sent = true
     end
 
     def self.view_class(*visible_methods)
@@ -468,18 +453,18 @@ module GRPC
 
     # SingleReqView limits access to an ActiveCall's methods for use in server
     # handlers that receive just one request.
-    SingleReqView = view_class(:cancelled, :deadline, :metadata,
+    SingleReqView = view_class(:cancelled?, :deadline, :metadata,
                                :output_metadata, :peer, :peer_cert)
 
     # MultiReqView limits access to an ActiveCall's methods for use in
     # server client_streamer handlers.
-    MultiReqView = view_class(:cancelled, :deadline, :each_queued_msg,
+    MultiReqView = view_class(:cancelled?, :deadline, :each_queued_msg,
                               :each_remote_read, :metadata, :output_metadata)
 
     # Operation limits access to an ActiveCall's methods for use as
     # a Operation on the client.
-    Operation = view_class(:cancel, :cancelled, :deadline, :execute,
+    Operation = view_class(:cancel, :cancelled?, :deadline, :execute,
                            :metadata, :status, :start_call, :wait, :write_flag,
-                           :write_flag=)
+                           :write_flag=, :trailing_metadata)
   end
 end
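
For ActiveCall the queue parameter disappears from the constructor, the cancelled predicate gains its question mark, and trailing metadata is read through the new delegated accessor. A sketch under the assumption that `call` is a Core::Call obtained from Channel#create_call and that the marshal/unmarshal procs are placeholders rather than real codecs.

```ruby
require 'grpc'

# `call` is assumed to come from Channel#create_call as shown earlier.
marshal   = ->(obj) { obj.to_s }  # placeholder marshaller
unmarshal = ->(str) { str }       # placeholder unmarshaller
deadline  = Time.now + 5

active_call = GRPC::ActiveCall.new(call, marshal, unmarshal, deadline,
                                   started: false)
active_call.cancelled?          # renamed from #cancelled
active_call.trailing_metadata   # filled in once a status has been received
```
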
diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb
index 238f409a1d6019a16f5f49d71f6bc2904fa38a4f..425dc3e5198756420b111c8f385208b2314fee4a 100644
--- a/src/ruby/lib/grpc/generic/bidi_call.rb
+++ b/src/ruby/lib/grpc/generic/bidi_call.rb
@@ -52,23 +52,18 @@ module GRPC
     # deadline is the absolute deadline for the call.
     #
     # @param call [Call] the call used by the ActiveCall
-    # @param q [CompletionQueue] the completion queue used to accept
-    #          the call
     # @param marshal [Function] f(obj)->string that marshal requests
     # @param unmarshal [Function] f(string)->obj that unmarshals responses
-    # @param metadata_tag [Object] tag object used to collect metadata
-    def initialize(call, q, marshal, unmarshal, metadata_tag: nil)
+    # @param metadata_received [true|false] indicates if metadata has already
+    #     been received. Should always be true for server calls
+    def initialize(call, marshal, unmarshal, metadata_received: false)
       fail(ArgumentError, 'not a call') unless call.is_a? Core::Call
-      unless q.is_a? Core::CompletionQueue
-        fail(ArgumentError, 'not a CompletionQueue')
-      end
       @call = call
-      @cq = q
       @marshal = marshal
       @op_notifier = nil  # signals completion on clients
       @readq = Queue.new
       @unmarshal = unmarshal
-      @metadata_tag = metadata_tag
+      @metadata_received = metadata_received
       @reads_complete = false
       @writes_complete = false
       @complete = false
@@ -124,7 +119,6 @@ module GRPC
       @done_mutex.synchronize do
         return unless @reads_complete && @writes_complete && !@complete
         @call.close
-        @cq.close
         @complete = true
       end
     end
@@ -132,11 +126,11 @@ module GRPC
     # performs a read using @call.run_batch, ensures metadata is set up
     def read_using_run_batch
       ops = { RECV_MESSAGE => nil }
-      ops[RECV_INITIAL_METADATA] = nil unless @metadata_tag.nil?
-      batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE, ops)
-      unless @metadata_tag.nil?
+      ops[RECV_INITIAL_METADATA] = nil unless @metadata_received
+      batch_result = @call.run_batch(ops)
+      unless @metadata_received
         @call.metadata = batch_result.metadata
-        @metadata_tag = nil
+        @metadata_received = true
       end
       batch_result
     end
@@ -161,20 +155,26 @@ module GRPC
 
     def write_loop(requests, is_client: true)
       GRPC.logger.debug('bidi-write-loop: starting')
-      write_tag = Object.new
       count = 0
       requests.each do |req|
         GRPC.logger.debug("bidi-write-loop: #{count}")
         count += 1
         payload = @marshal.call(req)
-        @call.run_batch(@cq, write_tag, INFINITE_FUTURE,
-                        SEND_MESSAGE => payload)
+        # Fails if status already received
+        begin
+          @call.run_batch(SEND_MESSAGE => payload)
+        rescue GRPC::Core::CallError => e
+          # This is almost certainly caused by a status arriving while we are
+          # still writing. Don't re-raise the error.
+          GRPC.logger.warn('bidi-write-loop: ended with error')
+          GRPC.logger.warn(e)
+          break
+        end
       end
       GRPC.logger.debug("bidi-write-loop: #{count} writes done")
       if is_client
         GRPC.logger.debug("bidi-write-loop: client sent #{count}, waiting")
-        @call.run_batch(@cq, write_tag, INFINITE_FUTURE,
-                        SEND_CLOSE_FROM_CLIENT => nil)
+        @call.run_batch(SEND_CLOSE_FROM_CLIENT => nil)
         GRPC.logger.debug('bidi-write-loop: done')
         notify_done
         @writes_complete = true
@@ -195,7 +195,6 @@ module GRPC
       Thread.new do
         GRPC.logger.debug('bidi-read-loop: starting')
         begin
-          read_tag = Object.new
           count = 0
           # queue the initial read before beginning the loop
           loop do
@@ -208,8 +207,7 @@ module GRPC
               GRPC.logger.debug("bidi-read-loop: null batch #{batch_result}")
 
               if is_client
-                batch_result = @call.run_batch(@cq, read_tag, INFINITE_FUTURE,
-                                               RECV_STATUS_ON_CLIENT => nil)
+                batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
                 @call.status = batch_result.status
                 batch_result.check_status
                 GRPC.logger.debug("bidi-read-loop: done status #{@call.status}")
diff --git a/src/ruby/lib/grpc/generic/client_stub.rb b/src/ruby/lib/grpc/generic/client_stub.rb
index cddca13d173a6f469723f66ccc6c8e393831bd36..9d6bd3bf59001ebb429de873b1c120512fa831df 100644
--- a/src/ruby/lib/grpc/generic/client_stub.rb
+++ b/src/ruby/lib/grpc/generic/client_stub.rb
@@ -90,19 +90,16 @@ module GRPC
     # when present, this is the default timeout used for calls
     #
     # @param host [String] the host the stub connects to
-    # @param q [Core::CompletionQueue] used to wait for events - now deprecated
-    #        since each new active call gets its own separately
     # @param creds [Core::ChannelCredentials|Symbol] the channel credentials, or
     #     :this_channel_is_insecure
     # @param channel_override [Core::Channel] a pre-created channel
     # @param timeout [Number] the default timeout to use in requests
     # @param channel_args [Hash] the channel arguments
-    def initialize(host, q, creds,
+    def initialize(host, creds,
                    channel_override: nil,
                    timeout: nil,
                    propagate_mask: nil,
                    channel_args: {})
-      fail(TypeError, '!CompletionQueue') unless q.is_a?(Core::CompletionQueue)
       @ch = ClientStub.setup_channel(channel_override, host, creds,
                                      channel_args)
       alt_host = channel_args[Core::Channel::SSL_TARGET]
@@ -441,15 +438,13 @@ module GRPC
 
       deadline = from_relative_time(@timeout) if deadline.nil?
       # Provide each new client call with its own completion queue
-      call_queue = Core::CompletionQueue.new
-      call = @ch.create_call(call_queue,
-                             parent, # parent call
+      call = @ch.create_call(parent, # parent call
                              @propagate_mask, # propagation options
                              method,
                              nil, # host use nil,
                              deadline)
       call.set_credentials! credentials unless credentials.nil?
-      ActiveCall.new(call, call_queue, marshal, unmarshal, deadline,
+      ActiveCall.new(call, marshal, unmarshal, deadline,
                      started: false)
     end
   end
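
ClientStub drops its queue parameter too, so a stub is built from just the host and credentials plus keyword options. A sketch with a placeholder address; generated service stubs (see the service.rb change below) forward to this same signature.

```ruby
require 'grpc'

# The credentials argument is either a Core::ChannelCredentials or the
# :this_channel_is_insecure symbol; the host here is a placeholder.
stub = GRPC::ClientStub.new('localhost:50051', :this_channel_is_insecure,
                            timeout: 10)
```
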
diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb
index ab7333d1337816029a90aaeff31aeffec70ab900..c92a532a50085ab03ec6d2c6bac66bc377d56937 100644
--- a/src/ruby/lib/grpc/generic/rpc_server.rb
+++ b/src/ruby/lib/grpc/generic/rpc_server.rb
@@ -159,16 +159,6 @@ module GRPC
     # Signal check period is 0.25s
     SIGNAL_CHECK_PERIOD = 0.25
 
-    # setup_cq is used by #initialize to constuct a Core::CompletionQueue from
-    # its arguments.
-    def self.setup_cq(alt_cq)
-      return Core::CompletionQueue.new if alt_cq.nil?
-      unless alt_cq.is_a? Core::CompletionQueue
-        fail(TypeError, '!CompletionQueue')
-      end
-      alt_cq
-    end
-
     # setup_connect_md_proc is used by #initialize to validate the
     # connect_md_proc.
     def self.setup_connect_md_proc(a_proc)
@@ -191,10 +181,6 @@ module GRPC
     # * pool_size: the size of the thread pool the server uses to run its
     # threads
     #
-    # * completion_queue_override: when supplied, this will be used as the
-    # completion_queue that the server uses to receive network events,
-    # otherwise its creates a new instance itself
-    #
     # * creds: [GRPC::Core::ServerCredentials]
     # the credentials used to secure the server
     #
@@ -212,11 +198,9 @@ module GRPC
     def initialize(pool_size:DEFAULT_POOL_SIZE,
                    max_waiting_requests:DEFAULT_MAX_WAITING_REQUESTS,
                    poll_period:DEFAULT_POLL_PERIOD,
-                   completion_queue_override:nil,
                    connect_md_proc:nil,
                    server_args:{})
       @connect_md_proc = RpcServer.setup_connect_md_proc(connect_md_proc)
-      @cq = RpcServer.setup_cq(completion_queue_override)
       @max_waiting_requests = max_waiting_requests
       @poll_period = poll_period
       @pool_size = pool_size
@@ -226,7 +210,7 @@ module GRPC
       # running_state can take 4 values: :not_started, :running, :stopping, and
       # :stopped. State transitions can only proceed in that order.
       @running_state = :not_started
-      @server = Core::Server.new(@cq, server_args)
+      @server = Core::Server.new(server_args)
     end
 
     # stops a running server
@@ -240,7 +224,7 @@ module GRPC
         transition_running_state(:stopping)
       end
       deadline = from_relative_time(@poll_period)
-      @server.close(@cq, deadline)
+      @server.close(deadline)
       @pool.stop
     end
 
@@ -355,7 +339,8 @@ module GRPC
       return an_rpc if @pool.jobs_waiting <= @max_waiting_requests
       GRPC.logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}")
       noop = proc { |x| x }
-      c = ActiveCall.new(an_rpc.call, an_rpc.cq, noop, noop, an_rpc.deadline)
+      c = ActiveCall.new(an_rpc.call, noop, noop, an_rpc.deadline,
+                         metadata_received: true)
       c.send_status(GRPC::Core::StatusCodes::RESOURCE_EXHAUSTED, '')
       nil
     end
@@ -366,7 +351,8 @@ module GRPC
       return an_rpc if rpc_descs.key?(mth)
       GRPC.logger.warn("UNIMPLEMENTED: #{an_rpc}")
       noop = proc { |x| x }
-      c = ActiveCall.new(an_rpc.call, an_rpc.cq, noop, noop, an_rpc.deadline)
+      c = ActiveCall.new(an_rpc.call, noop, noop, an_rpc.deadline,
+                         metadata_received: true)
       c.send_status(GRPC::Core::StatusCodes::UNIMPLEMENTED, '')
       nil
     end
@@ -374,11 +360,9 @@ module GRPC
     # handles calls to the server
     def loop_handle_server_calls
       fail 'not started' if running_state == :not_started
-      loop_tag = Object.new
       while running_state == :running
         begin
-          comp_queue = Core::CompletionQueue.new
-          an_rpc = @server.request_call(comp_queue, loop_tag, INFINITE_FUTURE)
+          an_rpc = @server.request_call
           break if (!an_rpc.nil?) && an_rpc.call.nil?
           active_call = new_active_server_call(an_rpc)
           unless active_call.nil?
@@ -410,15 +394,13 @@ module GRPC
       return nil if an_rpc.nil? || an_rpc.call.nil?
 
       # allow the metadata to be accessed from the call
-      handle_call_tag = Object.new
       an_rpc.call.metadata = an_rpc.metadata  # attaches md to call for handlers
       GRPC.logger.debug("call md is #{an_rpc.metadata}")
       connect_md = nil
       unless @connect_md_proc.nil?
         connect_md = @connect_md_proc.call(an_rpc.method, an_rpc.metadata)
       end
-      an_rpc.call.run_batch(an_rpc.cq, handle_call_tag, INFINITE_FUTURE,
-                            SEND_INITIAL_METADATA => connect_md)
+      an_rpc.call.run_batch(SEND_INITIAL_METADATA => connect_md)
 
       return nil unless available?(an_rpc)
       return nil unless implemented?(an_rpc)
@@ -426,9 +408,9 @@ module GRPC
       # Create the ActiveCall
       GRPC.logger.info("deadline is #{an_rpc.deadline}; (now=#{Time.now})")
       rpc_desc = rpc_descs[an_rpc.method.to_sym]
-      c = ActiveCall.new(an_rpc.call, an_rpc.cq,
-                         rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input),
-                         an_rpc.deadline)
+      c = ActiveCall.new(an_rpc.call, rpc_desc.marshal_proc,
+                         rpc_desc.unmarshal_proc(:input), an_rpc.deadline,
+                         metadata_received: true)
       mth = an_rpc.method.to_sym
       [c, mth]
     end
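
On the server side, RpcServer no longer accepts completion_queue_override and builds its queues internally, and the handling loop calls @server.request_call with no arguments. A sketch of constructing and running a server under these changes; the pool size and poll period are illustrative and the service class is hypothetical.

```ruby
require 'grpc'

s = GRPC::RpcServer.new(pool_size: 30, poll_period: 1)
s.add_http2_port('0.0.0.0:50051', :this_port_is_insecure)
# s.handle(EchoServiceImpl)   # EchoServiceImpl is a hypothetical service class
# s.run_till_terminated
```
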
diff --git a/src/ruby/lib/grpc/generic/service.rb b/src/ruby/lib/grpc/generic/service.rb
index f30242ee801b7c29d9127447ab0dfb81dc78da8b..7cb9f1cc99d95448d5728095d43f9bd478d15150 100644
--- a/src/ruby/lib/grpc/generic/service.rb
+++ b/src/ruby/lib/grpc/generic/service.rb
@@ -168,7 +168,7 @@ module GRPC
           # @param kw [KeywordArgs] the channel arguments, plus any optional
           #                         args for configuring the client's channel
           def initialize(host, creds, **kw)
-            super(host, Core::CompletionQueue.new, creds, **kw)
+            super(host, creds, **kw)
           end
 
           # Used define_method to add a method for each rpc_desc.  Each method
diff --git a/src/ruby/lib/grpc/version.rb b/src/ruby/lib/grpc/version.rb
index 01c8c5ac8f19bc3870cd3cb0c69cf0aa4129bdf4..6e62af94d4373386cc9418d44a2fa4babcd9a498 100644
--- a/src/ruby/lib/grpc/version.rb
+++ b/src/ruby/lib/grpc/version.rb
@@ -29,5 +29,5 @@
 
 # GRPC contains the General RPC module.
 module GRPC
-  VERSION = '0.15.0.dev'
+  VERSION = '1.1.0.dev'
 end
diff --git a/src/ruby/pb/test/client.rb b/src/ruby/pb/test/client.rb
index b6695482a286e8626080e505c1b793bfd9bcef3f..066a7bb90f17fcfc53876606afaf3df7b753312b 100755
--- a/src/ruby/pb/test/client.rb
+++ b/src/ruby/pb/test/client.rb
@@ -197,6 +197,25 @@ class PingPongPlayer
   end
 end
 
+class BlockingEnumerator
+  include Grpc::Testing
+  include Grpc::Testing::PayloadType
+
+  def initialize(req_size, sleep_time)
+    @req_size = req_size
+    @sleep_time = sleep_time
+  end
+
+  def each_item
+    return enum_for(:each_item) unless block_given?
+    req_cls = StreamingOutputCallRequest
+    req = req_cls.new(payload: Payload.new(body: nulls(@req_size)))
+    yield req
+    # Sleep until after the deadline should have passed
+    sleep(@sleep_time)
+  end
+end
+
 # defines methods corresponding to each interop test case.
 class NamedTests
   include Grpc::Testing
@@ -315,11 +334,10 @@ class NamedTests
   end
 
   def timeout_on_sleeping_server
-    msg_sizes = [[27_182, 31_415]]
-    ppp = PingPongPlayer.new(msg_sizes)
-    deadline = GRPC::Core::TimeConsts::from_relative_time(0.001)
-    resps = @stub.full_duplex_call(ppp.each_item, deadline: deadline)
-    resps.each { |r| ppp.queue.push(r) }
+    enum = BlockingEnumerator.new(27_182, 2)
+    deadline = GRPC::Core::TimeConsts::from_relative_time(1)
+    resps = @stub.full_duplex_call(enum.each_item, deadline: deadline)
+    resps.each { } # wait to receive each response (or timeout)
     fail 'Should have raised GRPC::BadStatus(DEADLINE_EXCEEDED)'
   rescue GRPC::BadStatus => e
     assert("#{__callee__}: status was wrong") do
@@ -351,7 +369,7 @@ class NamedTests
     op.execute
     fail 'Should have raised GRPC:Cancelled'
   rescue GRPC::Cancelled
-    assert("#{__callee__}: call operation should be CANCELLED") { op.cancelled }
+    assert("#{__callee__}: call operation should be CANCELLED") { op.cancelled? }
   end
 
   def cancel_after_first_response
@@ -362,7 +380,7 @@ class NamedTests
     op.execute.each { |r| ppp.queue.push(r) }
     fail 'Should have raised GRPC:Cancelled'
   rescue GRPC::Cancelled
-    assert("#{__callee__}: call operation should be CANCELLED") { op.cancelled }
+    assert("#{__callee__}: call operation should be CANCELLED") { op.cancelled? }
     op.wait
   end
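
timeout_on_sleeping_server no longer races a 1 ms deadline through PingPongPlayer: BlockingEnumerator yields a single request and then sleeps past the 1 s client deadline, so the stream reliably ends in DEADLINE_EXCEEDED. A sketch of how the enumerator is consumed, assuming @stub is the TestService stub built elsewhere in this file:

    enum = BlockingEnumerator.new(27_182, 2)   # one 27_182-byte request, then a 2 s sleep
    deadline = GRPC::Core::TimeConsts.from_relative_time(1)
    resps = @stub.full_duplex_call(enum.each_item, deadline: deadline)
    resps.each { }  # raises GRPC::BadStatus (DEADLINE_EXCEEDED) once the deadline expires
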
 
diff --git a/src/ruby/pb/test/server.rb b/src/ruby/pb/test/server.rb
index 914c7cc79d7756c0b75a3e44f7a9699567a52546..088f281dc47c2a22a526ec5f28ba1fb7a0c600eb 100755
--- a/src/ruby/pb/test/server.rb
+++ b/src/ruby/pb/test/server.rb
@@ -188,11 +188,13 @@ class TestTarget < Grpc::Testing::TestService::Service
       begin
         GRPC.logger.info('interop-server: started receiving')
         reqs.each do |req|
-          resp_size = req.response_parameters[0].size
-          GRPC.logger.info("read a req, response size is #{resp_size}")
-          resp = cls.new(payload: Payload.new(type: req.response_type,
-                                              body: nulls(resp_size)))
-          q.push(resp)
+          req.response_parameters.each do |params|
+            resp_size = params.size
+            GRPC.logger.info("read a req, response size is #{resp_size}")
+            resp = cls.new(payload: Payload.new(type: req.response_type,
+                                                body: nulls(resp_size)))
+            q.push(resp)
+          end
         end
         GRPC.logger.info('interop-server: finished receiving')
         q.push(self)
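
The streaming handler above now honours every entry in a request's response_parameters instead of only the first, emitting one response per entry. For example, assuming the interop test protos are loaded as in this file (ResponseParameters being the generated class for each entry):

    req = StreamingOutputCallRequest.new(
      response_parameters: [ResponseParameters.new(size: 31_415),
                            ResponseParameters.new(size: 9)])
    req.response_parameters.map(&:size)  # => [31415, 9]; the server now pushes two responses
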
diff --git a/src/ruby/spec/call_spec.rb b/src/ruby/spec/call_spec.rb
index ae3ce0748a2bd1478ab75520d39860157a524610..1c44b333de7f85f7aeaab2e783d934c6348be27d 100644
--- a/src/ruby/spec/call_spec.rb
+++ b/src/ruby/spec/call_spec.rb
@@ -96,7 +96,6 @@ describe GRPC::Core::CallOps do
 end
 
 describe GRPC::Core::Call do
-  let(:client_queue) { GRPC::Core::CompletionQueue.new }
   let(:test_tag)  { Object.new }
   let(:fake_host) { 'localhost:10101' }
 
@@ -154,7 +153,7 @@ describe GRPC::Core::Call do
   end
 
   def make_test_call
-    @ch.create_call(client_queue, nil, nil, 'dummy_method', nil, deadline)
+    @ch.create_call(nil, nil, 'dummy_method', nil, deadline)
   end
 
   def deadline
diff --git a/src/ruby/spec/channel_spec.rb b/src/ruby/spec/channel_spec.rb
index 355f95c9d798ed930b08e1af00e7efba83f1e24f..740eac631a3e40a1697da49954a2db8f360e0c5b 100644
--- a/src/ruby/spec/channel_spec.rb
+++ b/src/ruby/spec/channel_spec.rb
@@ -37,7 +37,6 @@ end
 
 describe GRPC::Core::Channel do
   let(:fake_host) { 'localhost:0' }
-  let(:cq) { GRPC::Core::CompletionQueue.new }
 
   def create_test_cert
     GRPC::Core::ChannelCredentials.new(load_test_certs[0])
@@ -122,7 +121,7 @@ describe GRPC::Core::Channel do
       deadline = Time.now + 5
 
       blk = proc do
-        ch.create_call(cq, nil, nil, 'dummy_method', nil, deadline)
+        ch.create_call(nil, nil, 'dummy_method', nil, deadline)
       end
       expect(&blk).to_not raise_error
     end
@@ -133,7 +132,7 @@ describe GRPC::Core::Channel do
 
       deadline = Time.now + 5
       blk = proc do
-        ch.create_call(cq, nil, nil, 'dummy_method', nil, deadline)
+        ch.create_call(nil, nil, 'dummy_method', nil, deadline)
       end
       expect(&blk).to raise_error(RuntimeError)
     end
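
Both spec changes above use the trimmed Channel#create_call signature: the completion queue argument is gone, leaving five positional arguments: two leading nils, the method name, an optional host, and a deadline. A standalone sketch:

    require 'grpc'

    ch = GRPC::Core::Channel.new('localhost:10101', nil, :this_channel_is_insecure)
    call = ch.create_call(nil, nil, 'dummy_method', nil, Time.now + 5)
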
diff --git a/src/ruby/spec/client_server_spec.rb b/src/ruby/spec/client_server_spec.rb
index aedeca272d35633ea971690f759c9786e18c6d7c..d9df0b9ae246004f824bc4b7b2ac884d92c130cf 100644
--- a/src/ruby/spec/client_server_spec.rb
+++ b/src/ruby/spec/client_server_spec.rb
@@ -34,27 +34,23 @@ include GRPC::Core
 shared_context 'setup: tags' do
   let(:sent_message) { 'sent message' }
   let(:reply_text) { 'the reply' }
-  before(:example) do
-    @client_tag = Object.new
-    @server_tag = Object.new
-  end
 
   def deadline
     Time.now + 5
   end
 
-  def server_allows_client_to_proceed
-    recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
+  def server_allows_client_to_proceed(metadata = {})
+    recvd_rpc = @server.request_call
     expect(recvd_rpc).to_not eq nil
     server_call = recvd_rpc.call
-    ops = { CallOps::SEND_INITIAL_METADATA => {} }
-    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline, ops)
+    ops = { CallOps::SEND_INITIAL_METADATA => metadata }
+    svr_batch = server_call.run_batch(ops)
     expect(svr_batch.send_metadata).to be true
     server_call
   end
 
   def new_client_call
-    @ch.create_call(@client_queue, nil, nil, '/method', nil, deadline)
+    @ch.create_call(nil, nil, '/method', nil, deadline)
   end
 end
 
@@ -91,8 +87,7 @@ shared_examples 'basic GRPC message delivery is OK' do
       CallOps::SEND_INITIAL_METADATA => {},
       CallOps::SEND_MESSAGE => sent_message
     }
-    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
-                                  client_ops)
+    batch_result = call.run_batch(client_ops)
     expect(batch_result.send_metadata).to be true
     expect(batch_result.send_message).to be true
 
@@ -101,8 +96,7 @@ shared_examples 'basic GRPC message delivery is OK' do
     server_ops = {
       CallOps::RECV_MESSAGE => nil
     }
-    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline,
-                                      server_ops)
+    svr_batch = server_call.run_batch(server_ops)
     expect(svr_batch.message).to eq(sent_message)
   end
 
@@ -118,8 +112,7 @@ shared_examples 'basic GRPC message delivery is OK' do
       CallOps::SEND_INITIAL_METADATA => {},
       CallOps::SEND_MESSAGE => sent_message
     }
-    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
-                                  client_ops)
+    batch_result = call.run_batch(client_ops)
     expect(batch_result.send_metadata).to be true
     expect(batch_result.send_message).to be true
 
@@ -129,12 +122,50 @@ shared_examples 'basic GRPC message delivery is OK' do
       CallOps::RECV_MESSAGE => nil,
       CallOps::SEND_MESSAGE => reply_text
     }
-    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline,
-                                      server_ops)
+    svr_batch = server_call.run_batch(server_ops)
     expect(svr_batch.message).to eq(sent_message)
     expect(svr_batch.send_message).to be true
   end
 
+  it 'compressed messages can be sent and received' do
+    call = new_client_call
+    server_call = nil
+    long_request_str = '0' * 2000
+    long_response_str = '1' * 2000
+    md = { 'grpc-internal-encoding-request' => 'gzip' }
+
+    server_thread = Thread.new do
+      server_call = server_allows_client_to_proceed(md)
+    end
+
+    client_ops = {
+      CallOps::SEND_INITIAL_METADATA => md,
+      CallOps::SEND_MESSAGE => long_request_str
+    }
+    batch_result = call.run_batch(client_ops)
+    expect(batch_result.send_metadata).to be true
+    expect(batch_result.send_message).to be true
+
+    # confirm the server can read the inbound message
+    server_thread.join
+    server_ops = {
+      CallOps::RECV_MESSAGE => nil,
+      CallOps::SEND_MESSAGE => long_response_str
+    }
+    svr_batch = server_call.run_batch(server_ops)
+    expect(svr_batch.message).to eq(long_request_str)
+    expect(svr_batch.send_message).to be true
+
+    client_ops = {
+      CallOps::SEND_CLOSE_FROM_CLIENT => nil,
+      CallOps::RECV_INITIAL_METADATA => nil,
+      CallOps::RECV_MESSAGE => nil
+    }
+    batch_result = call.run_batch(client_ops)
+    expect(batch_result.send_close).to be true
+    expect(batch_result.message).to eq long_response_str
+  end
+
   it 'servers can ignore a client write and send a status' do
     call = new_client_call
     server_call = nil
@@ -147,8 +178,7 @@ shared_examples 'basic GRPC message delivery is OK' do
       CallOps::SEND_INITIAL_METADATA => {},
       CallOps::SEND_MESSAGE => sent_message
     }
-    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
-                                  client_ops)
+    batch_result = call.run_batch(client_ops)
     expect(batch_result.send_metadata).to be true
     expect(batch_result.send_message).to be true
 
@@ -158,8 +188,7 @@ shared_examples 'basic GRPC message delivery is OK' do
     server_ops = {
       CallOps::SEND_STATUS_FROM_SERVER => the_status
     }
-    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline,
-                                      server_ops)
+    svr_batch = server_call.run_batch(server_ops)
     expect(svr_batch.message).to eq nil
     expect(svr_batch.send_status).to be true
   end
@@ -176,8 +205,7 @@ shared_examples 'basic GRPC message delivery is OK' do
       CallOps::SEND_INITIAL_METADATA => {},
       CallOps::SEND_MESSAGE => sent_message
     }
-    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
-                                  client_ops)
+    batch_result = call.run_batch(client_ops)
     expect(batch_result.send_metadata).to be true
     expect(batch_result.send_message).to be true
 
@@ -189,8 +217,7 @@ shared_examples 'basic GRPC message delivery is OK' do
       CallOps::SEND_MESSAGE => reply_text,
       CallOps::SEND_STATUS_FROM_SERVER => the_status
     }
-    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline,
-                                      server_ops)
+    svr_batch = server_call.run_batch(server_ops)
     expect(svr_batch.message).to eq sent_message
     expect(svr_batch.send_status).to be true
     expect(svr_batch.send_message).to be true
@@ -202,8 +229,7 @@ shared_examples 'basic GRPC message delivery is OK' do
       CallOps::RECV_MESSAGE => nil,
       CallOps::RECV_STATUS_ON_CLIENT => nil
     }
-    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
-                                  client_ops)
+    batch_result = call.run_batch(client_ops)
     expect(batch_result.send_close).to be true
     expect(batch_result.message).to eq reply_text
     expect(batch_result.status).to eq the_status
@@ -212,8 +238,7 @@ shared_examples 'basic GRPC message delivery is OK' do
     server_ops = {
       CallOps::RECV_CLOSE_ON_SERVER => nil
     }
-    svr_batch = server_call.run_batch(@server_queue, @server_tag, deadline,
-                                      server_ops)
+    svr_batch = server_call.run_batch(server_ops)
     expect(svr_batch.send_close).to be true
   end
 end
@@ -244,8 +269,7 @@ shared_examples 'GRPC metadata delivery works OK' do
           CallOps::SEND_INITIAL_METADATA => md
         }
         blk = proc do
-          call.run_batch(@client_queue, @client_tag, deadline,
-                         client_ops)
+          call.run_batch(client_ops)
         end
         expect(&blk).to raise_error
       end
@@ -255,15 +279,14 @@ shared_examples 'GRPC metadata delivery works OK' do
       @valid_metadata.each do |md|
         recvd_rpc = nil
         rcv_thread = Thread.new do
-          recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
+          recvd_rpc = @server.request_call
         end
 
         call = new_client_call
         client_ops = {
           CallOps::SEND_INITIAL_METADATA => md
         }
-        batch_result = call.run_batch(@client_queue, @client_tag, deadline,
-                                      client_ops)
+        batch_result = call.run_batch(client_ops)
         expect(batch_result.send_metadata).to be true
 
         # confirm the server can receive the client metadata
@@ -296,7 +319,7 @@ shared_examples 'GRPC metadata delivery works OK' do
       @bad_keys.each do |md|
         recvd_rpc = nil
         rcv_thread = Thread.new do
-          recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
+          recvd_rpc = @server.request_call
         end
 
         call = new_client_call
@@ -305,7 +328,7 @@ shared_examples 'GRPC metadata delivery works OK' do
         client_ops = {
           CallOps::SEND_INITIAL_METADATA => nil
         }
-        call.run_batch(@client_queue, @client_tag, deadline, client_ops)
+        call.run_batch(client_ops)
 
         # server gets the invocation
         rcv_thread.join
@@ -314,8 +337,7 @@ shared_examples 'GRPC metadata delivery works OK' do
           CallOps::SEND_INITIAL_METADATA => md
         }
         blk = proc do
-          recvd_rpc.call.run_batch(@server_queue, @server_tag, deadline,
-                                   server_ops)
+          recvd_rpc.call.run_batch(server_ops)
         end
         expect(&blk).to raise_error
       end
@@ -324,7 +346,7 @@ shared_examples 'GRPC metadata delivery works OK' do
     it 'sends an empty hash if no metadata is added' do
       recvd_rpc = nil
       rcv_thread = Thread.new do
-        recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
+        recvd_rpc = @server.request_call
       end
 
       call = new_client_call
@@ -333,7 +355,7 @@ shared_examples 'GRPC metadata delivery works OK' do
       client_ops = {
         CallOps::SEND_INITIAL_METADATA => nil
       }
-      call.run_batch(@client_queue, @client_tag, deadline, client_ops)
+      call.run_batch(client_ops)
 
       # server gets the invocation but sends no metadata back
       rcv_thread.join
@@ -342,14 +364,13 @@ shared_examples 'GRPC metadata delivery works OK' do
       server_ops = {
         CallOps::SEND_INITIAL_METADATA => nil
       }
-      server_call.run_batch(@server_queue, @server_tag, deadline, server_ops)
+      server_call.run_batch(server_ops)
 
       # client receives nothing as expected
       client_ops = {
         CallOps::RECV_INITIAL_METADATA => nil
       }
-      batch_result = call.run_batch(@client_queue, @client_tag, deadline,
-                                    client_ops)
+      batch_result = call.run_batch(client_ops)
       expect(batch_result.metadata).to eq({})
     end
 
@@ -357,7 +378,7 @@ shared_examples 'GRPC metadata delivery works OK' do
       @valid_metadata.each do |md|
         recvd_rpc = nil
         rcv_thread = Thread.new do
-          recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
+          recvd_rpc = @server.request_call
         end
 
         call = new_client_call
@@ -366,7 +387,7 @@ shared_examples 'GRPC metadata delivery works OK' do
         client_ops = {
           CallOps::SEND_INITIAL_METADATA => nil
         }
-        call.run_batch(@client_queue, @client_tag, deadline, client_ops)
+        call.run_batch(client_ops)
 
         # server gets the invocation but sends no metadata back
         rcv_thread.join
@@ -375,14 +396,13 @@ shared_examples 'GRPC metadata delivery works OK' do
         server_ops = {
           CallOps::SEND_INITIAL_METADATA => md
         }
-        server_call.run_batch(@server_queue, @server_tag, deadline, server_ops)
+        server_call.run_batch(server_ops)
 
         # client receives nothing as expected
         client_ops = {
           CallOps::RECV_INITIAL_METADATA => nil
         }
-        batch_result = call.run_batch(@client_queue, @client_tag, deadline,
-                                      client_ops)
+        batch_result = call.run_batch(client_ops)
         replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
         expect(batch_result.metadata).to eq(replace_symbols)
       end
@@ -393,9 +413,7 @@ end
 describe 'the http client/server' do
   before(:example) do
     server_host = '0.0.0.0:0'
-    @client_queue = GRPC::Core::CompletionQueue.new
-    @server_queue = GRPC::Core::CompletionQueue.new
-    @server = GRPC::Core::Server.new(@server_queue, nil)
+    @server = GRPC::Core::Server.new(nil)
     server_port = @server.add_http2_port(server_host, :this_port_is_insecure)
     @server.start
     @ch = Channel.new("0.0.0.0:#{server_port}", nil, :this_channel_is_insecure)
@@ -403,7 +421,7 @@ describe 'the http client/server' do
 
   after(:example) do
     @ch.close
-    @server.close(@server_queue, deadline)
+    @server.close(deadline)
   end
 
   it_behaves_like 'basic GRPC message delivery is OK' do
@@ -425,11 +443,9 @@ describe 'the secure http client/server' do
   before(:example) do
     certs = load_test_certs
     server_host = '0.0.0.0:0'
-    @client_queue = GRPC::Core::CompletionQueue.new
-    @server_queue = GRPC::Core::CompletionQueue.new
     server_creds = GRPC::Core::ServerCredentials.new(
       nil, [{ private_key: certs[1], cert_chain: certs[2] }], false)
-    @server = GRPC::Core::Server.new(@server_queue, nil)
+    @server = GRPC::Core::Server.new(nil)
     server_port = @server.add_http2_port(server_host, server_creds)
     @server.start
     args = { Channel::SSL_TARGET => 'foo.test.google.fr' }
@@ -438,7 +454,7 @@ describe 'the secure http client/server' do
   end
 
   after(:example) do
-    @server.close(@server_queue, deadline)
+    @server.close(deadline)
   end
 
   it_behaves_like 'basic GRPC message delivery is OK' do
@@ -454,7 +470,7 @@ describe 'the secure http client/server' do
     expected_md = { 'k1' => 'updated-v1', 'k2' => 'v2' }
     recvd_rpc = nil
     rcv_thread = Thread.new do
-      recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
+      recvd_rpc = @server.request_call
     end
 
     call = new_client_call
@@ -462,8 +478,7 @@ describe 'the secure http client/server' do
     client_ops = {
       CallOps::SEND_INITIAL_METADATA => md
     }
-    batch_result = call.run_batch(@client_queue, @client_tag, deadline,
-                                  client_ops)
+    batch_result = call.run_batch(client_ops)
     expect(batch_result.send_metadata).to be true
 
     # confirm the server can receive the client metadata
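
The new compression example above doubles as a demonstration of the trimmed Call#run_batch: a single hash of CallOps is passed, and per-call gzip compression is requested through the 'grpc-internal-encoding-request' metadata key. A client-side sketch, assuming call was built by new_client_call against the in-process test server as in this spec:

    md = { 'grpc-internal-encoding-request' => 'gzip' }
    batch = call.run_batch(
      GRPC::Core::CallOps::SEND_INITIAL_METADATA => md,
      GRPC::Core::CallOps::SEND_MESSAGE => '0' * 2000)
    batch.send_metadata && batch.send_message  # => true once both ops complete
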
diff --git a/src/ruby/spec/completion_queue_spec.rb b/src/ruby/spec/completion_queue_spec.rb
deleted file mode 100644
index 886a7f263ba21058f1b6c8e0fcc51bcb9158d331..0000000000000000000000000000000000000000
--- a/src/ruby/spec/completion_queue_spec.rb
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-require 'grpc'
-
-describe GRPC::Core::CompletionQueue do
-  before(:example) do
-    @cq = GRPC::Core::CompletionQueue.new
-  end
-
-  describe '#new' do
-    it 'is constructed successufully' do
-      expect { GRPC::Core::CompletionQueue.new }.not_to raise_error
-    end
-  end
-end
diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb
index d9c9780c93b3ba7d94f7b70f0d4606a5d805d27a..018580e0dfa51e674e07803653e62dc534d67a54 100644
--- a/src/ruby/spec/generic/active_call_spec.rb
+++ b/src/ruby/spec/generic/active_call_spec.rb
@@ -39,13 +39,8 @@ describe GRPC::ActiveCall do
 
   before(:each) do
     @pass_through = proc { |x| x }
-    @server_tag = Object.new
-    @tag = Object.new
-
-    @client_queue = GRPC::Core::CompletionQueue.new
-    @server_queue = GRPC::Core::CompletionQueue.new
     host = '0.0.0.0:0'
-    @server = GRPC::Core::Server.new(@server_queue, nil)
+    @server = GRPC::Core::Server.new(nil)
     server_port = @server.add_http2_port(host, :this_port_is_insecure)
     @server.start
     @ch = GRPC::Core::Channel.new("0.0.0.0:#{server_port}", nil,
@@ -53,21 +48,20 @@ describe GRPC::ActiveCall do
   end
 
   after(:each) do
-    @server.close(@server_queue, deadline)
+    @server.close(deadline)
   end
 
   describe 'restricted view methods' do
     before(:each) do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      @client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                    @pass_through, deadline,
-                                    metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      @client_call = ActiveCall.new(call, @pass_through,
+                                    @pass_through, deadline)
     end
 
     describe '#multi_req_view' do
       it 'exposes a fixed subset of the ActiveCall methods' do
-        want = %w(cancelled, deadline, each_remote_read, metadata, shutdown)
+        want = %w(cancelled?, deadline, each_remote_read, metadata, shutdown)
         v = @client_call.multi_req_view
         want.each do |w|
           expect(v.methods.include?(w))
@@ -77,7 +71,7 @@ describe GRPC::ActiveCall do
 
     describe '#single_req_view' do
       it 'exposes a fixed subset of the ActiveCall methods' do
-        want = %w(cancelled, deadline, metadata, shutdown)
+        want = %w(cancelled?, deadline, metadata, shutdown)
         v = @client_call.single_req_view
         want.each do |w|
           expect(v.methods.include?(w))
@@ -89,46 +83,42 @@ describe GRPC::ActiveCall do
   describe '#remote_send' do
     it 'allows a client to send a payload to the server' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      @client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                    @pass_through, deadline,
-                                    metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      @client_call = ActiveCall.new(call, @pass_through,
+                                    @pass_through, deadline)
       msg = 'message is a string'
       @client_call.remote_send(msg)
 
       # check that server rpc new was received
-      recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline)
+      recvd_rpc = @server.request_call
       expect(recvd_rpc).to_not eq nil
       recvd_call = recvd_rpc.call
 
       # Accept the call, and verify that the server reads the response ok.
-      server_ops = {
-        CallOps::SEND_INITIAL_METADATA => {}
-      }
-      recvd_call.run_batch(@server_queue, @server_tag, deadline, server_ops)
-      server_call = ActiveCall.new(recvd_call, @server_queue, @pass_through,
-                                   @pass_through, deadline)
+      server_call = ActiveCall.new(recvd_call, @pass_through,
+                                   @pass_through, deadline,
+                                   metadata_received: true)
       expect(server_call.remote_read).to eq(msg)
     end
 
     it 'marshals the payload using the marshal func' do
       call = make_test_call
-      ActiveCall.client_invoke(call, @client_queue)
+      ActiveCall.client_invoke(call)
       marshal = proc { |x| 'marshalled:' + x }
-      client_call = ActiveCall.new(call, @client_queue, marshal,
-                                   @pass_through, deadline)
+      client_call = ActiveCall.new(call, marshal, @pass_through, deadline)
       msg = 'message is a string'
       client_call.remote_send(msg)
 
       # confirm that the message was marshalled
-      recvd_rpc =  @server.request_call(@server_queue, @server_tag, deadline)
+      recvd_rpc =  @server.request_call
       recvd_call = recvd_rpc.call
       server_ops = {
         CallOps::SEND_INITIAL_METADATA => nil
       }
-      recvd_call.run_batch(@server_queue, @server_tag, deadline, server_ops)
-      server_call = ActiveCall.new(recvd_call, @server_queue, @pass_through,
-                                   @pass_through, deadline)
+      recvd_call.run_batch(server_ops)
+      server_call = ActiveCall.new(recvd_call, @pass_through,
+                                   @pass_through, deadline,
+                                   metadata_received: true)
       expect(server_call.remote_read).to eq('marshalled:' + msg)
     end
 
@@ -136,23 +126,24 @@ describe GRPC::ActiveCall do
     TEST_WRITE_FLAGS.each do |f|
       it "successfully makes calls with write_flag set to #{f}" do
         call = make_test_call
-        ActiveCall.client_invoke(call, @client_queue)
+        ActiveCall.client_invoke(call)
         marshal = proc { |x| 'marshalled:' + x }
-        client_call = ActiveCall.new(call, @client_queue, marshal,
+        client_call = ActiveCall.new(call, marshal,
                                      @pass_through, deadline)
         msg = 'message is a string'
         client_call.write_flag = f
         client_call.remote_send(msg)
 
         # confirm that the message was marshalled
-        recvd_rpc =  @server.request_call(@server_queue, @server_tag, deadline)
+        recvd_rpc =  @server.request_call
         recvd_call = recvd_rpc.call
         server_ops = {
           CallOps::SEND_INITIAL_METADATA => nil
         }
-        recvd_call.run_batch(@server_queue, @server_tag, deadline, server_ops)
-        server_call = ActiveCall.new(recvd_call, @server_queue, @pass_through,
-                                     @pass_through, deadline)
+        recvd_call.run_batch(server_ops)
+        server_call = ActiveCall.new(recvd_call, @pass_through,
+                                     @pass_through, deadline,
+                                     metadata_received: true)
         expect(server_call.remote_read).to eq('marshalled:' + msg)
       end
     end
@@ -162,8 +153,8 @@ describe GRPC::ActiveCall do
     it 'sends metadata to the server when present' do
       call = make_test_call
       metadata = { k1: 'v1', k2: 'v2' }
-      ActiveCall.client_invoke(call, @client_queue, metadata)
-      recvd_rpc =  @server.request_call(@server_queue, @server_tag, deadline)
+      ActiveCall.client_invoke(call, metadata)
+      recvd_rpc =  @server.request_call
       recvd_call = recvd_rpc.call
       expect(recvd_call).to_not be_nil
       expect(recvd_rpc.metadata).to_not be_nil
@@ -175,10 +166,9 @@ describe GRPC::ActiveCall do
   describe '#remote_read' do
     it 'reads the response sent by a server' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                   @pass_through, deadline,
-                                   metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      client_call = ActiveCall.new(call, @pass_through,
+                                   @pass_through, deadline)
       msg = 'message is a string'
       client_call.remote_send(msg)
       server_call = expect_server_to_receive(msg)
@@ -188,10 +178,9 @@ describe GRPC::ActiveCall do
 
     it 'saves no metadata when the server adds no metadata' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                   @pass_through, deadline,
-                                   metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      client_call = ActiveCall.new(call, @pass_through,
+                                   @pass_through, deadline)
       msg = 'message is a string'
       client_call.remote_send(msg)
       server_call = expect_server_to_receive(msg)
@@ -203,10 +192,9 @@ describe GRPC::ActiveCall do
 
     it 'saves metadata add by the server' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                   @pass_through, deadline,
-                                   metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      client_call = ActiveCall.new(call, @pass_through,
+                                   @pass_through, deadline)
       msg = 'message is a string'
       client_call.remote_send(msg)
       server_call = expect_server_to_receive(msg, k1: 'v1', k2: 'v2')
@@ -219,10 +207,9 @@ describe GRPC::ActiveCall do
 
     it 'get a nil msg before a status when an OK status is sent' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                   @pass_through, deadline,
-                                   metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      client_call = ActiveCall.new(call, @pass_through,
+                                   @pass_through, deadline)
       msg = 'message is a string'
       client_call.remote_send(msg)
       client_call.writes_done(false)
@@ -236,11 +223,10 @@ describe GRPC::ActiveCall do
 
     it 'unmarshals the response using the unmarshal func' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
+      ActiveCall.client_invoke(call)
       unmarshal = proc { |x| 'unmarshalled:' + x }
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                   unmarshal, deadline,
-                                   metadata_tag: md_tag)
+      client_call = ActiveCall.new(call, @pass_through,
+                                   unmarshal, deadline)
 
       # confirm the client receives the unmarshalled message
       msg = 'message is a string'
@@ -254,17 +240,16 @@ describe GRPC::ActiveCall do
   describe '#each_remote_read' do
     it 'creates an Enumerator' do
       call = make_test_call
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
+      client_call = ActiveCall.new(call, @pass_through,
                                    @pass_through, deadline)
       expect(client_call.each_remote_read).to be_a(Enumerator)
     end
 
     it 'the returns an enumerator that can read n responses' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                   @pass_through, deadline,
-                                   metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      client_call = ActiveCall.new(call, @pass_through,
+                                   @pass_through, deadline)
       msg = 'message is a string'
       reply = 'server_response'
       client_call.remote_send(msg)
@@ -279,10 +264,9 @@ describe GRPC::ActiveCall do
 
     it 'the returns an enumerator that stops after an OK Status' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                   @pass_through, deadline,
-                                   metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      client_call = ActiveCall.new(call, @pass_through,
+                                   @pass_through, deadline)
       msg = 'message is a string'
       reply = 'server_response'
       client_call.remote_send(msg)
@@ -302,10 +286,9 @@ describe GRPC::ActiveCall do
   describe '#writes_done' do
     it 'finishes ok if the server sends a status response' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                   @pass_through, deadline,
-                                   metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      client_call = ActiveCall.new(call, @pass_through,
+                                   @pass_through, deadline)
       msg = 'message is a string'
       client_call.remote_send(msg)
       expect { client_call.writes_done(false) }.to_not raise_error
@@ -318,10 +301,9 @@ describe GRPC::ActiveCall do
 
     it 'finishes ok if the server sends an early status response' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                   @pass_through, deadline,
-                                   metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      client_call = ActiveCall.new(call, @pass_through,
+                                   @pass_through, deadline)
       msg = 'message is a string'
       client_call.remote_send(msg)
       server_call = expect_server_to_receive(msg)
@@ -334,10 +316,9 @@ describe GRPC::ActiveCall do
 
     it 'finishes ok if writes_done is true' do
       call = make_test_call
-      md_tag = ActiveCall.client_invoke(call, @client_queue)
-      client_call = ActiveCall.new(call, @client_queue, @pass_through,
-                                   @pass_through, deadline,
-                                   metadata_tag: md_tag)
+      ActiveCall.client_invoke(call)
+      client_call = ActiveCall.new(call, @pass_through,
+                                   @pass_through, deadline)
       msg = 'message is a string'
       client_call.remote_send(msg)
       server_call = expect_server_to_receive(msg)
@@ -355,17 +336,16 @@ describe GRPC::ActiveCall do
   end
 
   def expect_server_to_be_invoked(**kw)
-    recvd_rpc =  @server.request_call(@server_queue, @server_tag, deadline)
+    recvd_rpc =  @server.request_call
     expect(recvd_rpc).to_not eq nil
     recvd_call = recvd_rpc.call
-    recvd_call.run_batch(@server_queue, @server_tag, deadline,
-                         CallOps::SEND_INITIAL_METADATA => kw)
-    ActiveCall.new(recvd_call, @server_queue, @pass_through,
-                   @pass_through, deadline)
+    recvd_call.run_batch(CallOps::SEND_INITIAL_METADATA => kw)
+    ActiveCall.new(recvd_call, @pass_through, @pass_through, deadline,
+                   metadata_received: true, started: true)
   end
 
   def make_test_call
-    @ch.create_call(@client_queue, nil, nil, '/method', nil, deadline)
+    @ch.create_call(nil, nil, '/method', nil, deadline)
   end
 
   def deadline
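
On the client side the pattern is now: ActiveCall.client_invoke sends the invocation (optionally with metadata), then ActiveCall.new wraps the call with just the marshal/unmarshal procs and the deadline; the metadata_tag plumbing is gone. A sketch, assuming call came from make_test_call and pass_through is proc { |x| x }:

    metadata = { k1: 'v1', k2: 'v2' }
    GRPC::ActiveCall.client_invoke(call, metadata)    # sends the initial metadata
    client_call = GRPC::ActiveCall.new(call, pass_through, pass_through, deadline)
    client_call.remote_send('message is a string')
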
diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb
index 168e7fb7919978cc403b7bc3ab6285b028fbe59e..6034b5419c958de4f458cb890b8a2114c9be6dc1 100644
--- a/src/ruby/spec/generic/client_stub_spec.rb
+++ b/src/ruby/spec/generic/client_stub_spec.rb
@@ -29,11 +29,14 @@
 
 require 'grpc'
 
+Thread.abort_on_exception = true
+
 def wakey_thread(&blk)
   n = GRPC::Notifier.new
   t = Thread.new do
     blk.call(n)
   end
+  t.abort_on_exception = true
   n.wait
   t
 end
@@ -54,15 +57,13 @@ describe 'ClientStub' do
   before(:each) do
     Thread.abort_on_exception = true
     @server = nil
-    @server_queue = nil
     @method = 'an_rpc_method'
     @pass = OK
     @fail = INTERNAL
-    @cq = GRPC::Core::CompletionQueue.new
   end
 
   after(:each) do
-    @server.close(@server_queue) unless @server_queue.nil?
+    @server.close(from_relative_time(2)) unless @server.nil?
   end
 
   describe '#new' do
@@ -70,7 +71,7 @@ describe 'ClientStub' do
     it 'can be created from a host and args' do
       opts = { channel_args: { a_channel_arg: 'an_arg' } }
       blk = proc do
-        GRPC::ClientStub.new(fake_host, @cq, :this_channel_is_insecure, **opts)
+        GRPC::ClientStub.new(fake_host, :this_channel_is_insecure, **opts)
       end
       expect(&blk).not_to raise_error
     end
@@ -81,7 +82,7 @@ describe 'ClientStub' do
         channel_override: @ch
       }
       blk = proc do
-        GRPC::ClientStub.new(fake_host, @cq, :this_channel_is_insecure, **opts)
+        GRPC::ClientStub.new(fake_host, :this_channel_is_insecure, **opts)
       end
       expect(&blk).not_to raise_error
     end
@@ -92,7 +93,7 @@ describe 'ClientStub' do
           channel_args: { a_channel_arg: 'an_arg' },
           channel_override: Object.new
         }
-        GRPC::ClientStub.new(fake_host, @cq, :this_channel_is_insecure, **opts)
+        GRPC::ClientStub.new(fake_host, :this_channel_is_insecure, **opts)
       end
       expect(&blk).to raise_error
     end
@@ -100,7 +101,7 @@ describe 'ClientStub' do
     it 'cannot be created with bad credentials' do
       blk = proc do
         opts = { channel_args: { a_channel_arg: 'an_arg' } }
-        GRPC::ClientStub.new(fake_host, @cq, Object.new, **opts)
+        GRPC::ClientStub.new(fake_host, Object.new, **opts)
       end
       expect(&blk).to raise_error
     end
@@ -115,7 +116,7 @@ describe 'ClientStub' do
           }
         }
         creds = GRPC::Core::ChannelCredentials.new(certs[0], nil, nil)
-        GRPC::ClientStub.new(fake_host, @cq, creds,  **opts)
+        GRPC::ClientStub.new(fake_host, creds,  **opts)
       end
       expect(&blk).to_not raise_error
     end
@@ -130,7 +131,7 @@ describe 'ClientStub' do
       it 'should send a request to/receive a reply from a server' do
         server_port = create_test_server
         th = run_request_response(@sent_msg, @resp, @pass)
-        stub = GRPC::ClientStub.new("localhost:#{server_port}", @cq,
+        stub = GRPC::ClientStub.new("localhost:#{server_port}",
                                     :this_channel_is_insecure)
         expect(get_response(stub)).to eq(@resp)
         th.join
@@ -141,7 +142,7 @@ describe 'ClientStub' do
         host = "localhost:#{server_port}"
         th = run_request_response(@sent_msg, @resp, @pass,
                                   k1: 'v1', k2: 'v2')
-        stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure)
+        stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
         expect(get_response(stub)).to eq(@resp)
         th.join
       end
@@ -151,7 +152,7 @@ describe 'ClientStub' do
         alt_host = "localhost:#{server_port}"
         th = run_request_response(@sent_msg, @resp, @pass)
         ch = GRPC::Core::Channel.new(alt_host, nil, :this_channel_is_insecure)
-        stub = GRPC::ClientStub.new('ignored-host', @cq,
+        stub = GRPC::ClientStub.new('ignored-host',
                                     :this_channel_is_insecure,
                                     channel_override: ch)
         expect(get_response(stub)).to eq(@resp)
@@ -162,7 +163,7 @@ describe 'ClientStub' do
         server_port = create_test_server
         host = "localhost:#{server_port}"
         th = run_request_response(@sent_msg, @resp, @fail)
-        stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure)
+        stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
         blk = proc { get_response(stub) }
         expect(&blk).to raise_error(GRPC::BadStatus)
         th.join
@@ -182,7 +183,8 @@ describe 'ClientStub' do
       def get_response(stub)
         op = stub.request_response(@method, @sent_msg, noop, noop,
                                    return_op: true,
-                                   metadata: { k1: 'v1', k2: 'v2' })
+                                   metadata: { k1: 'v1', k2: 'v2' },
+                                   deadline: from_relative_time(2))
         expect(op).to be_a(GRPC::ActiveCall::Operation)
         op.execute
       end
@@ -196,7 +198,7 @@ describe 'ClientStub' do
       before(:each) do
         server_port = create_test_server
         host = "localhost:#{server_port}"
-        @stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure)
+        @stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
         @metadata = { k1: 'v1', k2: 'v2' }
         @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
         @resp = 'a_reply'
@@ -262,7 +264,7 @@ describe 'ClientStub' do
         server_port = create_test_server
         host = "localhost:#{server_port}"
         th = run_server_streamer(@sent_msg, @replys, @pass)
-        stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure)
+        stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
         expect(get_responses(stub).collect { |r| r }).to eq(@replys)
         th.join
       end
@@ -271,7 +273,7 @@ describe 'ClientStub' do
         server_port = create_test_server
         host = "localhost:#{server_port}"
         th = run_server_streamer(@sent_msg, @replys, @fail)
-        stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure)
+        stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
         e = get_responses(stub)
         expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus)
         th.join
@@ -282,7 +284,7 @@ describe 'ClientStub' do
         host = "localhost:#{server_port}"
         th = run_server_streamer(@sent_msg, @replys, @fail,
                                  k1: 'v1', k2: 'v2')
-        stub = GRPC::ClientStub.new(host, @cq, :this_channel_is_insecure)
+        stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
         e = get_responses(stub)
         expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus)
         th.join
@@ -327,7 +329,7 @@ describe 'ClientStub' do
       it 'supports sending all the requests first', bidi: true do
         th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys,
                                                    @pass)
-        stub = GRPC::ClientStub.new(@host, @cq, :this_channel_is_insecure)
+        stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure)
         e = get_responses(stub)
         expect(e.collect { |r| r }).to eq(@replys)
         th.join
@@ -335,7 +337,7 @@ describe 'ClientStub' do
 
       it 'supports client-initiated ping pong', bidi: true do
         th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, true)
-        stub = GRPC::ClientStub.new(@host, @cq, :this_channel_is_insecure)
+        stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure)
         e = get_responses(stub)
         expect(e.collect { |r| r }).to eq(@sent_msgs)
         th.join
@@ -343,7 +345,7 @@ describe 'ClientStub' do
 
       it 'supports a server-initiated ping pong', bidi: true do
         th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, false)
-        stub = GRPC::ClientStub.new(@host, @cq, :this_channel_is_insecure)
+        stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure)
         e = get_responses(stub)
         expect(e.collect { |r| r }).to eq(@sent_msgs)
         th.join
@@ -372,26 +374,6 @@ describe 'ClientStub' do
 
       it_behaves_like 'bidi streaming'
     end
-
-    describe 'without enough time to run' do
-      before(:each) do
-        @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
-        @replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s }
-        server_port = create_test_server
-        @host = "localhost:#{server_port}"
-      end
-
-      it 'should fail with DeadlineExceeded', bidi: true do
-        @server.start
-        stub = GRPC::ClientStub.new(@host, @cq, :this_channel_is_insecure)
-        blk = proc do
-          e = stub.bidi_streamer(@method, @sent_msgs, noop, noop,
-                                 deadline: from_relative_time(0.001))
-          e.collect { |r| r }
-        end
-        expect(&blk).to raise_error GRPC::BadStatus, /Deadline Exceeded/
-      end
-    end
   end
 
   def run_server_streamer(expected_input, replys, status, **kw)
@@ -460,21 +442,18 @@ describe 'ClientStub' do
   end
 
   def create_test_server
-    @server_queue = GRPC::Core::CompletionQueue.new
-    @server = GRPC::Core::Server.new(@server_queue, nil)
+    @server = GRPC::Core::Server.new(nil)
     @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
   end
 
   def expect_server_to_be_invoked(notifier)
     @server.start
     notifier.notify(nil)
-    server_tag = Object.new
-    recvd_rpc = @server.request_call(@server_queue, server_tag,
-                                     INFINITE_FUTURE)
+    recvd_rpc = @server.request_call
     recvd_call = recvd_rpc.call
     recvd_call.metadata = recvd_rpc.metadata
-    recvd_call.run_batch(@server_queue, server_tag, Time.now + 2,
-                         SEND_INITIAL_METADATA => nil)
-    GRPC::ActiveCall.new(recvd_call, @server_queue, noop, noop, INFINITE_FUTURE)
+    recvd_call.run_batch(SEND_INITIAL_METADATA => nil)
+    GRPC::ActiveCall.new(recvd_call, noop, noop, INFINITE_FUTURE,
+                         metadata_received: true)
   end
 end
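
ClientStub construction above likewise drops the completion queue: a stub needs only a host, credentials (or :this_channel_is_insecure), and keyword options. A sketch, assuming server_port points at the in-process test server and noop is proc { |x| x } as in this spec:

    stub = GRPC::ClientStub.new("localhost:#{server_port}", :this_channel_is_insecure)
    op = stub.request_response('an_rpc_method', 'sent_msg', noop, noop,
                               return_op: true,
                               deadline: GRPC::Core::TimeConsts.from_relative_time(2))
    op.execute  # runs the RPC and returns the reply
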
diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb
index 943502cea2313a6e92972b7dafdc0bfdd93a4118..31157cf161ef77ec86a8aa2368e2aef9e3e626a3 100644
--- a/src/ruby/spec/generic/rpc_server_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_spec.rb
@@ -95,7 +95,7 @@ class FailingService
   def initialize(_default_var = 'ignored')
     @details = 'app error'
     @code = 101
-    @md = { failed_method: 'an_rpc' }
+    @md = { 'failed_method' => 'an_rpc' }
   end
 
   def an_rpc(_req, _call)
@@ -135,8 +135,6 @@ describe GRPC::RpcServer do
     @pass = 0
     @fail = 1
     @noop = proc { |x| x }
-
-    @server_queue = GRPC::Core::CompletionQueue.new
   end
 
   describe '#new' do
@@ -148,28 +146,6 @@ describe GRPC::RpcServer do
       expect(&blk).not_to raise_error
     end
 
-    it 'can be created with a completion queue override' do
-      opts = {
-        server_args: { a_channel_arg: 'an_arg' },
-        completion_queue_override: @server_queue
-      }
-      blk = proc do
-        RpcServer.new(**opts)
-      end
-      expect(&blk).not_to raise_error
-    end
-
-    it 'cannot be created with a bad completion queue override' do
-      blk = proc do
-        opts = {
-          server_args: { a_channel_arg: 'an_arg' },
-          completion_queue_override: Object.new
-        }
-        RpcServer.new(**opts)
-      end
-      expect(&blk).to raise_error
-    end
-
     it 'cannot be created with invalid ServerCredentials' do
       blk = proc do
         opts = {
@@ -294,7 +270,6 @@ describe GRPC::RpcServer do
     context 'with no connect_metadata' do
       before(:each) do
         server_opts = {
-          completion_queue_override: @server_queue,
           poll_period: 1
         }
         @srv = RpcServer.new(**server_opts)
@@ -309,8 +284,7 @@ describe GRPC::RpcServer do
         @srv.wait_till_running
         req = EchoMsg.new
         blk = proc do
-          cq = GRPC::Core::CompletionQueue.new
-          stub = GRPC::ClientStub.new(@host, cq, :this_channel_is_insecure,
+          stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure,
                                       **client_opts)
           stub.request_response('/unknown', req, marshal, unmarshal)
         end
@@ -325,8 +299,7 @@ describe GRPC::RpcServer do
         @srv.wait_till_running
         req = EchoMsg.new
         blk = proc do
-          cq = GRPC::Core::CompletionQueue.new
-          stub = GRPC::ClientStub.new(@host, cq, :this_channel_is_insecure,
+          stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure,
                                       **client_opts)
           stub.request_response('/an_rpc', req, marshal, unmarshal)
         end
@@ -422,7 +395,6 @@ describe GRPC::RpcServer do
       it 'should return RESOURCE_EXHAUSTED on too many jobs', server: true do
         opts = {
           server_args: { a_channel_arg: 'an_arg' },
-          completion_queue_override: @server_queue,
           pool_size: 1,
           poll_period: 1,
           max_waiting_requests: 0
@@ -466,7 +438,6 @@ describe GRPC::RpcServer do
       end
       before(:each) do
         server_opts = {
-          completion_queue_override: @server_queue,
           poll_period: 1,
           connect_md_proc: test_md_proc
         }
@@ -502,7 +473,6 @@ describe GRPC::RpcServer do
     context 'with trailing metadata' do
       before(:each) do
         server_opts = {
-          completion_queue_override: @server_queue,
           poll_period: 1
         }
         @srv = RpcServer.new(**server_opts)
@@ -545,7 +515,7 @@ describe GRPC::RpcServer do
         op = stub.an_rpc(req, return_op: true, metadata: { k1: 'v1', k2: 'v2' })
         expect(op.metadata).to be nil
         expect(op.execute).to be_a(EchoMsg)
-        expect(op.metadata).to eq(wanted_trailers)
+        expect(op.trailing_metadata).to eq(wanted_trailers)
         @srv.stop
         t.join
       end
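
Two behavioural notes from the hunks above: RpcServer.new no longer accepts (or validates) a completion_queue_override, and an operation's trailers are now read through trailing_metadata rather than metadata. A sketch of the latter, assuming stub and EchoMsg are set up as in this spec:

    op = stub.an_rpc(EchoMsg.new, return_op: true, metadata: { k1: 'v1', k2: 'v2' })
    op.execute                # runs the RPC
    op.trailing_metadata      # => the trailers the handler attached to the call
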
diff --git a/src/ruby/spec/pb/health/checker_spec.rb b/src/ruby/spec/pb/health/checker_spec.rb
index f3d121a31ef1b8ac8c5852bec67d5902b1656da5..de11c9fedf7512590ff29b819f6431e8986dd231 100644
--- a/src/ruby/spec/pb/health/checker_spec.rb
+++ b/src/ruby/spec/pb/health/checker_spec.rb
@@ -168,11 +168,9 @@ describe Grpc::Health::Checker do
     CheckerStub = Grpc::Health::Checker.rpc_stub_class
 
     before(:each) do
-      @server_queue = GRPC::Core::CompletionQueue.new
       server_host = '0.0.0.0:0'
       @client_opts = { channel_override: @ch }
       server_opts = {
-        completion_queue_override: @server_queue,
         poll_period: 1
       }
       @srv = RpcServer.new(**server_opts)
diff --git a/src/ruby/spec/server_spec.rb b/src/ruby/spec/server_spec.rb
index 439b19fb8dbc1f0d189ab41ad35a1a8a7604f244..003d8f69d570247c52079c9e938c251d7d5c73d8 100644
--- a/src/ruby/spec/server_spec.rb
+++ b/src/ruby/spec/server_spec.rb
@@ -43,19 +43,15 @@ describe Server do
     GRPC::Core::ServerCredentials.new(*load_test_certs)
   end
 
-  before(:each) do
-    @cq = GRPC::Core::CompletionQueue.new
-  end
-
   describe '#start' do
     it 'runs without failing' do
-      blk = proc { Server.new(@cq, nil).start }
+      blk = proc { Server.new(nil).start }
       expect(&blk).to_not raise_error
     end
 
     it 'fails if the server is closed' do
-      s = Server.new(@cq, nil)
-      s.close(@cq)
+      s = Server.new(nil)
+      s.close
       expect { s.start }.to raise_error(RuntimeError)
     end
   end
@@ -63,19 +59,19 @@ describe Server do
   describe '#destroy' do
     it 'destroys a server ok' do
       s = start_a_server
-      blk = proc { s.destroy(@cq) }
+      blk = proc { s.destroy }
       expect(&blk).to_not raise_error
     end
 
     it 'can be called more than once without error' do
       s = start_a_server
       begin
-        blk = proc { s.destroy(@cq) }
+        blk = proc { s.destroy }
         expect(&blk).to_not raise_error
         blk.call
         expect(&blk).to_not raise_error
       ensure
-        s.close(@cq)
+        s.close
       end
     end
   end
@@ -84,7 +80,7 @@ describe Server do
     it 'closes a server ok' do
       s = start_a_server
       begin
-        blk = proc { s.close(@cq) }
+        blk = proc { s.close }
         expect(&blk).to_not raise_error
       ensure
         s.close(@cq)
@@ -93,7 +89,7 @@ describe Server do
 
     it 'can be called more than once without error' do
       s = start_a_server
-      blk = proc { s.close(@cq) }
+      blk = proc { s.close }
       expect(&blk).to_not raise_error
       blk.call
       expect(&blk).to_not raise_error
@@ -104,16 +100,16 @@ describe Server do
     describe 'for insecure servers' do
       it 'runs without failing' do
         blk = proc do
-          s = Server.new(@cq, nil)
+          s = Server.new(nil)
           s.add_http2_port('localhost:0', :this_port_is_insecure)
-          s.close(@cq)
+          s.close
         end
         expect(&blk).to_not raise_error
       end
 
       it 'fails if the server is closed' do
-        s = Server.new(@cq, nil)
-        s.close(@cq)
+        s = Server.new(nil)
+        s.close
         blk = proc do
           s.add_http2_port('localhost:0', :this_port_is_insecure)
         end
@@ -125,16 +121,16 @@ describe Server do
       let(:cert) { create_test_cert }
       it 'runs without failing' do
         blk = proc do
-          s = Server.new(@cq, nil)
+          s = Server.new(nil)
           s.add_http2_port('localhost:0', cert)
-          s.close(@cq)
+          s.close
         end
         expect(&blk).to_not raise_error
       end
 
       it 'fails if the server is closed' do
-        s = Server.new(@cq, nil)
-        s.close(@cq)
+        s = Server.new(nil)
+        s.close
         blk = proc { s.add_http2_port('localhost:0', cert) }
         expect(&blk).to raise_error(RuntimeError)
       end
@@ -142,8 +138,8 @@ describe Server do
   end
 
   shared_examples '#new' do
-    it 'takes a completion queue with nil channel args' do
-      expect { Server.new(@cq, nil) }.to_not raise_error
+    it 'takes nil channel args' do
+      expect { Server.new(nil) }.to_not raise_error
     end
 
     it 'does not take a hash with bad keys as channel args' do
@@ -194,14 +190,14 @@ describe Server do
 
   describe '#new with an insecure channel' do
     def construct_with_args(a)
-      proc { Server.new(@cq, a) }
+      proc { Server.new(a) }
     end
 
     it_behaves_like '#new'
   end
 
   def start_a_server
-    s = Server.new(@cq, nil)
+    s = Server.new(nil)
     s.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
     s.start
     s
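
start_a_server above shows the whole core-server lifecycle without a completion queue: construct with channel args only (nil here), bind a port, start, and close. A standalone sketch:

    require 'grpc'

    s = GRPC::Core::Server.new(nil)
    s.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
    s.start
    s.close
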
diff --git a/src/ruby/tools/version.rb b/src/ruby/tools/version.rb
index dca7fd7e72c39e7852c6cc3b1e5b3fe139f54898..e457ec09ddbda11d56989274700df2790c7e5a43 100644
--- a/src/ruby/tools/version.rb
+++ b/src/ruby/tools/version.rb
@@ -29,6 +29,6 @@
 
 module GRPC
   module Tools
-    VERSION = '0.15.0.dev'
+    VERSION = '1.1.0.dev'
   end
 end
diff --git a/templates/CMakeLists.txt.template b/templates/CMakeLists.txt.template
new file mode 100644
index 0000000000000000000000000000000000000000..52e8b866be1b6a645349101fbd0b03a3a712cf34
--- /dev/null
+++ b/templates/CMakeLists.txt.template
@@ -0,0 +1,152 @@
+%YAML 1.2
+--- |
+  # GRPC global cmake file
+  # This currently builds C and C++ code.
+  # This file has been automatically generated from a template file.
+  # Please look at the templates directory instead.
+  # This file can be regenerated from the template by running
+  # tools/buildgen/generate_projects.sh
+  #
+  # Additionally, this is currently very experimental and unsupported.
+  # Further work will happen on this file.
+  #
+  # Copyright 2015, Google Inc.
+  # All rights reserved.
+  #
+  # Redistribution and use in source and binary forms, with or without
+  # modification, are permitted provided that the following conditions are
+  # met:
+  #
+  #     * Redistributions of source code must retain the above copyright
+  # notice, this list of conditions and the following disclaimer.
+  #     * Redistributions in binary form must reproduce the above
+  # copyright notice, this list of conditions and the following disclaimer
+  # in the documentation and/or other materials provided with the
+  # distribution.
+  #     * Neither the name of Google Inc. nor the names of its
+  # contributors may be used to endorse or promote products derived from
+  # this software without specific prior written permission.
+  #
+  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  <%!
+  def get_deps(target_dict):
+    deps = []
+    if target_dict.get('build', None) in ['protoc']:
+      deps.append("libprotoc")
+    if target_dict.get('secure', False):
+      deps = ["ssl"]
+    if target_dict['name'] in ['grpc++', 'grpc++_unsecure', 'grpc++_codegen_lib']:
+      deps.append("libprotobuf")
+    elif target_dict['name'] in ['grpc']:
+      deps.append("zlibstatic")
+    for d in target_dict.get('deps', []):
+      deps.append(d)
+    return deps
+  %>
+
+  cmake_minimum_required(VERSION 2.8)
+
+  set(PACKAGE_NAME      "grpc")
+  set(PACKAGE_VERSION   "${settings.core_version}")
+  set(PACKAGE_STRING    "<%text>${PACKAGE_NAME} ${PACKAGE_VERSION}</%text>")
+  set(PACKAGE_TARNAME   "<%text>${PACKAGE_NAME}-${PACKAGE_VERSION}</%text>")
+  set(PACKAGE_BUGREPORT "https://github.com/grpc/grpc/issues/")
+  project(<%text>${PACKAGE_NAME}</%text> C CXX)
+
+  if(NOT BORINGSSL_ROOT_DIR)
+    set(BORINGSSL_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/boringssl)
+  endif()
+  if(NOT PROTOBUF_ROOT_DIR)
+    set(PROTOBUF_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/protobuf)
+  endif()
+  if(NOT ZLIB_ROOT_DIR)
+    set(ZLIB_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/zlib)
+  endif()
+
+  # Building the protobuf tests requires gmock, which is not part of a standard protobuf checkout.
+  # Disable them unless they are explicitly requested from the cmake command line (when we assume
+  # gmock is downloaded to the right location inside protobuf).
+  if(NOT protobuf_BUILD_TESTS)
+    set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests")
+  endif()
+
+  add_subdirectory(<%text>${BORINGSSL_ROOT_DIR}</%text> third_party/boringssl)
+  add_subdirectory(<%text>${PROTOBUF_ROOT_DIR}</%text>/cmake third_party/protobuf)
+  add_subdirectory(<%text>${ZLIB_ROOT_DIR}</%text> third_party/zlib)
+
+  set(CMAKE_C_FLAGS   "<%text>${CMAKE_C_FLAGS}</%text>   -std=c11")
+  set(CMAKE_CXX_FLAGS "<%text>${CMAKE_CXX_FLAGS}</%text> -std=c++11")
+
+  % for lib in libs:
+  % if lib.build in ["all", "protoc", "tool"]:
+    ${cc_library(lib)}
+  % endif
+  % endfor
+
+  % for tgt in targets:
+  % if tgt.build in ["all", "protoc", "tool"]:
+  ${cc_binary(tgt)}
+  % endif
+  % endfor
+
+  <%def name="cc_library(lib)">
+  add_library(${lib.name}
+  % for src in lib.src:
+    ${src}
+  % endfor
+  )
+
+  target_include_directories(${lib.name}
+    PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>
+    PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/include
+    PRIVATE <%text>${BORINGSSL_ROOT_DIR}</%text>/include
+    PRIVATE <%text>${PROTOBUF_ROOT_DIR}</%text>/src
+    PRIVATE <%text>${ZLIB_ROOT_DIR}</%text>
+    PRIVATE <%text>${CMAKE_CURRENT_BINARY_DIR}</%text>/third_party/zlib
+  )
+
+  % if len(get_deps(lib)) > 0:
+  target_link_libraries(${lib.name}
+  % for dep in get_deps(lib):
+    ${dep}
+  % endfor
+  )
+  % endif
+  </%def>
+
+  <%def name="cc_binary(tgt)">
+  add_executable(${tgt.name}
+  % for src in tgt.src:
+    ${src}
+  % endfor
+  )
+
+  target_include_directories(${tgt.name}
+    PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>
+    PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/include
+    PRIVATE <%text>${BORINGSSL_ROOT_DIR}</%text>/include
+    PRIVATE <%text>${PROTOBUF_ROOT_DIR}</%text>/src
+    PRIVATE <%text>${ZLIB_ROOT_DIR}</%text>
+    PRIVATE <%text>${CMAKE_CURRENT_BINARY_DIR}</%text>/third_party/zlib
+  )
+
+  % if len(get_deps(tgt)) > 0:
+  target_link_libraries(${tgt.name}
+  % for dep in get_deps(tgt):
+    ${dep}
+  % endfor
+  )
+  % endif
+  </%def>
+
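For reference, the `get_deps()` helper in the template above is plain Python evaluated by Mako at generation time. Below is a minimal sketch of how it resolves link dependencies for one target, using a hypothetical build.yaml-style dict (the real `libs`/`targets` come from buildgen):

```python
# Sketch only: mirrors the template's get_deps() helper; the input dict is
# hypothetical, the real data comes from buildgen's build.yaml model.
def get_deps(target_dict):
    deps = []
    if target_dict.get('build', None) in ['protoc']:
        deps.append("libprotoc")
    if target_dict.get('secure', False):
        deps = ["ssl"]  # note: replaces the list, as in the template above
    if target_dict['name'] in ['grpc++', 'grpc++_unsecure', 'grpc++_codegen_lib']:
        deps.append("libprotobuf")
    elif target_dict['name'] in ['grpc']:
        deps.append("zlibstatic")
    for d in target_dict.get('deps', []):
        deps.append(d)
    return deps

print(get_deps({'name': 'grpc', 'secure': True, 'deps': ['gpr']}))
# -> ['ssl', 'zlibstatic', 'gpr']
```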
diff --git a/templates/Makefile.template b/templates/Makefile.template
index 0e3b9926b7d1cb4ffe691c4dd5fdb00ffd816298..0cbd8bfdd552d5928efa1afdc172381411f1aa6e 100644
--- a/templates/Makefile.template
+++ b/templates/Makefile.template
@@ -380,7 +380,6 @@
   PROTOC_CHECK_VERSION_CMD = protoc --version | grep -q libprotoc.3
   DTRACE_CHECK_CMD = which dtrace > /dev/null
   SYSTEMTAP_HEADERS_CHECK_CMD = $(CC) $(CPPFLAGS) $(CFLAGS) -o $(TMPOUT) test/build/systemtap.c $(LDFLAGS)
-  ZOOKEEPER_CHECK_CMD = $(CC) $(CPPFLAGS) $(CFLAGS) -o $(TMPOUT) test/build/zookeeper.c $(LDFLAGS) -lzookeeper_mt
 
   ifndef REQUIRE_CUSTOM_LIBRARIES_$(CONFIG)
   HAS_SYSTEM_PERFTOOLS ?= $(shell $(PERFTOOLS_CHECK_CMD) 2> /dev/null && echo true || echo false)
@@ -448,8 +447,6 @@
   CACHE_MK += HAS_SYSTEMTAP = true,
   endif
 
-  HAS_ZOOKEEPER = $(shell $(ZOOKEEPER_CHECK_CMD) 2> /dev/null && echo true || echo false)
-
   # Note that for testing purposes, one can do:
   #   make HAS_EMBEDDED_OPENSSL_ALPN=false
   # to emulate the fact we do not have OpenSSL in the third_party folder.
@@ -593,14 +590,6 @@
   PC_LIB = -lgrpc
   GRPC_UNSECURE_PC_FILE := $(PC_TEMPLATE)
 
-  # grpc_zookeeper .pc file
-  PC_NAME = gRPC zookeeper
-  PC_DESCRIPTION = gRPC's zookeeper plugin
-  PC_CFLAGS =
-  PC_REQUIRES_PRIVATE =
-  PC_LIBS_PRIVATE = -lzookeeper_mt
-  GRPC_ZOOKEEPER_PC_FILE := $(PC_TEMPLATE)
-
   PROTOBUF_PKG_CONFIG = false
 
   PC_REQUIRES_GRPCXX =
@@ -796,7 +785,6 @@
   	$(PERFTOOLS_CHECK_CMD) || true
   	$(PROTOBUF_CHECK_CMD) || true
   	$(PROTOC_CHECK_VERSION_CMD) || true
-  	$(ZOOKEEPER_CHECK_CMD) || true
 
   third_party/protobuf/configure:
   	$(E) "[AUTOGEN] Preparing protobuf"
@@ -815,7 +803,7 @@
 
   static: static_c static_cxx
 
-  static_c: pc_c pc_c_unsecure cache.mk pc_c_zookeeper\
+  static_c: pc_c pc_c_unsecure cache.mk \
   % for lib in libs:
   % if 'Makefile' in lib.get('build_system', ['Makefile']):
   % if lib.build == 'all' and lib.language == 'c' and not lib.get('external_deps', None):
@@ -823,7 +811,6 @@
   % endif
   % endif
   % endfor
-   static_zookeeper_libs
 
 
   static_cxx: pc_cxx pc_cxx_unsecure cache.mk \
@@ -838,7 +825,7 @@
 
   shared: shared_c shared_cxx
 
-  shared_c: pc_c pc_c_unsecure cache.mk pc_c_zookeeper\
+  shared_c: pc_c pc_c_unsecure cache.mk\
   % for lib in libs:
   % if 'Makefile' in lib.get('build_system', ['Makefile']):
   % if lib.build == 'all' and lib.language == 'c' and not lib.get('external_deps', None):
@@ -846,7 +833,6 @@
   % endif
   % endif
   % endfor
-   shared_zookeeper_libs
 
   shared_cxx: pc_cxx pc_cxx_unsecure cache.mk\
   % for lib in libs:
@@ -867,33 +853,6 @@
   % endif
   % endfor
 
-  ifeq ($(HAS_ZOOKEEPER),true)
-  static_zookeeper_libs:\
-  % for lib in libs:
-  % if 'Makefile' in lib.get('build_system', ['Makefile']):
-  % if lib.build == 'all' and lib.language == 'c' and 'zookeeper' in lib.get('external_deps', []):
-   $(LIBDIR)/$(CONFIG)/lib${lib.name}.a\
-  % endif
-  % endif
-  % endfor
-
-  shared_zookeeper_libs:\
-  % for lib in libs:
-  % if 'Makefile' in lib.get('build_system', ['Makefile']):
-  % if lib.build == 'all' and lib.language == 'c' and 'zookeeper' in lib.get('external_deps', []):
-   $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)${lib.name}$(SHARED_VERSION).$(SHARED_EXT)\
-  % endif
-  % endif
-  % endfor
-
-  else
-
-  static_zookeeper_libs:
-
-  shared_zookeeper_libs:
-
-  endif
-
   grpc_csharp_ext: shared_csharp
 
   plugins: $(PROTOC_PLUGINS)
@@ -913,12 +872,6 @@
 
   pc_c_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc
 
-  ifeq ($(HAS_ZOOKEEPER),true)
-  pc_c_zookeeper: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_zookeeper.pc
-  else
-  pc_c_zookeeper:
-  endif
-
   pc_cxx: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc
 
   pc_cxx_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc
@@ -932,7 +885,7 @@
   % endif
   % endif
   % endfor
-  
+
   else
   privatelibs_cxx: \
   % for lib in libs:
@@ -942,26 +895,11 @@
   % endif
   % endif
   % endfor
-  
-  endif
-
-
-  ifeq ($(HAS_ZOOKEEPER),true)
-  privatelibs_zookeeper: \
-  % for lib in libs:
-  % if 'Makefile' in lib.get('build_system', ['Makefile']):
-  % if lib.build == 'private' and lib.language == 'c++' and zookeeper in lib.get('external_deps', []):
-   $(LIBDIR)/$(CONFIG)/lib${lib.name}.a\
-  % endif
-  % endif
-  % endfor
 
-  else
-  privatelibs_zookeeper:
   endif
 
 
-  buildtests: buildtests_c buildtests_cxx buildtests_zookeeper
+  buildtests: buildtests_c buildtests_cxx
 
   buildtests_c: privatelibs_c <%text>\</%text>
   % for tgt in targets:
@@ -972,40 +910,27 @@
 
 
   ifeq ($(EMBED_OPENSSL),true)
-  buildtests_cxx: buildtests_zookeeper privatelibs_cxx <%text>\</%text>
+  buildtests_cxx: privatelibs_cxx <%text>\</%text>
   % for tgt in targets:
   % if tgt.build == 'test' and tgt.language == 'c++' and not tgt.get('external_deps', None):
     $(BINDIR)/$(CONFIG)/${tgt.name} <%text>\</%text>
   % endif
   % endfor
-  
+
   else
-  buildtests_cxx: buildtests_zookeeper privatelibs_cxx <%text>\</%text>
+  buildtests_cxx: privatelibs_cxx <%text>\</%text>
   % for tgt in targets:
   % if tgt.build == 'test' and tgt.language == 'c++' and not tgt.get('external_deps', None) and not tgt.boringssl:
     $(BINDIR)/$(CONFIG)/${tgt.name} <%text>\</%text>
   % endif
   % endfor
-  
-  endif
-
-
-  ifeq ($(HAS_ZOOKEEPER),true)
-  buildtests_zookeeper: privatelibs_zookeeper <%text>\</%text>
-  % for tgt in targets:
-  % if tgt.build == 'test' and tgt.language == 'c++' and 'zookeeper' in tgt.get('external_deps', []):
-   $(BINDIR)/$(CONFIG)/${tgt.name} <%text>\</%text>
-  % endif
-  % endfor
 
-  else
-  buildtests_zookeeper:
   endif
 
 
-  test: test_c test_cxx test_zookeeper
+  test: test_c test_cxx
 
-  flaky_test: flaky_test_c flaky_test_cxx flaky_test_zookeeper
+  flaky_test: flaky_test_c flaky_test_cxx
 
   test_c: buildtests_c
   % for tgt in targets:
@@ -1025,7 +950,7 @@
   % endfor
 
 
-  test_cxx: test_zookeeper buildtests_cxx
+  test_cxx: buildtests_cxx
   % for tgt in targets:
   % if tgt.build == 'test' and tgt.get('run', True) and tgt.language == 'c++' and not tgt.get('flaky', False) and not tgt.get('external_deps', None):
   	$(E) "[RUN]     Testing ${tgt.name}"
@@ -1043,30 +968,6 @@
   % endfor
 
 
-  ifeq ($(HAS_ZOOKEEPER),true)
-  test_zookeeper: buildtests_zookeeper
-  % for tgt in targets:
-  % if tgt.build == 'test' and tgt.get('run', True) and tgt.language == 'c++' and not tgt.get('flaky', False) and 'zookeeper' in tgt.get('external_deps', []):
-  	$(E) "[RUN]     Testing ${tgt.name}"
-  	$(Q) $(BINDIR)/$(CONFIG)/${tgt.name} || ( echo test ${tgt.name} failed ; exit 1 )
-  % endif
-  % endfor
-
-
-  flaky_test_zookeeper: buildtests_zookeeper
-  % for tgt in targets:
-  % if tgt.build == 'test' and tgt.get('run', True) and tgt.language == 'c++' and tgt.get('flaky', False) and 'zookeeper' in tgt.get('external_deps', []):
-  	$(E) "[RUN]     Testing ${tgt.name}"
-  	$(Q) $(BINDIR)/$(CONFIG)/${tgt.name} || ( echo test ${tgt.name} failed ; exit 1 )
-  % endif
-  % endfor
-
-  else
-  test_zookeeper:
-  flaky_test_zookeeper:
-  endif
-
-
   test_python: static_c
   	$(E) "[RUN]     Testing python code"
   	$(Q) tools/run_tests/run_tests.py -lpython -c$(CONFIG)
@@ -1126,20 +1027,6 @@
   % endif
   % endif
   % endfor
-  ifeq ($(HAS_ZOOKEEPER),true)
-  % for lib in libs:
-  % if 'Makefile' in lib.get('build_system', ['Makefile']):
-  % if lib.language == "c":
-  % if lib.build == "all":
-  % if 'zookeeper' in lib.get('external_deps', []):
-  	$(E) "[STRIP]   Stripping lib${lib.name}.a"
-  	$(Q) $(STRIP) $(LIBDIR)/$(CONFIG)/lib${lib.name}.a
-  % endif
-  % endif
-  % endif
-  % endif
-  % endfor
-  endif
   endif
 
   strip-static_cxx: static_cxx
@@ -1170,20 +1057,6 @@
   % endif
   % endif
   % endfor
-  ifeq ($(HAS_ZOOKEEPER),true)
-  % for lib in libs:
-  % if 'Makefile' in lib.get('build_system', ['Makefile']):
-  % if lib.language == "c":
-  % if lib.build == "all":
-  % if 'zookeeper' in lib.get('external_deps', []):
-  	$(E) "[STRIP]   Stripping $(SHARED_PREFIX)${lib.name}$(SHARED_VERSION).$(SHARED_EXT)"
-  	$(Q) $(STRIP) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)${lib.name}$(SHARED_VERSION).$(SHARED_EXT)
-  % endif
-  % endif
-  % endif
-  % endif
-  % endfor
-  endif
   endif
 
   strip-shared_cxx: shared_cxx
@@ -1228,11 +1101,6 @@
   	$(Q) mkdir -p $(@D)
   	$(Q) echo "$(GRPC_UNSECURE_PC_FILE)" | tr , '\n' >$@
 
-  $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_zookeeper.pc:
-  	$(E) "[MAKE]    Generating $@"
-  	$(Q) mkdir -p $(@D)
-  	$(Q) echo "$(GRPC_ZOOKEEPER_PC_FILE)" | tr , '\n' >$@
-
   $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc:
   	$(E) "[MAKE]    Generating $@"
   	$(Q) mkdir -p $(@D)
@@ -1331,21 +1199,6 @@
   % endif
   % endif
   % endfor
-  ifeq ($(HAS_ZOOKEEPER),true)
-  % for lib in libs:
-  % if 'Makefile' in lib.get('build_system', ['Makefile']):
-  % if lib.language == "c":
-  % if lib.build == "all":
-  % if 'zookeeper' in lib.get('external_deps', []):
-  	$(E) "[INSTALL] Installing lib${lib.name}.a"
-  	$(Q) $(INSTALL) -d $(prefix)/lib
-  	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/lib${lib.name}.a $(prefix)/lib/lib${lib.name}.a
-  % endif
-  % endif
-  % endif
-  % endif
-  % endfor
-  endif
 
   install-static_cxx: static_cxx strip-static_cxx install-pkg-config_cxx
   % for lib in libs:
@@ -1380,27 +1233,6 @@
   % endif
   % endif
   % endfor
-  ifeq ($(HAS_ZOOKEEPER),true)
-  % for lib in libs:
-  % if 'Makefile' in lib.get('build_system', ['Makefile']):
-  % if lib.language == lang_filter:
-  % if lib.build == "all":
-  % if 'zookeeper' in lib.get('external_deps', []):
-  	$(E) "[INSTALL] Installing $(SHARED_PREFIX)${lib.name}$(SHARED_VERSION).$(SHARED_EXT)"
-  	$(Q) $(INSTALL) -d $(prefix)/lib
-  	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/$(SHARED_PREFIX)${lib.name}$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/$(SHARED_PREFIX)${lib.name}$(SHARED_VERSION).$(SHARED_EXT)
-  ifeq ($(SYSTEM),MINGW32)
-  	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/lib${lib.name}-imp.a $(prefix)/lib/lib${lib.name}-imp.a
-  else ifneq ($(SYSTEM),Darwin)
-  	$(Q) ln -sf $(SHARED_PREFIX)${lib.name}$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/lib${lib.name}.so.${settings.core_version.major}
-  	$(Q) ln -sf $(SHARED_PREFIX)${lib.name}$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/lib${lib.name}.so
-  endif
-  % endif
-  % endif
-  % endif
-  % endif
-  % endfor
-  endif
   ifneq ($(SYSTEM),MINGW32)
   ifneq ($(SYSTEM),Darwin)
   	$(Q) ldconfig || true
@@ -1430,14 +1262,11 @@
   % endfor
   endif
 
-  install-pkg-config_c: pc_c pc_c_unsecure pc_c_zookeeper
+  install-pkg-config_c: pc_c pc_c_unsecure
   	$(E) "[INSTALL] Installing C pkg-config files"
   	$(Q) $(INSTALL) -d $(prefix)/lib/pkgconfig
   	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc.pc $(prefix)/lib/pkgconfig/grpc.pc
   	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc $(prefix)/lib/pkgconfig/grpc_unsecure.pc
-  ifeq ($(HAS_ZOOKEEPER),true)
-  	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_zookeeper.pc $(prefix)/lib/pkgconfig/grpc_zookeeper.pc
-  endif
 
   install-pkg-config_cxx: pc_cxx pc_cxx_unsecure
   	$(E) "[INSTALL] Installing C++ pkg-config files"
@@ -1645,9 +1474,6 @@
       for src in lib.src:
         sources_that_don_t_need_openssl.add(src)
 
-    if 'zookeeper' in lib.get('external_deps', []):
-      libs = libs + ' -lzookeeper_mt'
-
     if lib.get('secure', 'check') == True or lib.get('secure', 'check') == 'check':
       lib_deps = lib_deps + ' $(OPENSSL_DEP)'
       mingw_lib_deps = mingw_lib_deps + ' $(OPENSSL_DEP)'
@@ -1802,9 +1628,6 @@
   % for dep in tgt.deps:
    $(LIBDIR)/$(CONFIG)/lib${dep}.a\
   % endfor
-  % if 'zookeeper' in tgt.get('external_deps', []):
-   -lzookeeper_mt\
-  % endif
   % if tgt.language == "c++":
   % if tgt.build == 'protoc':
    $(HOST_LDLIBSXX) $(HOST_LDLIBS_PROTOC)\
diff --git a/templates/composer.json.template b/templates/composer.json.template
index 7d2029c218f9974ce45b224ad4f520e43fa7f6e4..07ab1f20ebab21b2acc037ed9ac1ed2a732af81c 100644
--- a/templates/composer.json.template
+++ b/templates/composer.json.template
@@ -7,16 +7,12 @@
     "keywords": ["rpc"],
     "homepage": "http://grpc.io",
     "license": "BSD-3-Clause",
-    "repositories": [
-      {
-        "type": "vcs",
-        "url": "https://github.com/stanley-cheung/Protobuf-PHP"
-      }
-    ],
     "require": {
       "php": ">=5.5.0",
-      "datto/protobuf-php": "dev-master",
-      "google/auth": "v0.7"
+      "stanley-cheung/protobuf-php": "dev-master"
+    },
+    "require-dev": {
+      "google/auth": "v0.9"
     },
     "autoload": {
       "psr-4": {
diff --git a/templates/gRPC-Core.podspec.template b/templates/gRPC-Core.podspec.template
new file mode 100644
index 0000000000000000000000000000000000000000..aefe6e965cc24b3505d779de22e407ad8a08b697
--- /dev/null
+++ b/templates/gRPC-Core.podspec.template
@@ -0,0 +1,157 @@
+%YAML 1.2
+--- |
+  # GRPC CocoaPods podspec
+  # This file has been automatically generated from a template file. Please make modifications to
+  # `templates/gRPC-Core.podspec.template` instead. This file can be regenerated from the template by
+  # running `tools/buildgen/generate_projects.sh`.
+
+  # Copyright 2015, Google Inc.
+  # All rights reserved.
+  #
+  # Redistribution and use in source and binary forms, with or without
+  # modification, are permitted provided that the following conditions are
+  # met:
+  #
+  #     * Redistributions of source code must retain the above copyright
+  # notice, this list of conditions and the following disclaimer.
+  #     * Redistributions in binary form must reproduce the above
+  # copyright notice, this list of conditions and the following disclaimer
+  # in the documentation and/or other materials provided with the
+  # distribution.
+  #     * Neither the name of Google Inc. nor the names of its
+  # contributors may be used to endorse or promote products derived from
+  # this software without specific prior written permission.
+  #
+  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  <%!
+  def grpc_private_files(libs):
+    out = []
+    for lib in libs:
+      if lib.name in ("grpc", "gpr"):
+        out += lib.get('headers', [])
+        out += lib.get('src', [])
+    return out
+
+  def grpc_public_headers(libs):
+    out = []
+    for lib in libs:
+      if lib.name in ("grpc", "gpr"):
+        out += lib.get('public_headers', [])
+    return out
+
+  def grpc_private_headers(libs):
+    out = []
+    for lib in libs:
+      if lib.name in ("grpc", "gpr"):
+        out += lib.get('headers', [])
+    return out
+
+  def ruby_multiline_list(files, indent):
+    return (',\n' + indent*' ').join('\'%s\'' % f for f in files)
+  %>
+  Pod::Spec.new do |s|
+    s.name     = 'gRPC-Core'
+    version = '0.14.0'
+    s.version  = version
+    s.summary  = 'Core cross-platform gRPC library, written in C'
+    s.homepage = 'http://www.grpc.io'
+    s.license  = 'New BSD'
+    s.authors  = { 'The gRPC contributors' => 'grpc-packages@google.com' }
+
+    s.source = {
+      :git => 'https://github.com/grpc/grpc.git',
+      :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}",
+      # TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules.
+      :submodules => true,
+    }
+
+    s.ios.deployment_target = '7.1'
+    s.osx.deployment_target = '10.9'
+    s.requires_arc = false
+
+    name = 'grpc'
+
+    # When creating a dynamic framework, name it grpc.framework instead of gRPC-Core.framework.
+    # This lets users write their includes like `#include <grpc/grpc.h>` as opposed to `#include
+    # <gRPC-Core/grpc.h>`.
+    s.module_name = name
+
+    # When creating a dynamic framework, copy the headers under `include/grpc/` into the root of
+    # the `Headers/` directory of the framework (i.e., not under `Headers/include/grpc`).
+    #
+    # TODO(jcanizales): Debug why this doesn't work on macOS.
+    s.header_mappings_dir = 'include/grpc'
+
+    # The above has an undesired effect when creating a static library: It forces users to write
+    # includes like `#include <gRPC-Core/grpc.h>`. `s.header_dir` adds a path prefix to that, and
+    # because Cocoapods lets you omit the pod name when including headers of static libraries, the
+    # following lets users write `#include <grpc/grpc.h>`.
+    s.header_dir = name
+
+    # The module map created automatically by Cocoapods doesn't work for C libraries like gRPC-Core.
+    s.module_map = 'include/grpc/module.modulemap'
+
+    # To compile the library, we need the user headers search path (quoted includes) to point to the
+    # root of the repo, and the system headers search path (angled includes) to point to `include/`.
+    # Cocoapods effectively clones the repo under `<Podfile dir>/Pods/gRPC-Core/`, and sets a build
+    # variable called `$(PODS_ROOT)` to `<Podfile dir>/Pods/`, so we use that.
+    #
+    # Relying on the file structure under $(PODS_ROOT) isn't officially supported in Cocoapods, as it
+    # is taken as an implementation detail. We've asked for an alternative, and have been told that
+    # what we're doing should keep working: https://github.com/CocoaPods/CocoaPods/issues/4386
+    #
+    # The `src_root` value of `$(PODS_ROOT)/gRPC-Core` assumes Cocoapods is installing this pod from
+    # its remote repo. For local development of this library, enabled by using `:path` in the Podfile,
+    # that assumption is wrong. In that case, the following settings need to be reset to the
+    # appropriate value of `src_root`. This can be accomplished in the `pre_install` hook of the
+    # Podfile; see `src/objective-c/tests/Podfile` for an example.
+    src_root = '$(PODS_ROOT)/gRPC-Core'
+    s.pod_target_xcconfig = {
+      'GRPC_SRC_ROOT' => src_root,
+      'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"',
+      'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"',
+      # If we don't set these two settings, `include/grpc/support/time.h` and
+      # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the
+      # build.
+      'USE_HEADERMAP' => 'NO',
+      'ALWAYS_SEARCH_USER_PATHS' => 'NO',
+    }
+
+    # Like many other C libraries, gRPC-Core has its public headers under `include/<libname>/` and its
+    # sources and private headers in other directories outside `include/`. Cocoapods' linter doesn't
+    # allow any header to be listed outside the `header_mappings_dir` (even though doing so works in
+    # practice). Because we need our `header_mappings_dir` to be `include/grpc/` for the reason
+    # mentioned above, we work around the linter limitation by dividing the pod into two subspecs, one
+    # for public headers and the other for implementation. Each gets its own `header_mappings_dir`,
+    # making the linter happy.
+    #
+    # The list of source files is generated by a template: `templates/gRPC-Core.podspec.template`. It
+    # can be regenerated from the template by running `tools/buildgen/generate_projects.sh`.
+    s.subspec 'Interface' do |ss|
+      ss.header_mappings_dir = 'include/grpc'
+
+      ss.source_files = ${ruby_multiline_list(grpc_public_headers(libs), 22)}
+    end
+    s.subspec 'Implementation' do |ss|
+      ss.header_mappings_dir = '.'
+      ss.libraries = 'z'
+      ss.dependency "#{s.name}/Interface", version
+      ss.dependency 'BoringSSL', '~> 4.0'
+
+      # To save you from scrolling, this is the last part of the podspec.
+      ss.source_files = ${ruby_multiline_list(grpc_private_files(libs), 22)}
+
+      ss.private_header_files = ${ruby_multiline_list(grpc_private_headers(libs), 30)}
+    end
+  end
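Two small sanity checks of the helpers and values used in this podspec template, offered as a sketch (nothing beyond the Python standard library is assumed; `libs` normally comes from buildgen):

```python
import re

# Mirrors the template's ruby_multiline_list() helper.
def ruby_multiline_list(files, indent):
    return (',\n' + indent * ' ').join("'%s'" % f for f in files)

print(ruby_multiline_list(['include/grpc/grpc.h', 'include/grpc/status.h'], 22))
# 'include/grpc/grpc.h',
#                       'include/grpc/status.h'

# The git tag is derived from the version string the same way the podspec's
# Ruby does it with version.gsub(/\./, '_').
version = '0.14.0'
print("release-%s-objectivec-%s" % (re.sub(r'\.', '_', version), version))
# release-0_14_0-objectivec-0.14.0
```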
diff --git a/templates/gRPC.podspec.template b/templates/gRPC.podspec.template
deleted file mode 100644
index 979cb1ef8e8fbb80e1308b84d315ae0f28538975..0000000000000000000000000000000000000000
--- a/templates/gRPC.podspec.template
+++ /dev/null
@@ -1,130 +0,0 @@
-%YAML 1.2
---- |
-  # GRPC CocoaPods podspec
-  # This file has been automatically generated from a template file.
-  # Please look at the templates directory instead.
-  # This file can be regenerated from the template by running
-  # tools/buildgen/generate_projects.sh
-
-  # Copyright 2015, Google Inc.
-  # All rights reserved.
-  #
-  # Redistribution and use in source and binary forms, with or without
-  # modification, are permitted provided that the following conditions are
-  # met:
-  #
-  #     * Redistributions of source code must retain the above copyright
-  # notice, this list of conditions and the following disclaimer.
-  #     * Redistributions in binary form must reproduce the above
-  # copyright notice, this list of conditions and the following disclaimer
-  # in the documentation and/or other materials provided with the
-  # distribution.
-  #     * Neither the name of Google Inc. nor the names of its
-  # contributors may be used to endorse or promote products derived from
-  # this software without specific prior written permission.
-  #
-  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-  <%!
-  def grpc_files(libs):
-    out = []
-    for lib in libs:
-      if lib.name in ("grpc", "gpr"):
-        out += lib.get('headers', [])
-        out += lib.get('public_headers', [])
-        out += lib.get('src', [])
-    return out;
-  
-  def grpc_private_headers(libs):
-    out = []
-    for lib in libs:
-      if lib.name in ("grpc", "gpr"):
-        out += lib.get('headers', [])
-    return out
-  %>
-  Pod::Spec.new do |s|
-    s.name     = 'gRPC'
-    version = '0.14.0'
-    s.version  = version
-    s.summary  = 'gRPC client library for iOS/OSX'
-    s.homepage = 'http://www.grpc.io'
-    s.license  = 'New BSD'
-    s.authors  = { 'The gRPC contributors' => 'grpc-packages@google.com' }
-
-    s.source = { :git => 'https://github.com/grpc/grpc.git',
-                 :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}",
-                 :submodules => true }
-
-  
-    s.ios.deployment_target = '7.1'
-    s.osx.deployment_target = '10.9'
-    s.requires_arc = true
-  
-    objc_dir = 'src/objective-c'
-
-    # Reactive Extensions library for iOS.
-    s.subspec 'RxLibrary' do |ss|
-      src_dir = "#{objc_dir}/RxLibrary"
-      ss.source_files = "#{src_dir}/*.{h,m}", "#{src_dir}/**/*.{h,m}"
-      ss.private_header_files = "#{src_dir}/private/*.h"
-      ss.header_mappings_dir = "#{objc_dir}"
-    end
-
-    # Core cross-platform gRPC library, written in C.
-    s.subspec 'C-Core' do |ss|
-      ss.source_files = ${(',\n' + 22*' ').join('\'%s\'' % f for f in grpc_files(libs))}
-
-      ss.private_header_files = ${(',\n' + 30*' ').join('\'%s\'' % f for f in grpc_private_headers(libs))}
-
-      ss.header_mappings_dir = '.'
-      # This isn't officially supported in Cocoapods. We've asked for an alternative:
-      # https://github.com/CocoaPods/CocoaPods/issues/4386
-      ss.xcconfig = {
-        'USE_HEADERMAP' => 'NO',
-        'ALWAYS_SEARCH_USER_PATHS' => 'NO',
-        'USER_HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers/Private/gRPC"',
-        'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers/Private/gRPC/include"'
-      }
-
-      ss.requires_arc = false
-      ss.libraries = 'z'
-      ss.dependency 'BoringSSL', '~> 3.0'
-
-      # ss.compiler_flags = '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w'
-    end
-
-    # Objective-C wrapper around the core gRPC library.
-    s.subspec 'GRPCClient' do |ss|
-      src_dir = "#{objc_dir}/GRPCClient"
-      ss.source_files = "#{src_dir}/*.{h,m}", "#{src_dir}/**/*.{h,m}"
-      ss.private_header_files = "#{src_dir}/private/*.h"
-      ss.header_mappings_dir = "#{objc_dir}"
-
-      ss.dependency 'gRPC/C-Core'
-      ss.dependency 'gRPC/RxLibrary'
-
-      # Certificates, to be able to establish TLS connections:
-      ss.resource_bundles = { 'gRPCCertificates' => ['etc/roots.pem'] }
-    end
-
-    # RPC library for ProtocolBuffers, based on gRPC
-    s.subspec 'ProtoRPC' do |ss|
-      src_dir = "#{objc_dir}/ProtoRPC"
-      ss.source_files = "#{src_dir}/*.{h,m}"
-      ss.header_mappings_dir = "#{objc_dir}"
-
-      ss.dependency 'gRPC/GRPCClient'
-      ss.dependency 'gRPC/RxLibrary'
-      ss.dependency 'Protobuf', '~> 3.0.0-alpha-4'
-    end
-  end
diff --git a/templates/package.json.template b/templates/package.json.template
index 9d19ca062934f07f4866ce06bcc51b7b19b8a41b..f68f64d04751fd00e59d5ee389afc596ff96bfa0 100644
--- a/templates/package.json.template
+++ b/templates/package.json.template
@@ -61,7 +61,6 @@
     "files": [
       "LICENSE",
       "src/node/README.md",
-      "src/node/health_check",
       "src/proto",
       "etc",
       "src/node/index.js",
diff --git a/templates/package.xml.template b/templates/package.xml.template
index a620a2d6a505deb6417b8bff88eb720e02c320f0..153823ece5b682d259e882dc25f7913e245a37b2 100644
--- a/templates/package.xml.template
+++ b/templates/package.xml.template
@@ -12,19 +12,20 @@
     <email>grpc-packages@google.com</email>
     <active>yes</active>
    </lead>
-   <date>2016-05-19</date>
+   <date>2016-07-13</date>
    <time>16:06:07</time>
    <version>
     <release>${settings.php_version.php()}</release>
     <api>${settings.php_version.php()}</api>
    </version>
    <stability>
-    <release>beta</release>
-    <api>beta</api>
+    <release>stable</release>
+    <api>stable</api>
    </stability>
    <license>BSD</license>
    <notes>
-  - TBD
+  - GA release
+  - Fix shutdown hang problem #4017
    </notes>
    <contents>
     <dir baseinstalldir="/" name="/">
@@ -153,6 +154,7 @@
      <license>BSD</license>
      <notes>
   - Simplify gRPC PHP installation #4517
+  - Wrap gRPC core library version 0.13
      </notes>
     </release>
     <release>
@@ -182,13 +184,14 @@
      <date>2016-04-19</date>
      <license>BSD</license>
      <notes>
+  - wrap grpc C core version 0.14.0
   - destroy grpc_byte_buffer after startBatch #6096
      </notes>
     </release>
     <release>
      <version>
-      <release>0.14.2</release>
-      <api>0.14.2</api>
+      <release>0.15.0</release>
+      <api>0.15.0</api>
      </version>
      <stability>
       <release>beta</release>
@@ -198,6 +201,23 @@
      <license>BSD</license>
      <notes>
   - Updated functions with TSRM macros for ZTS support #6607
+  - Load default roots.pem via grpc_set_ssl_roots_override_callback #6848
+     </notes>
+    </release>
+    <release>
+     <version>
+      <release>1.0.0</release>
+      <api>1.0.0</api>
+     </version>
+     <stability>
+      <release>stable</release>
+      <api>stable</api>
+     </stability>
+     <date>2016-07-13</date>
+     <license>BSD</license>
+     <notes>
+  - GA release
+  - Fix shutdown hang problem #4017
      </notes>
     </release>
    </changelog>
diff --git a/templates/src/csharp/Grpc.Auth/project.json.template b/templates/src/csharp/Grpc.Auth/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..d91bd8ce1df5d254a34a20255443a4b07b40b7b9
--- /dev/null
+++ b/templates/src/csharp/Grpc.Auth/project.json.template
@@ -0,0 +1,43 @@
+%YAML 1.2
+--- |
+  {
+    "version": "${settings.csharp_version}",
+    "title": "gRPC C# Auth",
+    "authors": [ "Google Inc." ],
+    "copyright": "Copyright 2015, Google Inc.",
+    "packOptions": {
+      "summary": "Auth library for C# implementation of gRPC - an RPC library and framework",
+      "description": "Auth library for C# implementation of gRPC - an RPC library and framework. See project site for more info.",
+      "owners": [ "grpc-packages" ],
+      "licenseUrl": "https://github.com/grpc/grpc/blob/master/LICENSE",
+      "projectUrl": "https://github.com/grpc/grpc",
+      "requireLicenseAcceptance": false,
+      "tags": [ "gRPC RPC Protocol HTTP/2 Auth OAuth2" ],
+    },
+    "buildOptions": {
+      "define": [ "SIGNED" ],
+      "keyFile": "../keys/Grpc.snk",
+      "publicSign": true,
+      "xmlDoc": true,
+      "compile": {
+        "includeFiles": [ "../Grpc.Core/Version.cs" ]
+      }
+    },
+    "dependencies": {
+      "Grpc.Core": "${settings.csharp_version}",
+      "Google.Apis.Auth": "1.11.1"
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "net45"
+        ],
+        "dependencies": {
+          "Microsoft.NETCore.Portable.Compatibility": "1.0.1-rc2-24027",
+          "NETStandard.Library": "1.5.0-rc2-24027",
+          "System.Threading.Tasks": "4.0.11-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.Core.Tests/project.json.template b/templates/src/csharp/Grpc.Core.Tests/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..bc9fa3e63a9b5d381c08f31fe7ed79f849546416
--- /dev/null
+++ b/templates/src/csharp/Grpc.Core.Tests/project.json.template
@@ -0,0 +1,24 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=True"/>
+    "dependencies": {
+      "Grpc.Core": {
+        "target": "project"
+      },
+      "Newtonsoft.Json": "8.0.3",
+      "NUnit": "3.2.0",
+      "NUnitLite": "3.2.0-*"
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    },
+  }
diff --git a/templates/src/csharp/Grpc.Core/project.json.template b/templates/src/csharp/Grpc.Core/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..cdcebc5303db5e25e7c3726e2b187ef706487c1d
--- /dev/null
+++ b/templates/src/csharp/Grpc.Core/project.json.template
@@ -0,0 +1,50 @@
+%YAML 1.2
+--- |
+  {
+    "version": "${settings.csharp_version}",
+    "title": "gRPC C# Core",
+    "authors": [ "Google Inc." ],
+    "copyright": "Copyright 2015, Google Inc.",
+    "packOptions": {
+      "summary": "Core C# implementation of gRPC - an RPC library and framework",
+      "description": "Core C# implementation of gRPC - an RPC library and framework. See project site for more info.",
+      "owners": [ "grpc-packages" ],
+      "licenseUrl": "https://github.com/grpc/grpc/blob/master/LICENSE",
+      "projectUrl": "https://github.com/grpc/grpc",
+      "requireLicenseAcceptance": false,
+      "tags": [ "gRPC RPC Protocol HTTP/2" ],
+      "files": {
+        "mappings": {
+          "build/net45/": "Grpc.Core.targets",
+          "build/native/bin/windows_x86/": "../nativelibs/windows_x86/grpc_csharp_ext.dll",
+          "build/native/bin/windows_x64/": "../nativelibs/windows_x64/grpc_csharp_ext.dll",
+          "build/native/bin/linux_x86/": "../nativelibs/linux_x86/libgrpc_csharp_ext.so",
+          "build/native/bin/linux_x64/": "../nativelibs/linux_x64/libgrpc_csharp_ext.so",
+          "build/native/bin/macosx_x86/": "../nativelibs/macosx_x86/libgrpc_csharp_ext.dylib",
+          "build/native/bin/macosx_x64/": "../nativelibs/macosx_x64/libgrpc_csharp_ext.dylib"
+        }
+      }
+    },
+    "buildOptions": {
+      "embed": [ "../../../etc/roots.pem" ],
+      "define": [ "SIGNED" ],
+      "keyFile": "../keys/Grpc.snk",
+      "publicSign": true,
+      "xmlDoc": true
+    },
+    "dependencies": {
+      "Ix-Async": "1.2.5"
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027",
+          "System.Threading.Thread": "4.0.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.Examples.MathClient/project.json.template b/templates/src/csharp/Grpc.Examples.MathClient/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..fba401c3a4725f4408a52c1ab492ff8f80d7da61
--- /dev/null
+++ b/templates/src/csharp/Grpc.Examples.MathClient/project.json.template
@@ -0,0 +1,21 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=True"/>
+    "dependencies": {
+      "Grpc.Examples": {
+        "target": "project"
+      }
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.Examples.MathServer/project.json.template b/templates/src/csharp/Grpc.Examples.MathServer/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..fba401c3a4725f4408a52c1ab492ff8f80d7da61
--- /dev/null
+++ b/templates/src/csharp/Grpc.Examples.MathServer/project.json.template
@@ -0,0 +1,21 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=True"/>
+    "dependencies": {
+      "Grpc.Examples": {
+        "target": "project"
+      }
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.Examples.Tests/project.json.template b/templates/src/csharp/Grpc.Examples.Tests/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..21765f0565c9e2d99d02c01a28526f6d1d2c0327
--- /dev/null
+++ b/templates/src/csharp/Grpc.Examples.Tests/project.json.template
@@ -0,0 +1,23 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=True"/>
+    "dependencies": {
+      "Grpc.Examples": {
+        "target": "project"
+      },
+      "NUnit": "3.2.0",
+      "NUnitLite": "3.2.0-*"
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.Examples/project.json.template b/templates/src/csharp/Grpc.Examples/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..715fc08725615af4a84c7034e7a73e3dbc06933e
--- /dev/null
+++ b/templates/src/csharp/Grpc.Examples/project.json.template
@@ -0,0 +1,27 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=False"/>
+    "dependencies": {
+      "Grpc.Core": {
+        "target": "project"
+      },
+      "Google.Protobuf": "3.0.0-beta3"
+    },
+    "frameworks": {
+      "net45": {
+        "frameworkAssemblies": {
+          "System.Runtime": "",
+          "System.IO": ""
+        }
+      },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.HealthCheck.Tests/project.json.template b/templates/src/csharp/Grpc.HealthCheck.Tests/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..79e67226cb4ae68953f7b3341deb6da373d01a5e
--- /dev/null
+++ b/templates/src/csharp/Grpc.HealthCheck.Tests/project.json.template
@@ -0,0 +1,23 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=True"/>
+    "dependencies": {
+      "Grpc.HealthCheck": {
+        "target": "project"
+      },
+      "NUnit": "3.2.0",
+      "NUnitLite": "3.2.0-*"
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.HealthCheck/project.json.template b/templates/src/csharp/Grpc.HealthCheck/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..264ed292050c768134416e7263992fec8af70187
--- /dev/null
+++ b/templates/src/csharp/Grpc.HealthCheck/project.json.template
@@ -0,0 +1,46 @@
+%YAML 1.2
+--- |
+  {
+    "version": "${settings.csharp_version}",
+    "title": "gRPC C# Healthchecking",
+    "authors": [ "Google Inc." ],
+    "copyright": "Copyright 2015, Google Inc.",
+    "packOptions": {
+      "summary": "Implementation of gRPC health service",
+      "description": "Example implementation of grpc.health.v1 service that can be used for health-checking.",
+      "owners": [ "grpc-packages" ],
+      "licenseUrl": "https://github.com/grpc/grpc/blob/master/LICENSE",
+      "projectUrl": "https://github.com/grpc/grpc",
+      "requireLicenseAcceptance": false,
+      "tags": [ "gRPC health check" ]
+    },
+    "buildOptions": {
+      "define": [ "SIGNED" ],
+      "keyFile": "../keys/Grpc.snk",
+      "publicSign": true,
+      "xmlDoc": true,
+      "compile": {
+        "includeFiles": [ "../Grpc.Core/Version.cs" ]
+      }
+    },
+    "dependencies": {
+      "Grpc.Core": "${settings.csharp_version}",
+      "Google.Protobuf": "3.0.0-beta3"
+    },
+    "frameworks": {
+      "net45": {
+        "frameworkAssemblies": {
+          "System.Runtime": "",
+          "System.IO": ""
+        }
+      },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.IntegrationTesting.Client/project.json.template b/templates/src/csharp/Grpc.IntegrationTesting.Client/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..10ed5493477ad92978c8109266f402a27d9259f6
--- /dev/null
+++ b/templates/src/csharp/Grpc.IntegrationTesting.Client/project.json.template
@@ -0,0 +1,22 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=True,includeData=True"/>
+    "dependencies": {
+      "Grpc.IntegrationTesting": {
+        "target": "project"
+      }
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45",
+          "net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json.template b/templates/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..10ed5493477ad92978c8109266f402a27d9259f6
--- /dev/null
+++ b/templates/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json.template
@@ -0,0 +1,22 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=True,includeData=True"/>
+    "dependencies": {
+      "Grpc.IntegrationTesting": {
+        "target": "project"
+      }
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45",
+          "net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.IntegrationTesting.Server/project.json.template b/templates/src/csharp/Grpc.IntegrationTesting.Server/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..10ed5493477ad92978c8109266f402a27d9259f6
--- /dev/null
+++ b/templates/src/csharp/Grpc.IntegrationTesting.Server/project.json.template
@@ -0,0 +1,22 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=True,includeData=True"/>
+    "dependencies": {
+      "Grpc.IntegrationTesting": {
+        "target": "project"
+      }
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45",
+          "net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.IntegrationTesting.StressClient/project.json.template b/templates/src/csharp/Grpc.IntegrationTesting.StressClient/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..10ed5493477ad92978c8109266f402a27d9259f6
--- /dev/null
+++ b/templates/src/csharp/Grpc.IntegrationTesting.StressClient/project.json.template
@@ -0,0 +1,22 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=True,includeData=True"/>
+    "dependencies": {
+      "Grpc.IntegrationTesting": {
+        "target": "project"
+      }
+    },
+    "frameworks": {
+      "net45": { },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45",
+          "net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/Grpc.IntegrationTesting/project.json.template b/templates/src/csharp/Grpc.IntegrationTesting/project.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..3181511485703ef8ed14de737ccd8ea6d03b3a97
--- /dev/null
+++ b/templates/src/csharp/Grpc.IntegrationTesting/project.json.template
@@ -0,0 +1,38 @@
+%YAML 1.2
+--- |
+  {
+    <%include file="../build_options.include" args="executable=True,includeData=True"/>
+    "dependencies": {
+      "Grpc.Auth": {
+        "target": "project"
+      },
+      "Grpc.Core": {
+        "target": "project"
+      },
+      "Google.Protobuf": "3.0.0-beta3",
+      "CommandLineParser": "1.9.71",
+      "NUnit": "3.2.0",
+      "NUnitLite": "3.2.0-*"
+    },
+    "frameworks": {
+      "net45": {
+        "dependencies": {
+          "Moq": "4.2.1510.2205"
+        },
+        "frameworkAssemblies": {
+          "System.Runtime": "",
+          "System.IO": ""
+        }
+      },
+      "netstandard1.5": {
+        "imports": [
+          "portable-net45",
+          "net45"
+        ],
+        "dependencies": {
+          "NETStandard.Library": "1.5.0-rc2-24027",
+          "System.Linq.Expressions": "4.0.11-rc2-24027"
+        }
+      }
+    }
+  }
diff --git a/templates/src/csharp/build_options.include b/templates/src/csharp/build_options.include
new file mode 100644
index 0000000000000000000000000000000000000000..ae96b94f72827c62af590eb6cd72075478745854
--- /dev/null
+++ b/templates/src/csharp/build_options.include
@@ -0,0 +1,59 @@
+<%page args="executable=False,includeData=False"/>\
+"buildOptions": {
+  % if executable:
+    "emitEntryPoint": true
+  % endif
+  },
+  % if executable:
+  "configurations": {
+    "Debug": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          % if includeData:
+          "include": "data/*",
+          % endif
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    },
+    "Release": {
+      "buildOptions": {
+        "define": [ "SIGNED" ],
+        "keyFile": "../keys/Grpc.snk",
+        "publicSign": true,
+        "xmlDoc": true,
+        "compile": {
+          "includeFiles": [ "../Grpc.Core/Version.cs" ]
+        },
+        "copyToOutput": {
+          % if includeData:
+          "include": "data/*",
+          % endif
+          "mappings": {
+            "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll",
+            "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll",
+            "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so",
+            "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib"
+          }
+        }
+      }
+    }
+  },
+  "runtimes": {
+    "win7-x64": { },
+    "debian.8-x64": { },
+    "osx.10.11-x64": { }
+  },
+  % endif
\ No newline at end of file
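As a rough illustration of how a `<%page args=...>` include like the one above is parameterized, here is a minimal Mako rendering sketch (it assumes the `mako` package is available; the real projects are generated by tools/buildgen/generate_projects.sh, not by this snippet):

```python
from mako.template import Template

# Condensed, hypothetical version of the buildOptions fragment above.
snippet = Template("""\
<%page args="executable=False"/>\\
"buildOptions": {
  % if executable:
    "emitEntryPoint": true
  % endif
  },
""")

print(snippet.render(executable=True))
# "buildOptions": {
#     "emitEntryPoint": true
#   },
```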
diff --git a/templates/src/csharp/build_packages.bat.template b/templates/src/csharp/build_packages.bat.template
index 122435af2e840f59bf77a07aecb9ae8ab440a057..ea2acb661ee9e8b7e050f0c4bb947506389aa549 100644
--- a/templates/src/csharp/build_packages.bat.template
+++ b/templates/src/csharp/build_packages.bat.template
@@ -43,12 +43,12 @@
   
   @rem Collect the artifacts built by the previous build step if running on Jenkins
   @rem TODO(jtattermusch): is there a better way to do this?
-  xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=windows\artifacts\* Grpc.Core\windows_x86${"\\"}
-  xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=windows\artifacts\* Grpc.Core\windows_x64${"\\"}
-  xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=linux\artifacts\* Grpc.Core\linux_x86${"\\"}
-  xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=linux\artifacts\* Grpc.Core\linux_x64${"\\"}
-  xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=macos\artifacts\* Grpc.Core\macosx_x86${"\\"}
-  xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=macos\artifacts\* Grpc.Core\macosx_x64${"\\"}
+  xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=windows\artifacts\* nativelibs\windows_x86${"\\"}
+  xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=windows\artifacts\* nativelibs\windows_x64${"\\"}
+  xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=linux\artifacts\* nativelibs\linux_x86${"\\"}
+  xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=linux\artifacts\* nativelibs\linux_x64${"\\"}
+  xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=macos\artifacts\* nativelibs\macosx_x86${"\\"}
+  xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=macos\artifacts\* nativelibs\macosx_x64${"\\"}
   
   @rem Collect protoc artifacts built by the previous build step
   xcopy /Y /I ..\..\architecture=x86,language=protoc,platform=windows\artifacts\* protoc_plugins\windows_x86${"\\"}
diff --git a/templates/src/node/health_check/package.json.template b/templates/src/node/health_check/package.json.template
new file mode 100644
index 0000000000000000000000000000000000000000..1248ced1e16da04b2be1bca228c03cde5c4199a7
--- /dev/null
+++ b/templates/src/node/health_check/package.json.template
@@ -0,0 +1,31 @@
+%YAML 1.2
+--- |
+  {
+    "name": "grpc-health-check",
+    "version": "${settings.node_version}",
+    "author": "Google Inc.",
+    "description": "Health check service for use with gRPC",
+    "repository": {
+      "type": "git",
+      "url": "https://github.com/grpc/grpc.git"
+    },
+    "bugs": "https://github.com/grpc/grpc/issues",
+    "contributors": [
+      {
+        "name": "Michael Lumish",
+        "email": "mlumish@google.com"
+      }
+    ],
+    "dependencies": {
+      "grpc": "^0.15.0",
+      "lodash": "^3.9.3",
+      "google-protobuf": "^3.0.0-alpha.5"
+    },
+    "files": {
+      "LICENSE",
+      "health.js",
+      "v1"
+    },
+    "main": "src/node/index.js",
+    "license": "BSD-3-Clause"
+  }
diff --git a/templates/src/node/tools/package.json.template b/templates/src/node/tools/package.json.template
index 69ad71a3b83b6291b683213ff21cab7c65c85ea3..02824259767426b1dffaf16b3fbc07894f13d326 100644
--- a/templates/src/node/tools/package.json.template
+++ b/templates/src/node/tools/package.json.template
@@ -36,6 +36,7 @@
       "index.js",
       "bin/protoc.js",
       "bin/protoc_plugin.js",
+      "bin/google/protobuf",
       "LICENSE"
     ],
     "main": "index.js"
diff --git a/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template b/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template
index 84fa5e62bfe77948208b4c4595f3b1c1a23a3e35..d83bccad1db90ad3221d6d969468dc89bb0a1377 100644
--- a/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template
+++ b/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template
@@ -33,29 +33,9 @@
    *
    */
 
+  /* TODO(atash) remove cruft */
   #include <grpc/support/port_platform.h>
 
   #include "imports.generated.h"
 
-  #ifdef GPR_WINDOWS
-
-  %for api in c_apis:
-  ${api.name}_type ${api.name}_import;
-  %endfor
-
-  #ifdef __cplusplus
-  extern "C" {
-  #endif  /* __cpluslus */
-
-  void pygrpc_load_imports(HMODULE library) {
-  %for api in c_apis:
-    ${api.name}_import = (${api.name}_type) GetProcAddress(library, "${api.name}");
-  %endfor
-  }
-
-  #ifdef __cplusplus
-  }
-  #endif  /* __cpluslus */
-
-  #endif /* !GPR_WINDOWS */
 
diff --git a/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template b/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template
index d0f60dc0a507984177f1032916d374b853cbe3d7..b85bc3dbd8b5614dba4b8f97412b33ea4e09c7ed 100644
--- a/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template
+++ b/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template
@@ -33,37 +33,12 @@
    *
    */
 
+  /* TODO(atash) remove cruft */
   #ifndef PYGRPC_CYTHON_WINDOWS_IMPORTS_H_
   #define PYGRPC_CYTHON_WINDOWS_IMPORTS_H_
 
   #include <grpc/support/port_platform.h>
 
-  #ifdef GPR_WINDOWS
-
-  #include <windows.h>
-
-  %for header in sorted(set(api.header for api in c_apis)):
-  #include <${'/'.join(header.split('/')[1:])}>
-  %endfor
-
-  %for api in c_apis:
-  typedef ${api.return_type}(*${api.name}_type)(${api.arguments});
-  extern ${api.name}_type ${api.name}_import;
-  #define ${api.name} ${api.name}_import
-  %endfor
-
-  #ifdef __cplusplus
-  extern "C" {
-  #endif  /* __cpluslus */
-
-  void pygrpc_load_imports(HMODULE library);
-
-  #ifdef __cplusplus
-  }
-  #endif  /* __cpluslus */
-
-  #else /* !GPR_WINDOWS */
-
   #include <grpc/byte_buffer.h>
   #include <grpc/byte_buffer_reader.h>
   #include <grpc/compression.h>
@@ -74,6 +49,4 @@
   #include <grpc/support/time.h>
   #include <grpc/status.h>
 
-  #endif /* !GPR_WINDOWS */
-
   #endif
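For context on the block removed above: each entry of `c_apis` used to expand to a typedef plus a GetProcAddress import. A sketch of that expansion for a single, illustrative entry follows (the dict is made up; the real list came from buildgen):

```python
# Illustrative only: what one c_apis entry expanded to in the removed
# Windows-import shim. grpc_init's C signature is `void grpc_init(void)`.
api = {"name": "grpc_init", "return_type": "void", "arguments": "void"}

print("typedef {return_type}(*{name}_type)({arguments});".format(**api))
# typedef void(*grpc_init_type)(void);

print('{name}_import = ({name}_type) GetProcAddress(library, "{name}");'.format(**api))
# grpc_init_import = (grpc_init_type) GetProcAddress(library, "grpc_init");
```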
diff --git a/templates/src/python/grpcio_tests/grpc_version.py.template b/templates/src/python/grpcio_tests/grpc_version.py.template
new file mode 100644
index 0000000000000000000000000000000000000000..1f1bf2956dcabe89bc8b87f3222f7a87d8a4127c
--- /dev/null
+++ b/templates/src/python/grpcio_tests/grpc_version.py.template
@@ -0,0 +1,34 @@
+%YAML 1.2
+--- |
+  # Copyright 2016, Google Inc.
+  # All rights reserved.
+  #
+  # Redistribution and use in source and binary forms, with or without
+  # modification, are permitted provided that the following conditions are
+  # met:
+  #
+  #     * Redistributions of source code must retain the above copyright
+  # notice, this list of conditions and the following disclaimer.
+  #     * Redistributions in binary form must reproduce the above
+  # copyright notice, this list of conditions and the following disclaimer
+  # in the documentation and/or other materials provided with the
+  # distribution.
+  #     * Neither the name of Google Inc. nor the names of its
+  # contributors may be used to endorse or promote products derived from
+  # this software without specific prior written permission.
+  #
+  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!!
+
+  VERSION='${settings.python_version.pep440()}'
diff --git a/templates/tools/dockerfile/apt_get_pyenv.include b/templates/tools/dockerfile/apt_get_pyenv.include
new file mode 100644
index 0000000000000000000000000000000000000000..70e90289b70dc6f465cd87d20540047d69533804
--- /dev/null
+++ b/templates/tools/dockerfile/apt_get_pyenv.include
@@ -0,0 +1,18 @@
+# Install dependencies for pyenv
+RUN apt-get update && apt-get install -y ${'\\'}
+  libbz2-dev ${'\\'}
+  libncurses5-dev ${'\\'}
+  libncursesw5-dev ${'\\'}
+  libreadline-dev ${'\\'}
+  libsqlite3-dev ${'\\'}
+  libssl-dev ${'\\'}
+  llvm ${'\\'}
+  mercurial ${'\\'}
+  zlib1g-dev && apt-get clean
+
+# Install Pyenv and dev Python versions 3.5 and 3.6
+RUN curl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash
+RUN pyenv update
+RUN pyenv install 3.5-dev
+RUN pyenv install 3.6-dev
+RUN pyenv local 3.5-dev 3.6-dev
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template
index 4cb8d3b088fdbf3b5c30be283870849b589ac169..da0c70aee0cd616b281ebbdaef8cf2143374780e 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../csharp_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template
index e39175a1ea8e01744d9e54135a8ea0e400b38ad1..aba0a0497e97b1431c57a4474d670c9d5d1324cd 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template
index 542c81d614c1255c379753939cfffd38fe4ba820..38a5ca725daf2636935f92135ff9da000f94a498 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM golang:1.5
   
   <%include file="../../go_path.include"/>
+  <%include file="../../python_deps.include"/>
   # Define the default command.
   CMD ["bash"]
   
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template
index 542c81d614c1255c379753939cfffd38fe4ba820..38a5ca725daf2636935f92135ff9da000f94a498 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM golang:1.5
   
   <%include file="../../go_path.include"/>
+  <%include file="../../python_deps.include"/>
   # Define the default command.
   CMD ["bash"]
   
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template
index c286e80826c2cf85df96f325873a0316bf7a1fa4..3d5e7ee1204b908f76de696bf9ce8b9e980aaae2 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template
@@ -32,7 +32,8 @@
   FROM debian:jessie
   
   <%include file="../../java_deps.include"/>
-  
+  <%include file="../../python_deps.include"/>
+
   # Trigger download of as many Gradle artifacts as possible.
   RUN git clone --recursive --depth 1 https://github.com/grpc/grpc-java.git && ${'\\'}
     cd grpc-java && ${'\\'}
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template
index 89bb9acc1afc29d076e3e8629a485774110ca0ae..01a03073a6f475b4f49ce55d6ee7dcc046844c75 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../node_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template
index 476f9d3d3eb01ed6c52d1a0eaf6235e91a02d179..6232e081eb986f33abe266c3b3f02cee280dab8a 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../ruby_deps.include"/>
   <%include file="../../php_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template
index c3625b91fcc4a4285c3910b7bdc53849d58762b6..fbd8242391779156784429d66404f526f410cf2e 100644
--- a/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template
+++ b/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../ruby_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
diff --git a/templates/tools/dockerfile/python_deps.include b/templates/tools/dockerfile/python_deps.include
index a559f96394c0f5ada6b390554ef4144f22e1a06c..26c91f495d546ff460af292606edb8950133c95b 100644
--- a/templates/tools/dockerfile/python_deps.include
+++ b/templates/tools/dockerfile/python_deps.include
@@ -11,4 +11,4 @@ RUN apt-get update && apt-get install -y ${'\\'}
 # Install Python packages from PyPI
 RUN pip install pip --upgrade
 RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 tox
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
diff --git a/templates/tools/dockerfile/run_tests_addons_nocache.include b/templates/tools/dockerfile/run_tests_addons_nocache.include
index 242a1acfb3a7fc22bae5274b4c750c6b1d51871f..74b01e386c3889bba2c07525fd32d7d0498c145c 100644
--- a/templates/tools/dockerfile/run_tests_addons_nocache.include
+++ b/templates/tools/dockerfile/run_tests_addons_nocache.include
@@ -1,6 +1,2 @@
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template
index 074178252d0b31b16f97af1b4f5b51cff7e6e925..5d805bb4b22d1cf69fc38773f4371747f90124aa 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../ccache_setup.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../gcp_api_libraries.include"/>
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template
index ac087c5da74ba8e5618834c2e18a1d4c20ccb6d8..18f06b770c3031a4c569f6035a66b6efa2042baf 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../ccache_setup.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../gcp_api_libraries.include"/>
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template
index 3ed3d6556f9a768fee8fdd8ee580efa6139c4dd4..e02254cd53567b51dfbda412e73d3b1d266812d0 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM golang:1.5
   
   <%include file="../../gcp_api_libraries.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../go_path.include"/>
   # Define the default command.
   CMD ["bash"]
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template
index 17ed99fd2e40ff10659b300136e63a0f7656efef..2bb2f9ba1e6f5e90b1913a6759c1bfb70b76c6de 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../ccache_setup.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../gcp_api_libraries.include"/>
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template
index c50d38d1eca4f4d66505e92fe224bc9a2b943253..d70b751b141ec304786dfb4058e3bce62606561d 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../node_deps.include"/>
   <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template
index 4cd069da34e6708c44716936833a228ecf0b88c2..f8dc07947420c82f2038128aa9431230e9c1a12e 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../ruby_deps.include"/>
   <%include file="../../gcp_api_libraries.include"/>
   <%include file="../../php_deps.include"/>
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template
index 8b933aaa324c27181186b9331956e595316a84bf..18199771d7ec0ff281545f7fc4f6940db58cf9ac 100644
--- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../ccache_setup.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../gcp_api_libraries.include"/>
diff --git a/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template b/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template
new file mode 100644
index 0000000000000000000000000000000000000000..24dad488077d8730619f570c52259957e8606500
--- /dev/null
+++ b/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template
@@ -0,0 +1,55 @@
+%YAML 1.2
+--- |
+  # Copyright 2015, Google Inc.
+  # All rights reserved.
+  #
+  # Redistribution and use in source and binary forms, with or without
+  # modification, are permitted provided that the following conditions are
+  # met:
+  #
+  #     * Redistributions of source code must retain the above copyright
+  # notice, this list of conditions and the following disclaimer.
+  #     * Redistributions in binary form must reproduce the above
+  # copyright notice, this list of conditions and the following disclaimer
+  # in the documentation and/or other materials provided with the
+  # distribution.
+  #     * Neither the name of Google Inc. nor the names of its
+  # contributors may be used to endorse or promote products derived from
+  # this software without specific prior written permission.
+  #
+  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  
+  FROM debian:jessie
+  
+  <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
+  <%include file="../../csharp_deps.include"/>
+  
+  # Install dotnet SDK based on https://www.microsoft.com/net/core#debian
+  RUN apt-get update && apt-get install -y curl libunwind8 gettext
+  RUN curl -sSL -o dotnet.tar.gz https://go.microsoft.com/fwlink/?LinkID=809130
+  RUN mkdir -p /opt/dotnet && tar zxf dotnet.tar.gz -C /opt/dotnet
+  RUN ln -s /opt/dotnet/dotnet /usr/local/bin
+  
+  # Trigger the population of the local package cache
+  ENV NUGET_XMLDOC_MODE skip
+  RUN mkdir warmup ${'\\'}
+      && cd warmup ${'\\'}
+      && dotnet new ${'\\'}
+      && cd .. ${'\\'}
+      && rm -rf warmup
+  
+  <%include file="../../run_tests_addons.include"/>
+  # Define the default command.
+  CMD ["bash"]
+  
diff --git a/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template
index 4cb8d3b088fdbf3b5c30be283870849b589ac169..da0c70aee0cd616b281ebbdaef8cf2143374780e 100644
--- a/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../csharp_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
diff --git a/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template
index 04abf9f741ed257f2f84e511a0ef77f93e0fd44f..04767248b8b7dadd556b38d173534576d10eaa9a 100644
--- a/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../clang_update.include"/>
   <%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template b/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template
index 7f43e759fc0e1e1671b3fb75a0e9c7108484065a..49fbea0f4543abc43b3e3c139c0b100f24855a61 100644
--- a/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template
@@ -32,8 +32,8 @@
   FROM 32bit/debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
   CMD ["bash"]
-  
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template
index 4950a82d2d72860886a33cc629ab9bfc013abaa3..8a95cad6492baed88108e3ce4663d66baf5690ea 100644
--- a/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM ubuntu:14.04
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../run_tests_addons_nocache.include"/>
   # Define the default command.
diff --git a/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template
index e39537975bf21e3c4a222c7057670e39fa141f34..42ad6c130da5f69006d46a6f1f5f4fda35c34c20 100644
--- a/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM ubuntu:16.04
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   
@@ -42,4 +43,3 @@
   
   # Define the default command.
   CMD ["bash"]
-  
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template
index e77b3d9e41463fa5b83dd2b1811ad532a2a1eb50..b6a3b0d5d2102a4590da83fab4fb8e42081edecc 100644
--- a/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:wheezy
 
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
 
   RUN apt-get update && apt-get install -y ${'\\'}
diff --git a/templates/tools/dockerfile/test/fuzzer/Dockerfile.template b/templates/tools/dockerfile/test/fuzzer/Dockerfile.template
index 33df275908406cbe6dea47d933b43e0dda702dc6..6d7cb72f2742f98a9c95d8bf7b09092fbb914b15 100644
--- a/templates/tools/dockerfile/test/fuzzer/Dockerfile.template
+++ b/templates/tools/dockerfile/test/fuzzer/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM debian:jessie
 
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../cxx_deps.include"/>
   <%include file="../../clang_update.include"/>
   <%include file="../../run_tests_addons.include"/>
diff --git a/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template
index 5a6233343e9d2e6f53e3b438421b6d73ea89e753..72b098f0c2048666ee5b153acf2040afa92a8b97 100644
--- a/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template
@@ -32,8 +32,8 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../node_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
   CMD ["bash"]
-  
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template
index fffac89efcd0cb814dea764bd5f2676a0249f47f..0cfa373c9054090269036d0bbc718c58ce96086b 100644
--- a/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template
@@ -32,8 +32,8 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../php_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
   CMD ["bash"]
-  
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template b/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template
new file mode 100644
index 0000000000000000000000000000000000000000..f9a4dcb7b6c30e3c83fed4db68bb7ee224e26f7e
--- /dev/null
+++ b/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template
@@ -0,0 +1,39 @@
+%YAML 1.2
+--- |
+  # Copyright 2016, Google Inc.
+  # All rights reserved.
+  #
+  # Redistribution and use in source and binary forms, with or without
+  # modification, are permitted provided that the following conditions are
+  # met:
+  #
+  #     * Redistributions of source code must retain the above copyright
+  # notice, this list of conditions and the following disclaimer.
+  #     * Redistributions in binary form must reproduce the above
+  # copyright notice, this list of conditions and the following disclaimer
+  # in the documentation and/or other materials provided with the
+  # distribution.
+  #     * Neither the name of Google Inc. nor the names of its
+  # contributors may be used to endorse or promote products derived from
+  # this software without specific prior written permission.
+  #
+  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  
+  FROM debian:jessie
+  
+  <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
+  <%include file="../../apt_get_pyenv.include"/>
+  <%include file="../../run_tests_addons.include"/>
+  # Define the default command.
+  CMD ["bash"]
diff --git a/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template
index 70baddffbf9fff94b73dceaf035233e3d9b0065e..35838bc11e420dc57e19d93add5399f4487bad8d 100644
--- a/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template
+++ b/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template
@@ -32,8 +32,8 @@
   FROM debian:jessie
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   <%include file="../../ruby_deps.include"/>
   <%include file="../../run_tests_addons.include"/>
   # Define the default command.
   CMD ["bash"]
-  
\ No newline at end of file
diff --git a/templates/tools/dockerfile/test/sanity/Dockerfile.template b/templates/tools/dockerfile/test/sanity/Dockerfile.template
index 9987e3526085634e228568ecdc1ad4536f44e27f..12309b64d1606d7d46e15568a912f255214fcfa1 100644
--- a/templates/tools/dockerfile/test/sanity/Dockerfile.template
+++ b/templates/tools/dockerfile/test/sanity/Dockerfile.template
@@ -32,6 +32,7 @@
   FROM ubuntu:15.10
   
   <%include file="../../apt_get_basic.include"/>
+  <%include file="../../python_deps.include"/>
   #========================
   # Sanity test dependencies
   RUN apt-get update && apt-get install -y ${"\\"}
diff --git a/test/core/end2end/bad_server_response_test.c b/test/core/end2end/bad_server_response_test.c
new file mode 100644
index 0000000000000000000000000000000000000000..cca75f54a5f4fe0e4ed7b33d24f24c33dabae15a
--- /dev/null
+++ b/test/core/end2end/bad_server_response_test.c
@@ -0,0 +1,340 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <string.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/log.h>
+#include <grpc/support/slice.h>
+#include <grpc/support/thd.h>
+
+// #include "src/core/ext/transport/chttp2/transport/internal.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/support/string.h"
+#include "test/core/end2end/cq_verifier.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/core/util/test_tcp_server.h"
+
+#define HTTP1_RESP                           \
+  "HTTP/1.0 400 Bad Request\n"               \
+  "Content-Type: text/html; charset=UTF-8\n" \
+  "Content-Length: 0\n"                      \
+  "Date: Tue, 07 Jun 2016 17:43:20 GMT\n\n"
+
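+/* Canned HTTP/2 response: an empty SETTINGS frame followed by a HEADERS
+   frame on stream 1 whose HPACK block carries content-length, content-type
+   and the :status code substituted below. */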
+#define HTTP2_RESP(STATUS_CODE)          \
+  "\x00\x00\x00\x04\x00\x00\x00\x00\x00" \
+  "\x00\x00>\x01\x04\x00\x00\x00\x01"    \
+  "\x10\x0e"                             \
+  "content-length\x01"                   \
+  "0"                                    \
+  "\x10\x0c"                             \
+  "content-type\x10"                     \
+  "application/grpc"                     \
+  "\x10\x07:status\x03" #STATUS_CODE
+
+#define UNPARSEABLE_RESP "Bad Request\n"
+
+#define HTTP2_DETAIL_MSG(STATUS_CODE) \
+  "Received http2 header with status: " #STATUS_CODE
+
+#define UNPARSEABLE_DETAIL_MSG "Failed parsing HTTP/2"
+
+/* TODO(zyc) Check the content of incoming data instead of using this length */
+#define EXPECTED_INCOMING_DATA_LENGTH (size_t)310
+
+struct rpc_state {
+  char *target;
+  grpc_completion_queue *cq;
+  grpc_channel *channel;
+  grpc_call *call;
+  size_t incoming_data_length;
+  gpr_slice_buffer temp_incoming_buffer;
+  gpr_slice_buffer outgoing_buffer;
+  grpc_endpoint *tcp;
+  gpr_atm done_atm;
+  bool write_done;
+  const char *response_payload;
+  size_t response_payload_length;
+};
+
+static int server_port;
+static struct rpc_state state;
+static grpc_closure on_read;
+static grpc_closure on_write;
+
+static void *tag(intptr_t t) { return (void *)t; }
+
+static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+  GPR_ASSERT(error == GRPC_ERROR_NONE);
+
+  gpr_atm_rel_store(&state.done_atm, 1);
+}
+
+static void handle_write(grpc_exec_ctx *exec_ctx) {
+  gpr_slice slice = gpr_slice_from_copied_buffer(state.response_payload,
+                                                 state.response_payload_length);
+
+  gpr_slice_buffer_reset_and_unref(&state.outgoing_buffer);
+  gpr_slice_buffer_add(&state.outgoing_buffer, slice);
+  grpc_endpoint_write(exec_ctx, state.tcp, &state.outgoing_buffer, &on_write);
+}
+
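+/* Reads from the accepted connection until more than
+   EXPECTED_INCOMING_DATA_LENGTH bytes have arrived, then writes the canned
+   response back to the client. */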
+static void handle_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+  GPR_ASSERT(error == GRPC_ERROR_NONE);
+  state.incoming_data_length += state.temp_incoming_buffer.length;
+
+  size_t i;
+  for (i = 0; i < state.temp_incoming_buffer.count; i++) {
+    char *dump = gpr_dump_slice(state.temp_incoming_buffer.slices[i],
+                                GPR_DUMP_HEX | GPR_DUMP_ASCII);
+    gpr_log(GPR_DEBUG, "Server received: %s", dump);
+    gpr_free(dump);
+  }
+
+  gpr_log(GPR_DEBUG, "got %" PRIuPTR " bytes, expected %" PRIuPTR " bytes",
+          state.incoming_data_length, EXPECTED_INCOMING_DATA_LENGTH);
+  if (state.incoming_data_length > EXPECTED_INCOMING_DATA_LENGTH) {
+    handle_write(exec_ctx);
+  } else {
+    grpc_endpoint_read(exec_ctx, state.tcp, &state.temp_incoming_buffer,
+                       &on_read);
+  }
+}
+
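+/* Accept callback for the test TCP server: wires up the read/write closures,
+   resets the per-connection state and starts reading the client's request. */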
+static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
+                       grpc_pollset *accepting_pollset,
+                       grpc_tcp_server_acceptor *acceptor) {
+  test_tcp_server *server = arg;
+  grpc_closure_init(&on_read, handle_read, NULL);
+  grpc_closure_init(&on_write, done_write, NULL);
+  gpr_slice_buffer_init(&state.temp_incoming_buffer);
+  gpr_slice_buffer_init(&state.outgoing_buffer);
+  state.tcp = tcp;
+  state.incoming_data_length = 0;
+  grpc_endpoint_add_to_pollset(exec_ctx, tcp, server->pollset);
+  grpc_endpoint_read(exec_ctx, tcp, &state.temp_incoming_buffer, &on_read);
+}
+
+static gpr_timespec n_sec_deadline(int seconds) {
+  return gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+                      gpr_time_from_seconds(seconds, GPR_TIMESPAN));
+}
+
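+/* Issues a unary call against the fake server and asserts that it terminates
+   with the expected status code and detail string. */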
+static void start_rpc(int target_port, grpc_status_code expected_status,
+                      const char *expected_detail) {
+  grpc_op ops[6];
+  grpc_op *op;
+  grpc_metadata_array initial_metadata_recv;
+  grpc_metadata_array trailing_metadata_recv;
+  grpc_status_code status;
+  grpc_call_error error;
+  cq_verifier *cqv;
+  char *details = NULL;
+  size_t details_capacity = 0;
+
+  state.cq = grpc_completion_queue_create(NULL);
+  cqv = cq_verifier_create(state.cq);
+  gpr_join_host_port(&state.target, "127.0.0.1", target_port);
+  state.channel = grpc_insecure_channel_create(state.target, NULL, NULL);
+  state.call = grpc_channel_create_call(
+      state.channel, NULL, GRPC_PROPAGATE_DEFAULTS, state.cq, "/Service/Method",
+      "localhost", gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+
+  grpc_metadata_array_init(&initial_metadata_recv);
+  grpc_metadata_array_init(&trailing_metadata_recv);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  op->op = GRPC_OP_RECV_INITIAL_METADATA;
+  op->data.recv_initial_metadata = &initial_metadata_recv;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+  op->data.recv_status_on_client.status = &status;
+  op->data.recv_status_on_client.status_details = &details;
+  op->data.recv_status_on_client.status_details_capacity = &details_capacity;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  error =
+      grpc_call_start_batch(state.call, ops, (size_t)(op - ops), tag(1), NULL);
+
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  cq_expect_completion(cqv, tag(1), 1);
+  cq_verify(cqv);
+
+  gpr_log(GPR_DEBUG, "Rpc status: %d, details: %s", status, details);
+  GPR_ASSERT(status == expected_status);
+  GPR_ASSERT(NULL != strstr(details, expected_detail));
+
+  grpc_metadata_array_destroy(&initial_metadata_recv);
+  grpc_metadata_array_destroy(&trailing_metadata_recv);
+  gpr_free(details);
+  cq_verifier_destroy(cqv);
+}
+
+static void cleanup_rpc(void) {
+  grpc_event ev;
+  gpr_slice_buffer_destroy(&state.temp_incoming_buffer);
+  gpr_slice_buffer_destroy(&state.outgoing_buffer);
+  grpc_call_destroy(state.call);
+  grpc_completion_queue_shutdown(state.cq);
+  do {
+    ev = grpc_completion_queue_next(state.cq, n_sec_deadline(1), NULL);
+  } while (ev.type != GRPC_QUEUE_SHUTDOWN);
+  grpc_completion_queue_destroy(state.cq);
+  grpc_channel_destroy(state.channel);
+  gpr_free(state.target);
+}
+
+typedef struct {
+  test_tcp_server *server;
+  gpr_event *signal_when_done;
+} poll_args;
+
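+/* Thread body: polls the test TCP server until the response has been written
+   (done_atm is set) or a 10 second deadline expires, then signals the caller. */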
+static void actually_poll_server(void *arg) {
+  poll_args *pa = arg;
+  gpr_timespec deadline = n_sec_deadline(10);
+  while (true) {
+    bool done = gpr_atm_acq_load(&state.done_atm) != 0;
+    gpr_timespec time_left =
+        gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME));
+    gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done,
+            time_left.tv_sec, time_left.tv_nsec);
+    if (done || gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) < 0) {
+      break;
+    }
+    test_tcp_server_poll(pa->server, 1);
+  }
+  gpr_event_set(pa->signal_when_done, (void *)1);
+  gpr_free(pa);
+}
+
+static void poll_server_until_read_done(test_tcp_server *server,
+                                        gpr_event *signal_when_done) {
+  gpr_atm_rel_store(&state.done_atm, 0);
+  state.write_done = 0;
+  gpr_thd_id id;
+  poll_args *pa = gpr_malloc(sizeof(*pa));
+  pa->server = server;
+  pa->signal_when_done = signal_when_done;
+  gpr_thd_new(&id, actually_poll_server, pa, NULL);
+}
+
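+/* Runs one scenario end to end: starts the fake server, issues the RPC,
+   waits for the server thread to finish writing, then tears everything down. */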
+static void run_test(const char *response_payload,
+                     size_t response_payload_length,
+                     grpc_status_code expected_status,
+                     const char *expected_detail) {
+  test_tcp_server test_server;
+  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  gpr_event ev;
+
+  grpc_init();
+  gpr_event_init(&ev);
+  server_port = grpc_pick_unused_port_or_die();
+  test_tcp_server_init(&test_server, on_connect, &test_server);
+  test_tcp_server_start(&test_server, server_port);
+  state.response_payload = response_payload;
+  state.response_payload_length = response_payload_length;
+
+  /* poll the server until the response has been sent out */
+  poll_server_until_read_done(&test_server, &ev);
+  start_rpc(server_port, expected_status, expected_detail);
+  gpr_event_wait(&ev, gpr_inf_future(GPR_CLOCK_REALTIME));
+
+  /* clean up */
+  grpc_endpoint_shutdown(&exec_ctx, state.tcp);
+  grpc_endpoint_destroy(&exec_ctx, state.tcp);
+  grpc_exec_ctx_finish(&exec_ctx);
+  cleanup_rpc();
+  test_tcp_server_destroy(&test_server);
+
+  grpc_shutdown();
+}
+
+int main(int argc, char **argv) {
+  grpc_test_init(argc, argv);
+
+  /* status defined in hpack static table */
+  run_test(HTTP2_RESP(204), sizeof(HTTP2_RESP(204)) - 1, GRPC_STATUS_CANCELLED,
+           HTTP2_DETAIL_MSG(204));
+
+  run_test(HTTP2_RESP(206), sizeof(HTTP2_RESP(206)) - 1, GRPC_STATUS_CANCELLED,
+           HTTP2_DETAIL_MSG(206));
+
+  run_test(HTTP2_RESP(304), sizeof(HTTP2_RESP(304)) - 1, GRPC_STATUS_CANCELLED,
+           HTTP2_DETAIL_MSG(304));
+
+  run_test(HTTP2_RESP(400), sizeof(HTTP2_RESP(400)) - 1, GRPC_STATUS_CANCELLED,
+           HTTP2_DETAIL_MSG(400));
+
+  run_test(HTTP2_RESP(404), sizeof(HTTP2_RESP(404)) - 1, GRPC_STATUS_CANCELLED,
+           HTTP2_DETAIL_MSG(404));
+
+  run_test(HTTP2_RESP(500), sizeof(HTTP2_RESP(500)) - 1, GRPC_STATUS_CANCELLED,
+           HTTP2_DETAIL_MSG(500));
+
+  /* status not defined in hpack static table */
+  run_test(HTTP2_RESP(401), sizeof(HTTP2_RESP(401)) - 1, GRPC_STATUS_CANCELLED,
+           HTTP2_DETAIL_MSG(401));
+
+  run_test(HTTP2_RESP(403), sizeof(HTTP2_RESP(403)) - 1, GRPC_STATUS_CANCELLED,
+           HTTP2_DETAIL_MSG(403));
+
+  run_test(HTTP2_RESP(502), sizeof(HTTP2_RESP(502)) - 1, GRPC_STATUS_CANCELLED,
+           HTTP2_DETAIL_MSG(502));
+
+  /* unparseable response */
+  run_test(UNPARSEABLE_RESP, sizeof(UNPARSEABLE_RESP) - 1,
+           GRPC_STATUS_UNAVAILABLE, UNPARSEABLE_DETAIL_MSG);
+
+  /* http1 response */
+  run_test(HTTP1_RESP, sizeof(HTTP1_RESP) - 1, GRPC_STATUS_UNAVAILABLE,
+           UNPARSEABLE_DETAIL_MSG);
+
+  return 0;
+}
diff --git a/test/core/end2end/cq_verifier.c b/test/core/end2end/cq_verifier.c
index 060e166e6ebf55f17e8bc6d2c20644c97c04e33d..b77568058cdecafca84168aa044ad07d739e49fb 100644
--- a/test/core/end2end/cq_verifier.c
+++ b/test/core/end2end/cq_verifier.c
@@ -149,7 +149,8 @@ int byte_buffer_eq_string(grpc_byte_buffer *bb, const char *str) {
   grpc_byte_buffer *rbb;
   int res;
 
-  grpc_byte_buffer_reader_init(&reader, bb);
+  GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, bb) &&
+             "Couldn't init byte buffer reader");
   rbb = grpc_raw_byte_buffer_from_reader(&reader);
   res = byte_buffer_eq_slice(rbb, gpr_slice_from_copied_string(str));
   grpc_byte_buffer_reader_destroy(&reader);
diff --git a/test/core/end2end/dualstack_socket_test.c b/test/core/end2end/dualstack_socket_test.c
index 65a8deb663ddf3f6b495a17a82e579b6c9300cba..348b9ed5f02d4190f511539e093e775df8fb08e0 100644
--- a/test/core/end2end/dualstack_socket_test.c
+++ b/test/core/end2end/dualstack_socket_test.c
@@ -273,7 +273,7 @@ void test_connect(const char *server_host, const char *client_host, int port,
 }
 
 int external_dns_works(const char *host) {
-  grpc_resolved_addresses *res;
+  grpc_resolved_addresses *res = NULL;
   grpc_error *error = grpc_blocking_resolve_address(host, "80", &res);
   GRPC_ERROR_UNREF(error);
   if (res != NULL) {
diff --git a/test/core/end2end/end2end_nosec_tests.c b/test/core/end2end/end2end_nosec_tests.c
index b71299c09e0d6277600ef0dde3c12f012d79756c..03e55f1181221ee69c87951760f43c0b6f88855c 100644
--- a/test/core/end2end/end2end_nosec_tests.c
+++ b/test/core/end2end/end2end_nosec_tests.c
@@ -89,6 +89,8 @@ extern void max_message_length(grpc_end2end_test_config config);
 extern void max_message_length_pre_init(void);
 extern void negative_deadline(grpc_end2end_test_config config);
 extern void negative_deadline_pre_init(void);
+extern void network_status_change(grpc_end2end_test_config config);
+extern void network_status_change_pre_init(void);
 extern void no_op(grpc_end2end_test_config config);
 extern void no_op_pre_init(void);
 extern void payload(grpc_end2end_test_config config);
@@ -115,6 +117,8 @@ extern void simple_metadata(grpc_end2end_test_config config);
 extern void simple_metadata_pre_init(void);
 extern void simple_request(grpc_end2end_test_config config);
 extern void simple_request_pre_init(void);
+extern void streaming_error_response(grpc_end2end_test_config config);
+extern void streaming_error_response_pre_init(void);
 extern void trailing_metadata(grpc_end2end_test_config config);
 extern void trailing_metadata_pre_init(void);
 
@@ -144,6 +148,7 @@ void grpc_end2end_tests_pre_init(void) {
   max_concurrent_streams_pre_init();
   max_message_length_pre_init();
   negative_deadline_pre_init();
+  network_status_change_pre_init();
   no_op_pre_init();
   payload_pre_init();
   ping_pre_init();
@@ -157,6 +162,7 @@ void grpc_end2end_tests_pre_init(void) {
   simple_delayed_request_pre_init();
   simple_metadata_pre_init();
   simple_request_pre_init();
+  streaming_error_response_pre_init();
   trailing_metadata_pre_init();
 }
 
@@ -190,6 +196,7 @@ void grpc_end2end_tests(int argc, char **argv,
     max_concurrent_streams(config);
     max_message_length(config);
     negative_deadline(config);
+    network_status_change(config);
     no_op(config);
     payload(config);
     ping(config);
@@ -203,6 +210,7 @@ void grpc_end2end_tests(int argc, char **argv,
     simple_delayed_request(config);
     simple_metadata(config);
     simple_request(config);
+    streaming_error_response(config);
     trailing_metadata(config);
     return;
   }
@@ -300,6 +308,10 @@ void grpc_end2end_tests(int argc, char **argv,
       negative_deadline(config);
       continue;
     }
+    if (0 == strcmp("network_status_change", argv[i])) {
+      network_status_change(config);
+      continue;
+    }
     if (0 == strcmp("no_op", argv[i])) {
       no_op(config);
       continue;
@@ -352,6 +364,10 @@ void grpc_end2end_tests(int argc, char **argv,
       simple_request(config);
       continue;
     }
+    if (0 == strcmp("streaming_error_response", argv[i])) {
+      streaming_error_response(config);
+      continue;
+    }
     if (0 == strcmp("trailing_metadata", argv[i])) {
       trailing_metadata(config);
       continue;
diff --git a/test/core/end2end/end2end_tests.c b/test/core/end2end/end2end_tests.c
index 00c9c44a78c63777377b6a8dc0073378fbfb5598..877b1b198930118ce2b36a33062a7384dcbfa6db 100644
--- a/test/core/end2end/end2end_tests.c
+++ b/test/core/end2end/end2end_tests.c
@@ -91,6 +91,8 @@ extern void max_message_length(grpc_end2end_test_config config);
 extern void max_message_length_pre_init(void);
 extern void negative_deadline(grpc_end2end_test_config config);
 extern void negative_deadline_pre_init(void);
+extern void network_status_change(grpc_end2end_test_config config);
+extern void network_status_change_pre_init(void);
 extern void no_op(grpc_end2end_test_config config);
 extern void no_op_pre_init(void);
 extern void payload(grpc_end2end_test_config config);
@@ -117,6 +119,8 @@ extern void simple_metadata(grpc_end2end_test_config config);
 extern void simple_metadata_pre_init(void);
 extern void simple_request(grpc_end2end_test_config config);
 extern void simple_request_pre_init(void);
+extern void streaming_error_response(grpc_end2end_test_config config);
+extern void streaming_error_response_pre_init(void);
 extern void trailing_metadata(grpc_end2end_test_config config);
 extern void trailing_metadata_pre_init(void);
 
@@ -147,6 +151,7 @@ void grpc_end2end_tests_pre_init(void) {
   max_concurrent_streams_pre_init();
   max_message_length_pre_init();
   negative_deadline_pre_init();
+  network_status_change_pre_init();
   no_op_pre_init();
   payload_pre_init();
   ping_pre_init();
@@ -160,6 +165,7 @@ void grpc_end2end_tests_pre_init(void) {
   simple_delayed_request_pre_init();
   simple_metadata_pre_init();
   simple_request_pre_init();
+  streaming_error_response_pre_init();
   trailing_metadata_pre_init();
 }
 
@@ -194,6 +200,7 @@ void grpc_end2end_tests(int argc, char **argv,
     max_concurrent_streams(config);
     max_message_length(config);
     negative_deadline(config);
+    network_status_change(config);
     no_op(config);
     payload(config);
     ping(config);
@@ -207,6 +214,7 @@ void grpc_end2end_tests(int argc, char **argv,
     simple_delayed_request(config);
     simple_metadata(config);
     simple_request(config);
+    streaming_error_response(config);
     trailing_metadata(config);
     return;
   }
@@ -308,6 +316,10 @@ void grpc_end2end_tests(int argc, char **argv,
       negative_deadline(config);
       continue;
     }
+    if (0 == strcmp("network_status_change", argv[i])) {
+      network_status_change(config);
+      continue;
+    }
     if (0 == strcmp("no_op", argv[i])) {
       no_op(config);
       continue;
@@ -360,6 +372,10 @@ void grpc_end2end_tests(int argc, char **argv,
       simple_request(config);
       continue;
     }
+    if (0 == strcmp("streaming_error_response", argv[i])) {
+      streaming_error_response(config);
+      continue;
+    }
     if (0 == strcmp("trailing_metadata", argv[i])) {
       trailing_metadata(config);
       continue;
diff --git a/test/core/end2end/fuzzers/client_fuzzer_corpus/2c452818a10ddef09b90c89a53db14b9b56b21f3 b/test/core/end2end/fuzzers/client_fuzzer_corpus/2c452818a10ddef09b90c89a53db14b9b56b21f3
new file mode 100644
index 0000000000000000000000000000000000000000..059634fda10a679b7bb4f753fe06660888b26dd4
Binary files /dev/null and b/test/core/end2end/fuzzers/client_fuzzer_corpus/2c452818a10ddef09b90c89a53db14b9b56b21f3 differ
diff --git a/test/core/end2end/fuzzers/client_fuzzer_corpus/42ead79c94eccdf8a8c3d8036be73e14fa260dd5 b/test/core/end2end/fuzzers/client_fuzzer_corpus/42ead79c94eccdf8a8c3d8036be73e14fa260dd5
new file mode 100644
index 0000000000000000000000000000000000000000..b9c53b26edd94105bc4bdb24e73697cc4be93dc4
Binary files /dev/null and b/test/core/end2end/fuzzers/client_fuzzer_corpus/42ead79c94eccdf8a8c3d8036be73e14fa260dd5 differ
diff --git a/test/core/end2end/fuzzers/client_fuzzer_corpus/4e05d6cf1c3f0c04f6ee92d09a53ee0fe35c085a b/test/core/end2end/fuzzers/client_fuzzer_corpus/4e05d6cf1c3f0c04f6ee92d09a53ee0fe35c085a
new file mode 100644
index 0000000000000000000000000000000000000000..8a4a279998d72b2aa9b94310364f40c2580515c2
Binary files /dev/null and b/test/core/end2end/fuzzers/client_fuzzer_corpus/4e05d6cf1c3f0c04f6ee92d09a53ee0fe35c085a differ
diff --git a/test/core/end2end/fuzzers/client_fuzzer_corpus/8f980dd25f1c77e3536131c2c620aa32e8c13180 b/test/core/end2end/fuzzers/client_fuzzer_corpus/8f980dd25f1c77e3536131c2c620aa32e8c13180
new file mode 100644
index 0000000000000000000000000000000000000000..fcebab7a64f25b8dfad2cbb19851b76833a674b7
Binary files /dev/null and b/test/core/end2end/fuzzers/client_fuzzer_corpus/8f980dd25f1c77e3536131c2c620aa32e8c13180 differ
diff --git a/test/core/end2end/fuzzers/client_fuzzer_corpus/aef36c49d7dec0dcf8cdc224d9e9221fa2cb1db0 b/test/core/end2end/fuzzers/client_fuzzer_corpus/aef36c49d7dec0dcf8cdc224d9e9221fa2cb1db0
new file mode 100644
index 0000000000000000000000000000000000000000..6b015fe66e6131d746ff41da4b961b0ea074a02f
Binary files /dev/null and b/test/core/end2end/fuzzers/client_fuzzer_corpus/aef36c49d7dec0dcf8cdc224d9e9221fa2cb1db0 differ
diff --git a/test/core/end2end/fuzzers/client_fuzzer_corpus/crash-14ed70cd9ea7987cdd0c8f6e39398ee7c60ee2ff b/test/core/end2end/fuzzers/client_fuzzer_corpus/crash-14ed70cd9ea7987cdd0c8f6e39398ee7c60ee2ff
new file mode 100644
index 0000000000000000000000000000000000000000..be6366049d93b585d80d625c72ec8eabc458f8f5
Binary files /dev/null and b/test/core/end2end/fuzzers/client_fuzzer_corpus/crash-14ed70cd9ea7987cdd0c8f6e39398ee7c60ee2ff differ
diff --git a/test/core/end2end/fuzzers/client_fuzzer_corpus/dcb06a6e34cbed15515e5b3581ca666f704777bd b/test/core/end2end/fuzzers/client_fuzzer_corpus/dcb06a6e34cbed15515e5b3581ca666f704777bd
new file mode 100644
index 0000000000000000000000000000000000000000..92750f94a32152bdd71d388f47c4bcefb6ec34b8
Binary files /dev/null and b/test/core/end2end/fuzzers/client_fuzzer_corpus/dcb06a6e34cbed15515e5b3581ca666f704777bd differ
diff --git a/test/core/end2end/fuzzers/client_fuzzer_corpus/ea46b684f1e67a27c231f2d536c41da631189b9c b/test/core/end2end/fuzzers/client_fuzzer_corpus/ea46b684f1e67a27c231f2d536c41da631189b9c
new file mode 100644
index 0000000000000000000000000000000000000000..c891d9adc8d1254cc043625d2fd9e424618666a0
Binary files /dev/null and b/test/core/end2end/fuzzers/client_fuzzer_corpus/ea46b684f1e67a27c231f2d536c41da631189b9c differ
diff --git a/test/core/end2end/gen_build_yaml.py b/test/core/end2end/gen_build_yaml.py
index 325d9b3cad0ffd2fb5faabcea1e59e337b1c39b5..fb7275474d394d7edba25d80edffbfee58451cb5 100755
--- a/test/core/end2end/gen_build_yaml.py
+++ b/test/core/end2end/gen_build_yaml.py
@@ -112,6 +112,7 @@ END2END_TESTS = {
     'max_concurrent_streams': default_test_options._replace(proxyable=False),
     'max_message_length': default_test_options,
     'negative_deadline': default_test_options,
+    'network_status_change': default_test_options,
     'no_op': default_test_options,
     'payload': default_test_options,
     'ping_pong_streaming': default_test_options,
@@ -126,6 +127,7 @@ END2END_TESTS = {
     'simple_delayed_request': connectivity_test_options,
     'simple_metadata': default_test_options,
     'simple_request': default_test_options,
+    'streaming_error_response': default_test_options,
     'trailing_metadata': default_test_options,
 }
 
diff --git a/test/core/end2end/tests/network_status_change.c b/test/core/end2end/tests/network_status_change.c
new file mode 100644
index 0000000000000000000000000000000000000000..10207844ab039b932387e28729a00d1191dbd063
--- /dev/null
+++ b/test/core/end2end/tests/network_status_change.c
@@ -0,0 +1,241 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "test/core/end2end/end2end_tests.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/byte_buffer.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+#include "test/core/end2end/cq_verifier.h"
+
+/* This is a private API, exposed here only for testing. */
+extern void grpc_network_status_shutdown_all_endpoints();
+
+enum { TIMEOUT = 200000 };
+
+static void *tag(intptr_t t) { return (void *)t; }
+
+static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
+                                            const char *test_name,
+                                            grpc_channel_args *client_args,
+                                            grpc_channel_args *server_args) {
+  grpc_end2end_test_fixture f;
+  gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
+  f = config.create_fixture(client_args, server_args);
+  config.init_server(&f, server_args);
+  config.init_client(&f, client_args);
+  return f;
+}
+
+static gpr_timespec n_seconds_time(int n) {
+  return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
+}
+
+static gpr_timespec five_seconds_time(void) { return n_seconds_time(500); }
+
+static void drain_cq(grpc_completion_queue *cq) {
+  grpc_event ev;
+  do {
+    ev = grpc_completion_queue_next(cq, five_seconds_time(), NULL);
+  } while (ev.type != GRPC_QUEUE_SHUTDOWN);
+}
+
+static void shutdown_server(grpc_end2end_test_fixture *f) {
+  if (!f->server) return;
+  grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000));
+  GPR_ASSERT(grpc_completion_queue_pluck(
+                 f->cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5), NULL)
+                 .type == GRPC_OP_COMPLETE);
+  grpc_server_destroy(f->server);
+  f->server = NULL;
+}
+
+static void shutdown_client(grpc_end2end_test_fixture *f) {
+  if (!f->client) return;
+  grpc_channel_destroy(f->client);
+  f->client = NULL;
+}
+
+static void end_test(grpc_end2end_test_fixture *f) {
+  shutdown_server(f);
+  shutdown_client(f);
+
+  grpc_completion_queue_shutdown(f->cq);
+  drain_cq(f->cq);
+  grpc_completion_queue_destroy(f->cq);
+}
+
+/* Client sends a request with a payload; the server reads it, then returns its status. */
+static void test_invoke_network_status_change(grpc_end2end_test_config config) {
+  grpc_call *c;
+  grpc_call *s;
+  gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
+  grpc_byte_buffer *request_payload =
+      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
+  gpr_timespec deadline = five_seconds_time();
+  grpc_end2end_test_fixture f =
+      begin_test(config, "test_invoke_request_with_payload", NULL, NULL);
+  cq_verifier *cqv = cq_verifier_create(f.cq);
+  grpc_op ops[6];
+  grpc_op *op;
+  grpc_metadata_array initial_metadata_recv;
+  grpc_metadata_array trailing_metadata_recv;
+  grpc_metadata_array request_metadata_recv;
+  grpc_byte_buffer *request_payload_recv = NULL;
+  grpc_call_details call_details;
+  grpc_status_code status;
+  grpc_call_error error;
+  char *details = NULL;
+  size_t details_capacity = 0;
+  int was_cancelled = 2;
+
+  c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
+                               "/foo", "foo.test.google.fr", deadline, NULL);
+  GPR_ASSERT(c);
+
+  grpc_metadata_array_init(&initial_metadata_recv);
+  grpc_metadata_array_init(&trailing_metadata_recv);
+  grpc_metadata_array_init(&request_metadata_recv);
+  grpc_call_details_init(&call_details);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  op->op = GRPC_OP_SEND_MESSAGE;
+  op->data.send_message = request_payload;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  op->op = GRPC_OP_RECV_INITIAL_METADATA;
+  op->data.recv_initial_metadata = &initial_metadata_recv;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+  op->data.recv_status_on_client.status = &status;
+  op->data.recv_status_on_client.status_details = &details;
+  op->data.recv_status_on_client.status_details_capacity = &details_capacity;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
+                                 f.server, &s, &call_details,
+                                 &request_metadata_recv, f.cq, f.cq, tag(101)));
+  cq_expect_completion(cqv, tag(101), 1);
+  cq_verify(cqv);
+
+  op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  op->op = GRPC_OP_RECV_MESSAGE;
+  op->data.recv_message = &request_payload_recv;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  cq_expect_completion(cqv, tag(102), 1);
+  // Simulate the network loss event
+  grpc_network_status_shutdown_all_endpoints();
+  cq_verify(cqv);
+
+  op = ops;
+  op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
+  op->data.recv_close_on_server.cancelled = &was_cancelled;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
+  op->data.send_status_from_server.trailing_metadata_count = 0;
+  op->data.send_status_from_server.status = GRPC_STATUS_OK;
+  op->data.send_status_from_server.status_details = "xyz";
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
+  error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
+  GPR_ASSERT(GRPC_CALL_OK == error);
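+  /* Both the server's status batch (tag 103) and the client's original batch
+     (tag 1) complete once the endpoints have been shut down. */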
+  cq_expect_completion(cqv, tag(103), 1);
+  cq_expect_completion(cqv, tag(1), 1);
+  cq_verify(cqv);
+
+  // Expected behavior of an RPC when the network is lost.
+  GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE);
+  GPR_ASSERT(0 == strcmp(call_details.method, "/foo"));
+  GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr"));
+  GPR_ASSERT(was_cancelled == 0);
+
+  gpr_free(details);
+  grpc_metadata_array_destroy(&initial_metadata_recv);
+  grpc_metadata_array_destroy(&trailing_metadata_recv);
+  grpc_metadata_array_destroy(&request_metadata_recv);
+  grpc_call_details_destroy(&call_details);
+
+  grpc_call_destroy(c);
+  grpc_call_destroy(s);
+
+  cq_verifier_destroy(cqv);
+
+  grpc_byte_buffer_destroy(request_payload);
+  grpc_byte_buffer_destroy(request_payload_recv);
+
+  end_test(&f);
+  config.tear_down_data(&f);
+}
+
+void network_status_change(grpc_end2end_test_config config) {
+  test_invoke_network_status_change(config);
+}
+
+void network_status_change_pre_init(void) {}
diff --git a/test/core/end2end/tests/streaming_error_response.c b/test/core/end2end/tests/streaming_error_response.c
new file mode 100644
index 0000000000000000000000000000000000000000..e15c132d6332ea93df66c32b0c13f9737a1699b0
--- /dev/null
+++ b/test/core/end2end/tests/streaming_error_response.c
@@ -0,0 +1,278 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "test/core/end2end/end2end_tests.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/byte_buffer.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+#include "test/core/end2end/cq_verifier.h"
+
+enum { TIMEOUT = 200000 };
+
+static void *tag(intptr_t t) { return (void *)t; }
+
+static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
+                                            const char *test_name,
+                                            grpc_channel_args *client_args,
+                                            grpc_channel_args *server_args,
+                                            bool request_status_early) {
+  grpc_end2end_test_fixture f;
+  gpr_log(GPR_INFO, "%s/%s/request_status_early=%s", test_name, config.name,
+          request_status_early ? "true" : "false");
+  f = config.create_fixture(client_args, server_args);
+  config.init_server(&f, server_args);
+  config.init_client(&f, client_args);
+  return f;
+}
+
+static gpr_timespec n_seconds_time(int n) {
+  return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
+}
+
+static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
+
+static void drain_cq(grpc_completion_queue *cq) {
+  grpc_event ev;
+  do {
+    ev = grpc_completion_queue_next(cq, five_seconds_time(), NULL);
+  } while (ev.type != GRPC_QUEUE_SHUTDOWN);
+}
+
+static void shutdown_server(grpc_end2end_test_fixture *f) {
+  if (!f->server) return;
+  grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000));
+  GPR_ASSERT(grpc_completion_queue_pluck(
+                 f->cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5), NULL)
+                 .type == GRPC_OP_COMPLETE);
+  grpc_server_destroy(f->server);
+  f->server = NULL;
+}
+
+static void shutdown_client(grpc_end2end_test_fixture *f) {
+  if (!f->client) return;
+  grpc_channel_destroy(f->client);
+  f->client = NULL;
+}
+
+static void end_test(grpc_end2end_test_fixture *f) {
+  shutdown_server(f);
+  shutdown_client(f);
+
+  grpc_completion_queue_shutdown(f->cq);
+  drain_cq(f->cq);
+  grpc_completion_queue_destroy(f->cq);
+}
+
+/* Client starts a streaming call; the server sends two response messages and
+   then an error status. The client requests status either early (bundled with
+   the first read) or only after reading all messages. */
+static void test(grpc_end2end_test_config config, bool request_status_early) {
+  grpc_call *c;
+  grpc_call *s;
+  gpr_slice response_payload1_slice = gpr_slice_from_copied_string("hello");
+  grpc_byte_buffer *response_payload1 =
+      grpc_raw_byte_buffer_create(&response_payload1_slice, 1);
+  gpr_slice response_payload2_slice = gpr_slice_from_copied_string("world");
+  grpc_byte_buffer *response_payload2 =
+      grpc_raw_byte_buffer_create(&response_payload2_slice, 1);
+  gpr_timespec deadline = five_seconds_time();
+  grpc_end2end_test_fixture f = begin_test(config, "streaming_error_response",
+                                           NULL, NULL, request_status_early);
+  cq_verifier *cqv = cq_verifier_create(f.cq);
+  grpc_op ops[6];
+  grpc_op *op;
+  grpc_metadata_array initial_metadata_recv;
+  grpc_metadata_array trailing_metadata_recv;
+  grpc_metadata_array request_metadata_recv;
+  grpc_byte_buffer *response_payload1_recv = NULL;
+  grpc_byte_buffer *response_payload2_recv = NULL;
+  grpc_call_details call_details;
+  grpc_status_code status;
+  grpc_call_error error;
+  char *details = NULL;
+  size_t details_capacity = 0;
+  int was_cancelled = 2;
+
+  c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
+                               "/foo", "foo.test.google.fr", deadline, NULL);
+  GPR_ASSERT(c);
+
+  grpc_metadata_array_init(&initial_metadata_recv);
+  grpc_metadata_array_init(&trailing_metadata_recv);
+  grpc_metadata_array_init(&request_metadata_recv);
+  grpc_call_details_init(&call_details);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op++;
+  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
+  op++;
+  op->op = GRPC_OP_RECV_INITIAL_METADATA;
+  op->data.recv_initial_metadata = &initial_metadata_recv;
+  op++;
+  op->op = GRPC_OP_RECV_MESSAGE;
+  op->data.recv_message = &response_payload1_recv;
+  op++;
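+  /* The status op is optionally bundled into this first batch; the test is
+     driven both with and without it (see streaming_error_response()). */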
+  if (request_status_early) {
+    op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+    op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+    op->data.recv_status_on_client.status = &status;
+    op->data.recv_status_on_client.status_details = &details;
+    op->data.recv_status_on_client.status_details_capacity = &details_capacity;
+    op++;
+  }
+  error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
+                                 f.server, &s, &call_details,
+                                 &request_metadata_recv, f.cq, f.cq, tag(101)));
+  cq_expect_completion(cqv, tag(101), 1);
+  cq_verify(cqv);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op++;
+  op->op = GRPC_OP_SEND_MESSAGE;
+  op->data.send_message = response_payload1;
+  op++;
+  error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  cq_expect_completion(cqv, tag(102), 1);
+  cq_verify(cqv);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_SEND_MESSAGE;
+  op->data.send_message = response_payload2;
+  op++;
+  error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  cq_expect_completion(cqv, tag(103), 1);
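+  /* The client's first batch (tag 1) completes here only if it did not also
+     request status; otherwise it stays pending until the server sends status
+     below. */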
+  if (!request_status_early) {
+    cq_expect_completion(cqv, tag(1), 1);
+  }
+  cq_verify(cqv);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
+  op->data.recv_close_on_server.cancelled = &was_cancelled;
+  op++;
+  op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
+  op->data.send_status_from_server.trailing_metadata_count = 0;
+  op->data.send_status_from_server.status = GRPC_STATUS_FAILED_PRECONDITION;
+  op->data.send_status_from_server.status_details = "xyz";
+  op++;
+  error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(104), NULL);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  if (!request_status_early) {
+    memset(ops, 0, sizeof(ops));
+    op = ops;
+    op->op = GRPC_OP_RECV_MESSAGE;
+    op->data.recv_message = &response_payload2_recv;
+    op++;
+    error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(2), NULL);
+    GPR_ASSERT(GRPC_CALL_OK == error);
+  }
+
+  cq_expect_completion(cqv, tag(104), 1);
+  if (request_status_early) {
+    cq_expect_completion(cqv, tag(1), 1);
+  } else {
+    cq_expect_completion(cqv, tag(2), 1);
+  }
+  cq_verify(cqv);
+
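+  /* If status was not requested up front, fetch it now in a batch of its
+     own. */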
+  if (!request_status_early) {
+    memset(ops, 0, sizeof(ops));
+    op = ops;
+    op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+    op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+    op->data.recv_status_on_client.status = &status;
+    op->data.recv_status_on_client.status_details = &details;
+    op->data.recv_status_on_client.status_details_capacity = &details_capacity;
+    op++;
+    error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(3), NULL);
+    GPR_ASSERT(GRPC_CALL_OK == error);
+
+    cq_expect_completion(cqv, tag(3), 1);
+    cq_verify(cqv);
+
+    GPR_ASSERT(response_payload1_recv != NULL);
+    GPR_ASSERT(response_payload2_recv != NULL);
+  }
+
+  GPR_ASSERT(status == GRPC_STATUS_FAILED_PRECONDITION);
+  GPR_ASSERT(0 == strcmp(details, "xyz"));
+  GPR_ASSERT(0 == strcmp(call_details.method, "/foo"));
+  GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr"));
+  GPR_ASSERT(was_cancelled == 1);
+
+  gpr_free(details);
+  grpc_metadata_array_destroy(&initial_metadata_recv);
+  grpc_metadata_array_destroy(&trailing_metadata_recv);
+  grpc_metadata_array_destroy(&request_metadata_recv);
+  grpc_call_details_destroy(&call_details);
+
+  grpc_call_destroy(c);
+  grpc_call_destroy(s);
+
+  cq_verifier_destroy(cqv);
+
+  grpc_byte_buffer_destroy(response_payload1);
+  grpc_byte_buffer_destroy(response_payload2);
+  grpc_byte_buffer_destroy(response_payload1_recv);
+  grpc_byte_buffer_destroy(response_payload2_recv);
+
+  end_test(&f);
+  config.tear_down_data(&f);
+}
+
+void streaming_error_response(grpc_end2end_test_config config) {
+  test(config, false);
+  test(config, true);
+}
+
+void streaming_error_response_pre_init(void) {}
diff --git a/test/core/iomgr/ev_epoll_linux_test.c b/test/core/iomgr/ev_epoll_linux_test.c
new file mode 100644
index 0000000000000000000000000000000000000000..2547dc987187c73c78049f56bdbb45147ce72fe9
--- /dev/null
+++ b/test/core/iomgr/ev_epoll_linux_test.c
@@ -0,0 +1,244 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <grpc/support/port_platform.h>
+
+/* This test is only relevant on Linux systems where epoll() is available */
+#ifdef GPR_LINUX_EPOLL
+#include "src/core/lib/iomgr/ev_epoll_linux.h"
+#include "src/core/lib/iomgr/ev_posix.h"
+
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/iomgr.h"
+#include "test/core/util/test_config.h"
+
+typedef struct test_pollset {
+  grpc_pollset *pollset;
+  gpr_mu *mu;
+} test_pollset;
+
+typedef struct test_fd {
+  int inner_fd;
+  grpc_fd *fd;
+} test_fd;
+
+/* num_fds should be an even number */
+static void test_fd_init(test_fd *tfds, int *fds, int num_fds) {
+  int i;
+  for (i = 0; i < num_fds; i++) {
+    tfds[i].inner_fd = fds[i];
+    tfds[i].fd = grpc_fd_create(fds[i], "test_fd");
+  }
+}
+
+static void test_fd_cleanup(grpc_exec_ctx *exec_ctx, test_fd *tfds,
+                            int num_fds) {
+  int release_fd;
+  int i;
+
+  for (i = 0; i < num_fds; i++) {
+    grpc_fd_shutdown(exec_ctx, tfds[i].fd);
+    grpc_exec_ctx_flush(exec_ctx);
+
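+    /* Orphan the grpc_fd but ask for the underlying fd back (release_fd) so
+       it can be closed directly below. */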
+    grpc_fd_orphan(exec_ctx, tfds[i].fd, NULL, &release_fd, "test_fd_cleanup");
+    grpc_exec_ctx_flush(exec_ctx);
+
+    GPR_ASSERT(release_fd == tfds[i].inner_fd);
+    close(tfds[i].inner_fd);
+  }
+}
+
+static void test_pollset_init(test_pollset *pollsets, int num_pollsets) {
+  int i;
+  for (i = 0; i < num_pollsets; i++) {
+    pollsets[i].pollset = gpr_malloc(grpc_pollset_size());
+    grpc_pollset_init(pollsets[i].pollset, &pollsets[i].mu);
+  }
+}
+
+static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p,
+                            grpc_error *error) {
+  grpc_pollset_destroy(p);
+}
+
+static void test_pollset_cleanup(grpc_exec_ctx *exec_ctx,
+                                 test_pollset *pollsets, int num_pollsets) {
+  grpc_closure destroyed;
+  int i;
+
+  for (i = 0; i < num_pollsets; i++) {
+    grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].pollset);
+    grpc_pollset_shutdown(exec_ctx, pollsets[i].pollset, &destroyed);
+
+    grpc_exec_ctx_flush(exec_ctx);
+    gpr_free(pollsets[i].pollset);
+  }
+}
+
+#define NUM_FDS 8
+#define NUM_POLLSETS 4
+/*
+ * Cases to test:
+ *  case 1) Polling islands of both fd and pollset are NULL
+ *  case 2) Polling island of fd is NULL but that of pollset is non-NULL
+ *  case 3) Polling island of fd is non-NULL but that of pollset is NULL
+ *  case 4) Polling islands of both fd and pollset are non-NULL and:
+ *     case 4.1) Polling islands of fd and pollset are equal
+ *     case 4.2) Polling islands of fd and pollset are not equal (this results
+ *               in a merge)
+ */
+static void test_add_fd_to_pollset() {
+  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  test_fd tfds[NUM_FDS];
+  int fds[NUM_FDS];
+  test_pollset pollsets[NUM_POLLSETS];
+  void *expected_pi = NULL;
+  int i;
+  int r;
+
+  /* Create some dummy file descriptors. This test uses pipe file descriptors,
+   * but any other type of file descriptor would work. Since each pipe() call
+   * creates two fds, NUM_FDS must be an even number. */
+  for (i = 0; i < NUM_FDS; i = i + 2) {
+    r = pipe(fds + i);
+    if (r != 0) {
+      gpr_log(GPR_ERROR, "Error in creating pipe. %d (%s)", errno,
+              strerror(errno));
+      return;
+    }
+  }
+
+  test_fd_init(tfds, fds, NUM_FDS);
+  test_pollset_init(pollsets, NUM_POLLSETS);
+
+  /* Step 1.
+   * Create three polling islands (This will exercise test case 1 and 2) with
+   * the following configuration:
+   *   polling island 0 = { fds:0,1,2, pollsets:0}
+   *   polling island 1 = { fds:3,4,   pollsets:1}
+   *   polling island 2 = { fds:5,6,7  pollsets:2}
+   *
+   * Step 2.
+   * Add pollset 3 to polling island 0 (by adding fds 0 and 1 to pollset 3)
+   * (This will exercise test cases 3 and 4.1). The configuration becomes:
+   *   polling island 0 = { fds:0,1,2, pollsets:0,3} <<< pollset 3 added here
+   *   polling island 1 = { fds:3,4,   pollsets:1}
+   *   polling island 2 = { fds:5,6,7  pollsets:2}
+   *
+   * Step 3.
+   * Merge polling islands 0 and 1 by adding fd 0 to pollset 1 (This will
+   * exercise test case 4.2). The configuration becomes:
+   *   polling island (merged) = {fds: 0,1,2,3,4, pollsets: 0,1,3}
+   *   polling island 2 = {fds: 5,6,7 pollsets: 2}
+   *
+   * Step 4.
+   * Finally do one more merge by adding fd 3 to pollset 2.
+   *   polling island (merged) = {fds: 0,1,2,3,4,5,6,7, pollsets: 0,1,2,3}
+   */
+
+  /* == Step 1 == */
+  for (i = 0; i <= 2; i++) {
+    grpc_pollset_add_fd(&exec_ctx, pollsets[0].pollset, tfds[i].fd);
+    grpc_exec_ctx_flush(&exec_ctx);
+  }
+
+  for (i = 3; i <= 4; i++) {
+    grpc_pollset_add_fd(&exec_ctx, pollsets[1].pollset, tfds[i].fd);
+    grpc_exec_ctx_flush(&exec_ctx);
+  }
+
+  for (i = 5; i <= 7; i++) {
+    grpc_pollset_add_fd(&exec_ctx, pollsets[2].pollset, tfds[i].fd);
+    grpc_exec_ctx_flush(&exec_ctx);
+  }
+
+  /* == Step 2 == */
+  for (i = 0; i <= 1; i++) {
+    grpc_pollset_add_fd(&exec_ctx, pollsets[3].pollset, tfds[i].fd);
+    grpc_exec_ctx_flush(&exec_ctx);
+  }
+
+  /* == Step 3 == */
+  grpc_pollset_add_fd(&exec_ctx, pollsets[1].pollset, tfds[0].fd);
+  grpc_exec_ctx_flush(&exec_ctx);
+
+  /* == Step 4 == */
+  grpc_pollset_add_fd(&exec_ctx, pollsets[2].pollset, tfds[3].fd);
+  grpc_exec_ctx_flush(&exec_ctx);
+
+  /* All polling islands are merged at this point */
+
+  /* Compare Fd:0's polling island with that of all other Fds */
+  expected_pi = grpc_fd_get_polling_island(tfds[0].fd);
+  for (i = 1; i < NUM_FDS; i++) {
+    GPR_ASSERT(grpc_are_polling_islands_equal(
+        expected_pi, grpc_fd_get_polling_island(tfds[i].fd)));
+  }
+
+  /* Compare Fd:0's polling island with that of all other pollsets */
+  for (i = 0; i < NUM_POLLSETS; i++) {
+    GPR_ASSERT(grpc_are_polling_islands_equal(
+        expected_pi, grpc_pollset_get_polling_island(pollsets[i].pollset)));
+  }
+
+  test_fd_cleanup(&exec_ctx, tfds, NUM_FDS);
+  test_pollset_cleanup(&exec_ctx, pollsets, NUM_POLLSETS);
+  grpc_exec_ctx_finish(&exec_ctx);
+}
+
+int main(int argc, char **argv) {
+  const char *poll_strategy = NULL;
+  grpc_test_init(argc, argv);
+  grpc_iomgr_init();
+
+  poll_strategy = grpc_get_poll_strategy_name();
+  if (poll_strategy != NULL && strcmp(poll_strategy, "epoll") == 0) {
+    test_add_fd_to_pollset();
+  } else {
+    gpr_log(GPR_INFO,
+            "Skipping the test. The test is only relevant for 'epoll' "
+            "strategy. and the current strategy is: '%s'",
+            poll_strategy);
+  }
+  grpc_iomgr_shutdown();
+  return 0;
+}
+#else /* !defined(GPR_LINUX_EPOLL) */
+int main(int argc, char **argv) { return 0; }
+#endif /* defined(GPR_LINUX_EPOLL) */
diff --git a/test/core/iomgr/tcp_server_posix_test.c b/test/core/iomgr/tcp_server_posix_test.c
index eda774fca5b910399f7617939bba5bbf73f6c624..6e2d1d0fc9e89b965eaaa138219876b8bd920cc8 100644
--- a/test/core/iomgr/tcp_server_posix_test.c
+++ b/test/core/iomgr/tcp_server_posix_test.c
@@ -129,7 +129,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
 static void test_no_op(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_tcp_server *s;
-  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
   grpc_tcp_server_unref(&exec_ctx, s);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -137,7 +137,7 @@ static void test_no_op(void) {
 static void test_no_op_with_start(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_tcp_server *s;
-  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
   LOG_TEST("test_no_op_with_start");
   grpc_tcp_server_start(&exec_ctx, s, NULL, 0, on_connect, NULL);
   grpc_tcp_server_unref(&exec_ctx, s);
@@ -148,7 +148,7 @@ static void test_no_op_with_port(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   struct sockaddr_in addr;
   grpc_tcp_server *s;
-  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
   LOG_TEST("test_no_op_with_port");
 
   memset(&addr, 0, sizeof(addr));
@@ -166,7 +166,7 @@ static void test_no_op_with_port_and_start(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   struct sockaddr_in addr;
   grpc_tcp_server *s;
-  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
   LOG_TEST("test_no_op_with_port_and_start");
   int port;
 
@@ -226,7 +226,7 @@ static void test_connect(unsigned n) {
   unsigned svr1_fd_count;
   int svr1_port;
   grpc_tcp_server *s;
-  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
   unsigned i;
   server_weak_ref weak_ref;
   server_weak_ref_init(&weak_ref);
diff --git a/test/core/network_benchmarks/low_level_ping_pong.c b/test/core/network_benchmarks/low_level_ping_pong.c
index 1b40895a7194028cbb6360d3092c16d449085acf..9038d076755fa74c05eecff92267b48d5062ac59 100644
--- a/test/core/network_benchmarks/low_level_ping_pong.c
+++ b/test/core/network_benchmarks/low_level_ping_pong.c
@@ -583,7 +583,7 @@ static int run_benchmark(char *socket_type, thread_args *client_args,
     return rv;
   }
 
-  gpr_log(GPR_INFO, "Starting test %s %s %d", client_args->strategy_name,
+  gpr_log(GPR_INFO, "Starting test %s %s %zu", client_args->strategy_name,
           socket_type, client_args->msg_size);
 
   gpr_thd_new(&tid, server_thread_wrap, server_args, NULL);
diff --git a/test/core/security/credentials_test.c b/test/core/security/credentials_test.c
index e703dbdeb6174935c9be0eaa2970eb38ed4b4471..7043953154ae0515f19981e5d336b2af62b5e2ee 100644
--- a/test/core/security/credentials_test.c
+++ b/test/core/security/credentials_test.c
@@ -348,13 +348,15 @@ static void check_metadata(expected_md *expected, grpc_credentials_md *md_elems,
 static void check_google_iam_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
                                       grpc_credentials_md *md_elems,
                                       size_t num_md,
-                                      grpc_credentials_status status) {
+                                      grpc_credentials_status status,
+                                      const char *error_details) {
   grpc_call_credentials *c = (grpc_call_credentials *)user_data;
   expected_md emd[] = {{GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
                         test_google_iam_authorization_token},
                        {GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
                         test_google_iam_authority_selector}};
   GPR_ASSERT(status == GRPC_CREDENTIALS_OK);
+  GPR_ASSERT(error_details == NULL);
   GPR_ASSERT(num_md == 2);
   check_metadata(emd, md_elems, num_md);
   grpc_call_credentials_unref(c);
@@ -372,14 +374,13 @@ static void test_google_iam_creds(void) {
   grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static void check_access_token_metadata(grpc_exec_ctx *exec_ctx,
-                                        void *user_data,
-                                        grpc_credentials_md *md_elems,
-                                        size_t num_md,
-                                        grpc_credentials_status status) {
+static void check_access_token_metadata(
+    grpc_exec_ctx *exec_ctx, void *user_data, grpc_credentials_md *md_elems,
+    size_t num_md, grpc_credentials_status status, const char *error_details) {
   grpc_call_credentials *c = (grpc_call_credentials *)user_data;
   expected_md emd[] = {{GRPC_AUTHORIZATION_METADATA_KEY, "Bearer blah"}};
   GPR_ASSERT(status == GRPC_CREDENTIALS_OK);
+  GPR_ASSERT(error_details == NULL);
   GPR_ASSERT(num_md == 1);
   check_metadata(emd, md_elems, num_md);
   grpc_call_credentials_unref(c);
@@ -428,7 +429,7 @@ static void test_channel_oauth2_composite_creds(void) {
 
 static void check_oauth2_google_iam_composite_metadata(
     grpc_exec_ctx *exec_ctx, void *user_data, grpc_credentials_md *md_elems,
-    size_t num_md, grpc_credentials_status status) {
+    size_t num_md, grpc_credentials_status status, const char *error_details) {
   grpc_call_credentials *c = (grpc_call_credentials *)user_data;
   expected_md emd[] = {
       {GRPC_AUTHORIZATION_METADATA_KEY, test_oauth2_bearer_token},
@@ -437,6 +438,7 @@ static void check_oauth2_google_iam_composite_metadata(
       {GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
        test_google_iam_authority_selector}};
   GPR_ASSERT(status == GRPC_CREDENTIALS_OK);
+  GPR_ASSERT(error_details == NULL);
   GPR_ASSERT(num_md == 3);
   check_metadata(emd, md_elems, num_md);
   grpc_call_credentials_unref(c);
@@ -521,8 +523,9 @@ static void test_channel_oauth2_google_iam_composite_creds(void) {
 
 static void on_oauth2_creds_get_metadata_success(
     grpc_exec_ctx *exec_ctx, void *user_data, grpc_credentials_md *md_elems,
-    size_t num_md, grpc_credentials_status status) {
+    size_t num_md, grpc_credentials_status status, const char *error_details) {
   GPR_ASSERT(status == GRPC_CREDENTIALS_OK);
+  GPR_ASSERT(error_details == NULL);
   GPR_ASSERT(num_md == 1);
   GPR_ASSERT(gpr_slice_str_cmp(md_elems[0].key, "authorization") == 0);
   GPR_ASSERT(gpr_slice_str_cmp(md_elems[0].value,
@@ -534,7 +537,7 @@ static void on_oauth2_creds_get_metadata_success(
 
 static void on_oauth2_creds_get_metadata_failure(
     grpc_exec_ctx *exec_ctx, void *user_data, grpc_credentials_md *md_elems,
-    size_t num_md, grpc_credentials_status status) {
+    size_t num_md, grpc_credentials_status status, const char *error_details) {
   GPR_ASSERT(status == GRPC_CREDENTIALS_ERROR);
   GPR_ASSERT(num_md == 0);
   GPR_ASSERT(user_data != NULL);
@@ -769,14 +772,13 @@ static char *encode_and_sign_jwt_should_not_be_called(
   return NULL;
 }
 
-static void on_jwt_creds_get_metadata_success(grpc_exec_ctx *exec_ctx,
-                                              void *user_data,
-                                              grpc_credentials_md *md_elems,
-                                              size_t num_md,
-                                              grpc_credentials_status status) {
+static void on_jwt_creds_get_metadata_success(
+    grpc_exec_ctx *exec_ctx, void *user_data, grpc_credentials_md *md_elems,
+    size_t num_md, grpc_credentials_status status, const char *error_details) {
   char *expected_md_value;
   gpr_asprintf(&expected_md_value, "Bearer %s", test_signed_jwt);
   GPR_ASSERT(status == GRPC_CREDENTIALS_OK);
+  GPR_ASSERT(error_details == NULL);
   GPR_ASSERT(num_md == 1);
   GPR_ASSERT(gpr_slice_str_cmp(md_elems[0].key, "authorization") == 0);
   GPR_ASSERT(gpr_slice_str_cmp(md_elems[0].value, expected_md_value) == 0);
@@ -785,11 +787,9 @@ static void on_jwt_creds_get_metadata_success(grpc_exec_ctx *exec_ctx,
   gpr_free(expected_md_value);
 }
 
-static void on_jwt_creds_get_metadata_failure(grpc_exec_ctx *exec_ctx,
-                                              void *user_data,
-                                              grpc_credentials_md *md_elems,
-                                              size_t num_md,
-                                              grpc_credentials_status status) {
+static void on_jwt_creds_get_metadata_failure(
+    grpc_exec_ctx *exec_ctx, void *user_data, grpc_credentials_md *md_elems,
+    size_t num_md, grpc_credentials_status status, const char *error_details) {
   GPR_ASSERT(status == GRPC_CREDENTIALS_ERROR);
   GPR_ASSERT(num_md == 0);
   GPR_ASSERT(user_data != NULL);
@@ -1033,6 +1033,8 @@ static void plugin_get_metadata_success(void *state,
   cb(user_data, md, GPR_ARRAY_SIZE(md), GRPC_STATUS_OK, NULL);
 }
 
+static const char *plugin_error_details = "Could not get metadata for plugin.";
+
 static void plugin_get_metadata_failure(void *state,
                                         grpc_auth_metadata_context context,
                                         grpc_credentials_plugin_metadata_cb cb,
@@ -1043,13 +1045,12 @@ static void plugin_get_metadata_failure(void *state,
   GPR_ASSERT(context.channel_auth_context == NULL);
   GPR_ASSERT(context.reserved == NULL);
   *s = PLUGIN_GET_METADATA_CALLED_STATE;
-  cb(user_data, NULL, 0, GRPC_STATUS_UNAUTHENTICATED,
-     "Could not get metadata for plugin.");
+  cb(user_data, NULL, 0, GRPC_STATUS_UNAUTHENTICATED, plugin_error_details);
 }
 
 static void on_plugin_metadata_received_success(
     grpc_exec_ctx *exec_ctx, void *user_data, grpc_credentials_md *md_elems,
-    size_t num_md, grpc_credentials_status status) {
+    size_t num_md, grpc_credentials_status status, const char *error_details) {
   size_t i = 0;
   GPR_ASSERT(user_data == NULL);
   GPR_ASSERT(md_elems != NULL);
@@ -1062,11 +1063,13 @@ static void on_plugin_metadata_received_success(
 
 static void on_plugin_metadata_received_failure(
     grpc_exec_ctx *exec_ctx, void *user_data, grpc_credentials_md *md_elems,
-    size_t num_md, grpc_credentials_status status) {
+    size_t num_md, grpc_credentials_status status, const char *error_details) {
   GPR_ASSERT(user_data == NULL);
   GPR_ASSERT(md_elems == NULL);
   GPR_ASSERT(num_md == 0);
   GPR_ASSERT(status == GRPC_CREDENTIALS_ERROR);
+  GPR_ASSERT(error_details != NULL);
+  GPR_ASSERT(strcmp(error_details, plugin_error_details) == 0);
 }
 
 static void plugin_destroy(void *state) {
diff --git a/test/core/security/oauth2_utils.c b/test/core/security/oauth2_utils.c
index a334edc32dcae57133b4dadc4b516b74cef83f75..9b97c38fcb08070f2a59e69b6516bdf3aeb038ee 100644
--- a/test/core/security/oauth2_utils.c
+++ b/test/core/security/oauth2_utils.c
@@ -53,7 +53,8 @@ typedef struct {
 
 static void on_oauth2_response(grpc_exec_ctx *exec_ctx, void *user_data,
                                grpc_credentials_md *md_elems, size_t num_md,
-                               grpc_credentials_status status) {
+                               grpc_credentials_status status,
+                               const char *error_details) {
   oauth2_request *request = user_data;
   char *token = NULL;
   gpr_slice token_slice;
diff --git a/test/core/security/print_google_default_creds_token.c b/test/core/security/print_google_default_creds_token.c
index 18fbc3c41c658745cc4ed50d8f911ac77373ad4a..a391c0876b0dcc0838a1f5fe24008e72f62ba9ae 100644
--- a/test/core/security/print_google_default_creds_token.c
+++ b/test/core/security/print_google_default_creds_token.c
@@ -54,7 +54,8 @@ typedef struct {
 
 static void on_metadata_response(grpc_exec_ctx *exec_ctx, void *user_data,
                                  grpc_credentials_md *md_elems, size_t num_md,
-                                 grpc_credentials_status status) {
+                                 grpc_credentials_status status,
+                                 const char *error_details) {
   synchronizer *sync = user_data;
   if (status == GRPC_CREDENTIALS_ERROR) {
     fprintf(stderr, "Fetching token failed.\n");
diff --git a/test/core/surface/byte_buffer_reader_test.c b/test/core/surface/byte_buffer_reader_test.c
index 9c6734e1799118a1b33287b76336d5bee165225f..1ab1a06211e604dab2b5f03d09329a84451a49ca 100644
--- a/test/core/surface/byte_buffer_reader_test.c
+++ b/test/core/surface/byte_buffer_reader_test.c
@@ -59,7 +59,8 @@ static void test_read_one_slice(void) {
   slice = gpr_slice_from_copied_string("test");
   buffer = grpc_raw_byte_buffer_create(&slice, 1);
   gpr_slice_unref(slice);
-  grpc_byte_buffer_reader_init(&reader, buffer);
+  GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) &&
+             "Couldn't init byte buffer reader");
   first_code = grpc_byte_buffer_reader_next(&reader, &first_slice);
   GPR_ASSERT(first_code != 0);
   GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(first_slice), "test", 4) == 0);
@@ -81,7 +82,8 @@ static void test_read_one_slice_malloc(void) {
   memcpy(GPR_SLICE_START_PTR(slice), "test", 4);
   buffer = grpc_raw_byte_buffer_create(&slice, 1);
   gpr_slice_unref(slice);
-  grpc_byte_buffer_reader_init(&reader, buffer);
+  GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) &&
+             "Couldn't init byte buffer reader");
   first_code = grpc_byte_buffer_reader_next(&reader, &first_slice);
   GPR_ASSERT(first_code != 0);
   GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(first_slice), "test", 4) == 0);
@@ -102,7 +104,8 @@ static void test_read_none_compressed_slice(void) {
   slice = gpr_slice_from_copied_string("test");
   buffer = grpc_raw_byte_buffer_create(&slice, 1);
   gpr_slice_unref(slice);
-  grpc_byte_buffer_reader_init(&reader, buffer);
+  GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) &&
+             "Couldn't init byte buffer reader");
   first_code = grpc_byte_buffer_reader_next(&reader, &first_slice);
   GPR_ASSERT(first_code != 0);
   GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(first_slice), "test", 4) == 0);
@@ -112,6 +115,20 @@ static void test_read_none_compressed_slice(void) {
   grpc_byte_buffer_destroy(buffer);
 }
 
+static void test_read_corrupted_slice(void) {
+  gpr_slice slice;
+  grpc_byte_buffer *buffer;
+  grpc_byte_buffer_reader reader;
+
+  LOG_TEST("test_read_corrupted_slice");
+  slice = gpr_slice_from_copied_string("test");
+  buffer = grpc_raw_byte_buffer_create(&slice, 1);
+  buffer->data.raw.compression = GRPC_COMPRESS_GZIP; /* lies! */
+  gpr_slice_unref(slice);
+  GPR_ASSERT(!grpc_byte_buffer_reader_init(&reader, buffer));
+  grpc_byte_buffer_destroy(buffer);
+}
+
 static void read_compressed_slice(grpc_compression_algorithm algorithm,
                                   size_t input_size) {
   gpr_slice input_slice;
@@ -132,7 +149,8 @@ static void read_compressed_slice(grpc_compression_algorithm algorithm,
 
   buffer = grpc_raw_compressed_byte_buffer_create(sliceb_out.slices,
                                                   sliceb_out.count, algorithm);
-  grpc_byte_buffer_reader_init(&reader, buffer);
+  GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) &&
+             "Couldn't init byte buffer reader");
 
   while (grpc_byte_buffer_reader_next(&reader, &read_slice)) {
     GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(read_slice),
@@ -170,7 +188,8 @@ static void test_byte_buffer_from_reader(void) {
   memcpy(GPR_SLICE_START_PTR(slice), "test", 4);
   buffer = grpc_raw_byte_buffer_create(&slice, 1);
   gpr_slice_unref(slice);
-  grpc_byte_buffer_reader_init(&reader, buffer);
+  GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) &&
+             "Couldn't init byte buffer reader");
 
   buffer_from_reader = grpc_raw_byte_buffer_from_reader(&reader);
   GPR_ASSERT(buffer->type == buffer_from_reader->type);
@@ -206,7 +225,8 @@ static void test_readall(void) {
   gpr_slice_unref(slices[0]);
   gpr_slice_unref(slices[1]);
 
-  grpc_byte_buffer_reader_init(&reader, buffer);
+  GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, buffer) &&
+             "Couldn't init byte buffer reader");
   slice_out = grpc_byte_buffer_reader_readall(&reader);
 
   GPR_ASSERT(GPR_SLICE_LENGTH(slice_out) == 512 + 1024);
@@ -241,7 +261,8 @@ static void test_byte_buffer_copy(void) {
   gpr_slice_unref(slices[1]);
   copied_buffer = grpc_byte_buffer_copy(buffer);
 
-  grpc_byte_buffer_reader_init(&reader, copied_buffer);
+  GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, copied_buffer) &&
+             "Couldn't init byte buffer reader");
   slice_out = grpc_byte_buffer_reader_readall(&reader);
 
   GPR_ASSERT(GPR_SLICE_LENGTH(slice_out) == 512 + 1024);
@@ -260,6 +281,7 @@ int main(int argc, char **argv) {
   test_read_none_compressed_slice();
   test_read_gzip_compressed_slice();
   test_read_deflate_compressed_slice();
+  test_read_corrupted_slice();
   test_byte_buffer_from_reader();
   test_byte_buffer_copy();
   test_readall();
diff --git a/test/core/surface/concurrent_connectivity_test.c b/test/core/surface/concurrent_connectivity_test.c
index f447a68887502080cd1fa072432f64d02041e4fa..f7567f350d7facb3127885fdc70ef21330994ff6 100644
--- a/test/core/surface/concurrent_connectivity_test.c
+++ b/test/core/surface/concurrent_connectivity_test.c
@@ -113,7 +113,7 @@ void bad_server_thread(void *vargs) {
   socklen_t addr_len = sizeof(addr);
   int port;
   grpc_tcp_server *s;
-  grpc_error *error = grpc_tcp_server_create(NULL, &s);
+  grpc_error *error = grpc_tcp_server_create(NULL, NULL, &s);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   memset(&addr, 0, sizeof(addr));
   addr.ss_family = AF_INET;
diff --git a/test/core/surface/sequential_connectivity_test.c b/test/core/surface/sequential_connectivity_test.c
index 2fba3927ba7206bb63ddcdc4cefe202f1ea3a36f..fe87f119f2f1a9eaaa6226ad514f14f5cd6c694e 100644
--- a/test/core/surface/sequential_connectivity_test.c
+++ b/test/core/surface/sequential_connectivity_test.c
@@ -154,7 +154,7 @@ static void secure_test_add_port(grpc_server *server, const char *addr) {
 
 static grpc_channel *secure_test_create_channel(const char *addr) {
   grpc_channel_credentials *ssl_creds =
-      grpc_ssl_credentials_create(NULL, NULL, NULL);
+      grpc_ssl_credentials_create(test_root_cert, NULL, NULL);
   grpc_arg ssl_name_override = {GRPC_ARG_STRING,
                                 GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
                                 {"foo.test.google.fr"}};
diff --git a/test/core/surface/server_chttp2_test.c b/test/core/surface/server_chttp2_test.c
index f42ca9f9cdfb87d8847f2743108b36511655e4ac..6310b6f00b0100bcb9c9197a18cdf8f113fcec67 100644
--- a/test/core/surface/server_chttp2_test.c
+++ b/test/core/surface/server_chttp2_test.c
@@ -49,10 +49,16 @@ void test_unparsable_target(void) {
 }
 
 void test_add_same_port_twice() {
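+  /* Disable SO_REUSEPORT so that the second bind to the same port fails as
+     the test expects. */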
+  grpc_arg a;
+  a.type = GRPC_ARG_INTEGER;
+  a.key = GRPC_ARG_ALLOW_REUSEPORT;
+  a.value.integer = 0;
+  grpc_channel_args args = {1, &a};
+
   int port = grpc_pick_unused_port_or_die();
   char *addr = NULL;
   grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
-  grpc_server *server = grpc_server_create(NULL, NULL);
+  grpc_server *server = grpc_server_create(&args, NULL);
   grpc_server_credentials *fake_creds =
       grpc_fake_transport_security_server_credentials_create();
   gpr_join_host_port(&addr, "localhost", port);
diff --git a/test/core/surface/server_test.c b/test/core/surface/server_test.c
index 02eb432e2d281867f4baa3dfe50258f1768e8a7f..3fd1c2c26636b07445a258b04648572abbf664c1 100644
--- a/test/core/surface/server_test.c
+++ b/test/core/surface/server_test.c
@@ -82,9 +82,15 @@ void test_request_call_on_no_server_cq(void) {
 }
 
 void test_bind_server_twice(void) {
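+  /* Disable SO_REUSEPORT so that the second server's bind to the same port
+     fails as the test expects. */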
+  grpc_arg a;
+  a.type = GRPC_ARG_INTEGER;
+  a.key = GRPC_ARG_ALLOW_REUSEPORT;
+  a.value.integer = 0;
+  grpc_channel_args args = {1, &a};
+
   char *addr;
-  grpc_server *server1 = grpc_server_create(NULL, NULL);
-  grpc_server *server2 = grpc_server_create(NULL, NULL);
+  grpc_server *server1 = grpc_server_create(&args, NULL);
+  grpc_server *server2 = grpc_server_create(&args, NULL);
   grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
   int port = grpc_pick_unused_port_or_die();
   gpr_asprintf(&addr, "[::]:%d", port);
@@ -133,7 +139,7 @@ void test_bind_server_to_addr(const char *host, bool secure) {
 }
 
 static int external_dns_works(const char *host) {
-  grpc_resolved_addresses *res;
+  grpc_resolved_addresses *res = NULL;
   grpc_error *error = grpc_blocking_resolve_address(host, "80", &res);
   GRPC_ERROR_UNREF(error);
   if (res != NULL) {
diff --git a/test/core/transport/chttp2/status_conversion_test.c b/test/core/transport/chttp2/status_conversion_test.c
index e6fc7857281515a7339a270d2ecd72d200c2e751..f5a5cd1395ba79b1c4e178bbb49029985bbb9236 100644
--- a/test/core/transport/chttp2/status_conversion_test.c
+++ b/test/core/transport/chttp2/status_conversion_test.c
@@ -37,8 +37,8 @@
 
 #define GRPC_STATUS_TO_HTTP2_ERROR(a, b) \
   GPR_ASSERT(grpc_chttp2_grpc_status_to_http2_error(a) == (b))
-#define HTTP2_ERROR_TO_GRPC_STATUS(a, b) \
-  GPR_ASSERT(grpc_chttp2_http2_error_to_grpc_status(a) == (b))
+#define HTTP2_ERROR_TO_GRPC_STATUS(a, deadline, b) \
+  GPR_ASSERT(grpc_chttp2_http2_error_to_grpc_status(a, deadline) == (b))
 #define GRPC_STATUS_TO_HTTP2_STATUS(a, b) \
   GPR_ASSERT(grpc_chttp2_grpc_status_to_http2_status(a) == (b))
 #define HTTP2_STATUS_TO_GRPC_STATUS(a, b) \
@@ -54,8 +54,7 @@ int main(int argc, char **argv) {
   GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_UNKNOWN, GRPC_CHTTP2_INTERNAL_ERROR);
   GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_INVALID_ARGUMENT,
                              GRPC_CHTTP2_INTERNAL_ERROR);
-  GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_DEADLINE_EXCEEDED,
-                             GRPC_CHTTP2_INTERNAL_ERROR);
+  GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_DEADLINE_EXCEEDED, GRPC_CHTTP2_CANCEL);
   GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_NOT_FOUND, GRPC_CHTTP2_INTERNAL_ERROR);
   GRPC_STATUS_TO_HTTP2_ERROR(GRPC_STATUS_ALREADY_EXISTS,
                              GRPC_CHTTP2_INTERNAL_ERROR);
@@ -95,25 +94,60 @@ int main(int argc, char **argv) {
   GRPC_STATUS_TO_HTTP2_STATUS(GRPC_STATUS_UNAVAILABLE, 200);
   GRPC_STATUS_TO_HTTP2_STATUS(GRPC_STATUS_DATA_LOSS, 200);
 
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_NO_ERROR, GRPC_STATUS_INTERNAL);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_PROTOCOL_ERROR, GRPC_STATUS_INTERNAL);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INTERNAL_ERROR, GRPC_STATUS_INTERNAL);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FLOW_CONTROL_ERROR,
+  const gpr_timespec before_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_NO_ERROR, before_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_PROTOCOL_ERROR, before_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INTERNAL_ERROR, before_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FLOW_CONTROL_ERROR, before_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_SETTINGS_TIMEOUT, before_deadline,
                              GRPC_STATUS_INTERNAL);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_SETTINGS_TIMEOUT,
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_STREAM_CLOSED, before_deadline,
                              GRPC_STATUS_INTERNAL);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_STREAM_CLOSED, GRPC_STATUS_INTERNAL);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FRAME_SIZE_ERROR,
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FRAME_SIZE_ERROR, before_deadline,
                              GRPC_STATUS_INTERNAL);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_REFUSED_STREAM,
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_REFUSED_STREAM, before_deadline,
                              GRPC_STATUS_UNAVAILABLE);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CANCEL, GRPC_STATUS_CANCELLED);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_COMPRESSION_ERROR,
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CANCEL, before_deadline,
+                             GRPC_STATUS_CANCELLED);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_COMPRESSION_ERROR, before_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CONNECT_ERROR, before_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_ENHANCE_YOUR_CALM, before_deadline,
+                             GRPC_STATUS_RESOURCE_EXHAUSTED);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INADEQUATE_SECURITY, before_deadline,
+                             GRPC_STATUS_PERMISSION_DENIED);
+
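+  /* Once the deadline has passed, a received CANCEL maps to DEADLINE_EXCEEDED
+     rather than CANCELLED; the other mappings are unchanged. */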
+  const gpr_timespec after_deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_NO_ERROR, after_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_PROTOCOL_ERROR, after_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INTERNAL_ERROR, after_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FLOW_CONTROL_ERROR, after_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_SETTINGS_TIMEOUT, after_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_STREAM_CLOSED, after_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_FRAME_SIZE_ERROR, after_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_REFUSED_STREAM, after_deadline,
+                             GRPC_STATUS_UNAVAILABLE);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CANCEL, after_deadline,
+                             GRPC_STATUS_DEADLINE_EXCEEDED);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_COMPRESSION_ERROR, after_deadline,
+                             GRPC_STATUS_INTERNAL);
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CONNECT_ERROR, after_deadline,
                              GRPC_STATUS_INTERNAL);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_CONNECT_ERROR, GRPC_STATUS_INTERNAL);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_ENHANCE_YOUR_CALM,
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_ENHANCE_YOUR_CALM, after_deadline,
                              GRPC_STATUS_RESOURCE_EXHAUSTED);
-  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INADEQUATE_SECURITY,
+  HTTP2_ERROR_TO_GRPC_STATUS(GRPC_CHTTP2_INADEQUATE_SECURITY, after_deadline,
                              GRPC_STATUS_PERMISSION_DENIED);
 
   HTTP2_STATUS_TO_GRPC_STATUS(200, GRPC_STATUS_OK);
diff --git a/test/core/util/test_tcp_server.c b/test/core/util/test_tcp_server.c
index 27c16fc764a457ed77a6c9594ba750821a81eb69..8a0b3932d86540f2e90d77f73ccaf39477533e04 100644
--- a/test/core/util/test_tcp_server.c
+++ b/test/core/util/test_tcp_server.c
@@ -72,8 +72,8 @@ void test_tcp_server_start(test_tcp_server *server, int port) {
   addr.sin_port = htons((uint16_t)port);
   memset(&addr.sin_addr, 0, sizeof(addr.sin_addr));
 
-  grpc_error *error =
-      grpc_tcp_server_create(&server->shutdown_complete, &server->tcp_server);
+  grpc_error *error = grpc_tcp_server_create(&server->shutdown_complete, NULL,
+                                             &server->tcp_server);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   error = grpc_tcp_server_add_port(server->tcp_server, &addr, sizeof(addr),
                                    &port_added);
diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc
index 8de9d339f635d8c608ef32f5ceed1c8375496c87..354a59cedd59b461dfa94e72cee56bab67313353 100644
--- a/test/cpp/end2end/end2end_test.cc
+++ b/test/cpp/end2end/end2end_test.cc
@@ -75,6 +75,8 @@ bool CheckIsLocalhost(const grpc::string& addr) {
          addr.substr(0, kIpv6.size()) == kIpv6;
 }
 
+const char kTestCredsPluginErrorMsg[] = "Could not find plugin metadata.";
+
 class TestMetadataCredentialsPlugin : public MetadataCredentialsPlugin {
  public:
   static const char kMetadataKey[];
@@ -99,7 +101,7 @@ class TestMetadataCredentialsPlugin : public MetadataCredentialsPlugin {
       metadata->insert(std::make_pair(kMetadataKey, metadata_value_));
       return Status::OK;
     } else {
-      return Status(StatusCode::NOT_FOUND, "Could not find plugin metadata.");
+      return Status(StatusCode::NOT_FOUND, kTestCredsPluginErrorMsg);
     }
   }
 
@@ -1331,6 +1333,7 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) {
   Status s = stub_->Echo(&context, request, &response);
   EXPECT_FALSE(s.ok());
   EXPECT_EQ(s.error_code(), StatusCode::UNAUTHENTICATED);
+  EXPECT_EQ(s.error_message(), kTestCredsPluginErrorMsg);
 }
 
 TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) {
@@ -1388,6 +1391,7 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) {
   Status s = stub_->Echo(&context, request, &response);
   EXPECT_FALSE(s.ok());
   EXPECT_EQ(s.error_code(), StatusCode::UNAUTHENTICATED);
+  EXPECT_EQ(s.error_message(), kTestCredsPluginErrorMsg);
 }
 
 TEST_P(SecureEnd2endTest, ClientAuthContext) {
diff --git a/test/cpp/end2end/server_builder_plugin_test.cc b/test/cpp/end2end/server_builder_plugin_test.cc
index 75f23b64a73d7e4386c1a6edaa04aa46bc88a308..778a2be573ea7f6609d1b6e15e089e82a83382ee 100644
--- a/test/cpp/end2end/server_builder_plugin_test.cc
+++ b/test/cpp/end2end/server_builder_plugin_test.cc
@@ -37,6 +37,7 @@
 #include <grpc++/impl/server_builder_option.h>
 #include <grpc++/impl/server_builder_plugin.h>
 #include <grpc++/impl/server_initializer.h>
+#include <grpc++/impl/thd.h>
 #include <grpc++/security/credentials.h>
 #include <grpc++/security/server_credentials.h>
 #include <grpc++/server.h>
@@ -187,7 +188,10 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam<bool> {
   void StartServer() {
     grpc::string server_address = "localhost:" + to_string(port_);
     builder_->AddListeningPort(server_address, InsecureServerCredentials());
+    // We run some tests without a service; for those we need to supply a
+    // frequently polled completion queue.
     cq_ = builder_->AddCompletionQueue();
+    cq_thread_ = grpc::thread(std::bind(&ServerBuilderPluginTest::RunCQ, this));
     server_ = builder_->BuildAndStart();
     EXPECT_TRUE(CheckPresent());
   }
@@ -204,11 +208,8 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam<bool> {
     EXPECT_TRUE(plugin->init_server_is_called());
     EXPECT_TRUE(plugin->finish_is_called());
     server_->Shutdown();
-    void* tag;
-    bool ok;
     cq_->Shutdown();
-    while (cq_->Next(&tag, &ok))
-      ;
+    cq_thread_.join();
   }
 
   string to_string(const int number) {
@@ -223,6 +224,7 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam<bool> {
   std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
   std::unique_ptr<ServerCompletionQueue> cq_;
   std::unique_ptr<Server> server_;
+  grpc::thread cq_thread_;
   TestServiceImpl service_;
   int port_;
 
@@ -238,6 +240,13 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam<bool> {
       return nullptr;
     }
   }
+
+  void RunCQ() {
+    void* tag;
+    bool ok;
+    while (cq_->Next(&tag, &ok))
+      ;
+  }
 };
 
 TEST_P(ServerBuilderPluginTest, PluginWithoutServiceTest) {
diff --git a/test/cpp/end2end/shutdown_test.cc b/test/cpp/end2end/shutdown_test.cc
index aa8d42141d49d4e04405aeedfda71d12228a93e3..3f98de6db7bb6bd78756814b4c229b755fac2448 100644
--- a/test/cpp/end2end/shutdown_test.cc
+++ b/test/cpp/end2end/shutdown_test.cc
@@ -123,7 +123,6 @@ class ShutdownTest : public ::testing::Test {
   TestServiceImpl service_;
 };
 
-// Tests zookeeper state change between two RPCs
 // TODO(ctiller): leaked objects in this test
 TEST_F(ShutdownTest, ShutdownTest) {
   ResetStub();
diff --git a/test/cpp/end2end/zookeeper_test.cc b/test/cpp/end2end/zookeeper_test.cc
deleted file mode 100644
index fdc500e53521fff2de539c4acc6421a5e55b3d49..0000000000000000000000000000000000000000
--- a/test/cpp/end2end/zookeeper_test.cc
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <grpc++/channel.h>
-#include <grpc++/client_context.h>
-#include <grpc++/create_channel.h>
-#include <grpc++/server.h>
-#include <grpc++/server_builder.h>
-#include <grpc++/server_context.h>
-#include <grpc/grpc.h>
-#include <grpc/grpc_zookeeper.h>
-#include <gtest/gtest.h>
-#include <zookeeper/zookeeper.h>
-
-#include "src/core/lib/support/env.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-
-namespace grpc {
-namespace testing {
-
-class ZookeeperTestServiceImpl
-    : public ::grpc::testing::EchoTestService::Service {
- public:
-  Status Echo(ServerContext* context, const EchoRequest* request,
-              EchoResponse* response) GRPC_OVERRIDE {
-    response->set_message(request->message());
-    return Status::OK;
-  }
-};
-
-class ZookeeperTest : public ::testing::Test {
- protected:
-  ZookeeperTest() {}
-
-  void SetUp() GRPC_OVERRIDE {
-    SetUpZookeeper();
-
-    // Sets up two servers
-    int port1 = grpc_pick_unused_port_or_die();
-    server1_ = SetUpServer(port1);
-
-    int port2 = grpc_pick_unused_port_or_die();
-    server2_ = SetUpServer(port2);
-
-    // Registers service /test in zookeeper
-    RegisterService("/test", "test");
-
-    // Registers service instance /test/1 in zookeeper
-    string value =
-        "{\"host\":\"localhost\",\"port\":\"" + to_string(port1) + "\"}";
-    RegisterService("/test/1", value);
-
-    // Registers service instance /test/2 in zookeeper
-    value = "{\"host\":\"localhost\",\"port\":\"" + to_string(port2) + "\"}";
-    RegisterService("/test/2", value);
-  }
-
-  // Requires zookeeper server running
-  void SetUpZookeeper() {
-    // Finds zookeeper server address in environment
-    // Default is localhost:2181
-    zookeeper_address_ = "localhost:2181";
-    char* addr = gpr_getenv("GRPC_ZOOKEEPER_SERVER_TEST");
-    if (addr != NULL) {
-      string addr_str(addr);
-      zookeeper_address_ = addr_str;
-      gpr_free(addr);
-    }
-    gpr_log(GPR_DEBUG, "%s", zookeeper_address_.c_str());
-
-    // Connects to zookeeper server
-    zoo_set_debug_level(ZOO_LOG_LEVEL_WARN);
-    zookeeper_handle_ =
-        zookeeper_init(zookeeper_address_.c_str(), NULL, 15000, 0, 0, 0);
-    GPR_ASSERT(zookeeper_handle_ != NULL);
-
-    // Registers zookeeper name resolver in grpc
-    grpc_zookeeper_register();
-  }
-
-  std::unique_ptr<Server> SetUpServer(const int port) {
-    string server_address = "localhost:" + to_string(port);
-
-    ServerBuilder builder;
-    builder.AddListeningPort(server_address, InsecureServerCredentials());
-    builder.RegisterService(&service_);
-    std::unique_ptr<Server> server = builder.BuildAndStart();
-    return server;
-  }
-
-  void RegisterService(const string& name, const string& value) {
-    char* path = (char*)gpr_malloc(name.size());
-
-    int status = zoo_exists(zookeeper_handle_, name.c_str(), 0, NULL);
-    if (status == ZNONODE) {
-      status =
-          zoo_create(zookeeper_handle_, name.c_str(), value.c_str(),
-                     value.size(), &ZOO_OPEN_ACL_UNSAFE, 0, path, name.size());
-    } else {
-      status = zoo_set(zookeeper_handle_, name.c_str(), value.c_str(),
-                       value.size(), -1);
-    }
-    gpr_free(path);
-    GPR_ASSERT(status == 0);
-  }
-
-  void DeleteService(const string& name) {
-    int status = zoo_delete(zookeeper_handle_, name.c_str(), -1);
-    GPR_ASSERT(status == 0);
-  }
-
-  void ChangeZookeeperState() {
-    server1_->Shutdown();
-    DeleteService("/test/1");
-  }
-
-  void TearDown() GRPC_OVERRIDE {
-    server1_->Shutdown();
-    server2_->Shutdown();
-    zookeeper_close(zookeeper_handle_);
-  }
-
-  void ResetStub() {
-    string target = "zookeeper://" + zookeeper_address_ + "/test";
-    channel_ = CreateChannel(target, InsecureChannelCredentials());
-    stub_ = grpc::testing::EchoTestService::NewStub(channel_);
-  }
-
-  string to_string(const int number) {
-    std::stringstream strs;
-    strs << number;
-    return strs.str();
-  }
-
-  std::shared_ptr<Channel> channel_;
-  std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
-  std::unique_ptr<Server> server1_;
-  std::unique_ptr<Server> server2_;
-  ZookeeperTestServiceImpl service_;
-  zhandle_t* zookeeper_handle_;
-  string zookeeper_address_;
-};
-
-// Tests zookeeper state change between two RPCs
-// TODO(ctiller): leaked objects in this test
-TEST_F(ZookeeperTest, ZookeeperStateChangeTwoRpc) {
-  ResetStub();
-
-  // First RPC
-  EchoRequest request1;
-  EchoResponse response1;
-  ClientContext context1;
-  context1.set_authority("test");
-  request1.set_message("Hello");
-  Status s1 = stub_->Echo(&context1, request1, &response1);
-  EXPECT_EQ(response1.message(), request1.message());
-  EXPECT_TRUE(s1.ok());
-
-  // Zookeeper state changes
-  gpr_log(GPR_DEBUG, "Zookeeper state change");
-  ChangeZookeeperState();
-  // Waits for re-resolving addresses
-  // TODO(ctiller): RPC will probably fail if not waiting
-  sleep(1);
-
-  // Second RPC
-  EchoRequest request2;
-  EchoResponse response2;
-  ClientContext context2;
-  context2.set_authority("test");
-  request2.set_message("World");
-  Status s2 = stub_->Echo(&context2, request2, &response2);
-  EXPECT_EQ(response2.message(), request2.message());
-  EXPECT_TRUE(s2.ok());
-}
-
-}  // namespace testing
-}  // namespace grpc
-
-int main(int argc, char** argv) {
-  grpc_test_init(argc, argv);
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/test/cpp/interop/client.cc b/test/cpp/interop/client.cc
index addaf174f2b332d4f3a00a4d4162bed34c45487a..e8ae6ee572325288b985596ec60d0da2d4d4894f 100644
--- a/test/cpp/interop/client.cc
+++ b/test/cpp/interop/client.cc
@@ -40,7 +40,9 @@
 #include <grpc++/client_context.h>
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
+#include <grpc/support/useful.h>
 
+#include "src/core/lib/support/string.h"
 #include "test/cpp/interop/client_helper.h"
 #include "test/cpp/interop/interop_client.h"
 #include "test/cpp/util/test_config.h"
@@ -52,30 +54,31 @@ DEFINE_string(server_host, "127.0.0.1", "Server host to connect to");
 DEFINE_string(server_host_override, "foo.test.google.fr",
               "Override the server host which is sent in HTTP header");
 DEFINE_string(test_case, "large_unary",
-              "Configure different test cases. Valid options are: "
-              "empty_unary : empty (zero bytes) request and response; "
-              "large_unary : single request and (large) response; "
-              "large_compressed_unary : single request and compressed (large) "
-              "response; "
-              "client_streaming : request streaming with single response; "
-              "server_streaming : single request with response streaming; "
+              "Configure different test cases. Valid options are:\n\n"
+              "all : all test cases;\n"
+              "cancel_after_begin : cancel stream after starting it;\n"
+              "cancel_after_first_response: cancel on first response;\n"
+              "client_compressed_streaming : compressed request streaming with "
+              "single response;\n"
+              "client_compressed_unary : single compressed request;\n"
+              "client_streaming : request streaming with single response;\n"
+              "compute_engine_creds: large_unary with compute engine auth;\n"
+              "custom_metadata: server will echo custom metadata;\n"
+              "empty_stream : bi-di stream with no request/response;\n"
+              "empty_unary : empty (zero bytes) request and response;\n"
+              "half_duplex : half-duplex streaming;\n"
+              "jwt_token_creds: large_unary with JWT token auth;\n"
+              "large_unary : single request and (large) response;\n"
+              "oauth2_auth_token: raw oauth2 access token auth;\n"
+              "per_rpc_creds: raw oauth2 access token on a single rpc;\n"
+              "ping_pong : full-duplex streaming;\n"
               "server_compressed_streaming : single request with compressed "
-              "response streaming; "
-              "slow_consumer : single request with response; "
-              " streaming with slow client consumer; "
-              "half_duplex : half-duplex streaming; "
-              "ping_pong : full-duplex streaming; "
-              "cancel_after_begin : cancel stream after starting it; "
-              "cancel_after_first_response: cancel on first response; "
-              "timeout_on_sleeping_server: deadline exceeds on stream; "
-              "empty_stream : bi-di stream with no request/response; "
-              "compute_engine_creds: large_unary with compute engine auth; "
-              "jwt_token_creds: large_unary with JWT token auth; "
-              "oauth2_auth_token: raw oauth2 access token auth; "
-              "per_rpc_creds: raw oauth2 access token on a single rpc; "
-              "status_code_and_message: verify status code & message; "
-              "custom_metadata: server will echo custom metadata;"
-              "all : all of above.");
+              "response streaming;\n"
+              "server_compressed_unary : single compressed response;\n"
+              "server_streaming : single request with response streaming;\n"
+              "slow_consumer : single request with response streaming with "
+              "slow client consumer;\n"
+              "status_code_and_message: verify status code & message;\n"
+              "timeout_on_sleeping_server: deadline exceeds on stream;\n");
 DEFINE_string(default_service_account, "",
               "Email of GCE default service account");
 DEFINE_string(service_account_key_file, "",
@@ -104,14 +107,18 @@ int main(int argc, char** argv) {
     client.DoEmpty();
   } else if (FLAGS_test_case == "large_unary") {
     client.DoLargeUnary();
-  } else if (FLAGS_test_case == "large_compressed_unary") {
-    client.DoLargeCompressedUnary();
+  } else if (FLAGS_test_case == "server_compressed_unary") {
+    client.DoServerCompressedUnary();
+  } else if (FLAGS_test_case == "client_compressed_unary") {
+    client.DoClientCompressedUnary();
   } else if (FLAGS_test_case == "client_streaming") {
     client.DoRequestStreaming();
   } else if (FLAGS_test_case == "server_streaming") {
     client.DoResponseStreaming();
   } else if (FLAGS_test_case == "server_compressed_streaming") {
-    client.DoResponseCompressedStreaming();
+    client.DoServerCompressedStreaming();
+  } else if (FLAGS_test_case == "client_compressed_streaming") {
+    client.DoClientCompressedStreaming();
   } else if (FLAGS_test_case == "slow_consumer") {
     client.DoResponseStreamingWithSlowConsumer();
   } else if (FLAGS_test_case == "half_duplex") {
@@ -144,9 +151,12 @@ int main(int argc, char** argv) {
   } else if (FLAGS_test_case == "all") {
     client.DoEmpty();
     client.DoLargeUnary();
+    client.DoClientCompressedUnary();
+    client.DoServerCompressedUnary();
     client.DoRequestStreaming();
     client.DoResponseStreaming();
-    client.DoResponseCompressedStreaming();
+    client.DoClientCompressedStreaming();
+    client.DoServerCompressedStreaming();
     client.DoHalfDuplex();
     client.DoPingPong();
     client.DoCancelAfterBegin();
@@ -165,15 +175,35 @@ int main(int argc, char** argv) {
     }
     // compute_engine_creds only runs in GCE.
   } else {
-    gpr_log(
-        GPR_ERROR,
-        "Unsupported test case %s. Valid options are all|empty_unary|"
-        "large_unary|large_compressed_unary|client_streaming|server_streaming|"
-        "server_compressed_streaming|half_duplex|ping_pong|cancel_after_begin|"
-        "cancel_after_first_response|timeout_on_sleeping_server|empty_stream|"
-        "compute_engine_creds|jwt_token_creds|oauth2_auth_token|per_rpc_creds|"
-        "status_code_and_message|custom_metadata",
-        FLAGS_test_case.c_str());
+    const char* testcases[] = {"all",
+                               "cancel_after_begin",
+                               "cancel_after_first_response",
+                               "client_compressed_streaming",
+                               "client_compressed_unary",
+                               "client_streaming",
+                               "compute_engine_creds",
+                               "custom_metadata",
+                               "empty_stream",
+                               "empty_unary",
+                               "half_duplex",
+                               "jwt_token_creds",
+                               "large_unary",
+                               "oauth2_auth_token",
+                               "per_rpc_creds",
+                               "ping_pong",
+                               "server_compressed_streaming",
+                               "server_compressed_unary",
+                               "server_streaming",
+                               "status_code_and_message",
+                               "timeout_on_sleeping_server"};
+    char* joined_testcases =
+        gpr_strjoin_sep(testcases, GPR_ARRAY_SIZE(testcases), "\n", NULL);
+
+    gpr_log(GPR_ERROR, "Unsupported test case %s. Valid options are\n%s",
+            FLAGS_test_case.c_str(), joined_testcases);
+    gpr_free(joined_testcases);
     ret = 1;
   }
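The rewritten error path above builds the list of valid test case names with gpr_strjoin_sep(), which is why the hunk adds grpc/support/useful.h and src/core/lib/support/string.h to the includes. A stand-alone sketch of the same pattern, with a hypothetical options array and assuming the call shape used above (array, element count, separator, optional output length):

    #include <grpc/support/alloc.h>
    #include <grpc/support/log.h>
    #include <grpc/support/useful.h>

    #include "src/core/lib/support/string.h"

    void LogValidOptions() {
      const char* options[] = {"alpha", "beta", "gamma"};  // hypothetical names
      // Join the entries with newlines; gpr_strjoin_sep allocates, the caller frees.
      char* joined = gpr_strjoin_sep(options, GPR_ARRAY_SIZE(options), "\n", NULL);
      gpr_log(GPR_ERROR, "Valid options are:\n%s", joined);
      gpr_free(joined);
    }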
 
diff --git a/test/cpp/interop/interop_client.cc b/test/cpp/interop/interop_client.cc
index 608f902d6febefbb7198fb6221852cc0fd7e176d..8861bc1163c144bbf5c0c0b329b7a4be5f8d2eec 100644
--- a/test/cpp/interop/interop_client.cc
+++ b/test/cpp/interop/interop_client.cc
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,7 @@ namespace testing {
 namespace {
 // The same value is defined by the Java client.
 const std::vector<int> request_stream_sizes = {27182, 8, 1828, 45904};
-const std::vector<int> response_stream_sizes = {31415, 59, 2653, 58979};
+const std::vector<int> response_stream_sizes = {31415, 9, 2653, 58979};
 const int kNumResponseMessages = 2000;
 const int kResponseMessageSize = 1030;
 const int kReceiveDelayMilliSeconds = 20;
@@ -67,28 +67,23 @@ const int kLargeResponseSize = 314159;
 void NoopChecks(const InteropClientContextInspector& inspector,
                 const SimpleRequest* request, const SimpleResponse* response) {}
 
-void CompressionChecks(const InteropClientContextInspector& inspector,
-                       const SimpleRequest* request,
-                       const SimpleResponse* response) {
+void UnaryCompressionChecks(const InteropClientContextInspector& inspector,
+                            const SimpleRequest* request,
+                            const SimpleResponse* response) {
   const grpc_compression_algorithm received_compression =
       inspector.GetCallCompressionAlgorithm();
-  if (request->request_compressed_response() &&
-      received_compression == GRPC_COMPRESS_NONE) {
-    if (request->request_compressed_response() &&
-        received_compression == GRPC_COMPRESS_NONE) {
+  if (request->response_compressed().value()) {
+    if (received_compression == GRPC_COMPRESS_NONE) {
       // Requested some compression, got NONE. This is an error.
       gpr_log(GPR_ERROR,
               "Failure: Requested compression but got uncompressed response "
               "from server.");
       abort();
     }
-  }
-  if (!request->request_compressed_response()) {
-    GPR_ASSERT(!(inspector.GetMessageFlags() & GRPC_WRITE_INTERNAL_COMPRESS));
-  } else if (request->response_type() == PayloadType::COMPRESSABLE) {
-    // requested compression and compressable response => results should always
-    // be compressed.
     GPR_ASSERT(inspector.GetMessageFlags() & GRPC_WRITE_INTERNAL_COMPRESS);
+  } else {
+    // Didn't request compression -> make sure the response is uncompressed
+    GPR_ASSERT(!(inspector.GetMessageFlags() & GRPC_WRITE_INTERNAL_COMPRESS));
   }
 }
 }  // namespace
@@ -190,11 +185,16 @@ bool InteropClient::PerformLargeUnary(SimpleRequest* request,
                                       CheckerFn custom_checks_fn) {
   ClientContext context;
   InteropClientContextInspector inspector(context);
-  // If the request doesn't already specify the response type, default to
-  // COMPRESSABLE.
   request->set_response_size(kLargeResponseSize);
   grpc::string payload(kLargeRequestSize, '\0');
   request->mutable_payload()->set_body(payload.c_str(), kLargeRequestSize);
+  if (request->has_expect_compressed()) {
+    if (request->expect_compressed().value()) {
+      context.set_compression_algorithm(GRPC_COMPRESS_GZIP);
+    } else {
+      context.set_compression_algorithm(GRPC_COMPRESS_NONE);
+    }
+  }
 
   Status s = serviceStub_.Get()->UnaryCall(&context, *request, response);
   if (!AssertStatusOk(s)) {
@@ -204,27 +204,8 @@ bool InteropClient::PerformLargeUnary(SimpleRequest* request,
   custom_checks_fn(inspector, request, response);
 
   // Payload related checks.
-  GPR_ASSERT(response->payload().type() == request->response_type());
-  switch (response->payload().type()) {
-    case PayloadType::COMPRESSABLE:
-      GPR_ASSERT(response->payload().body() ==
-                 grpc::string(kLargeResponseSize, '\0'));
-      break;
-    case PayloadType::UNCOMPRESSABLE: {
-      // We don't really check anything: We can't assert that the payload is
-      // uncompressed because it's the server's prerogative to decide on that,
-      // and different implementations decide differently (ie, Java always
-      // compresses when requested to do so, whereas C core throws away the
-      // compressed payload if the output is larger than the input).
-      // In addition, we don't compare the actual random bytes received because
-      // asserting that data is sent/received properly isn't the purpose of this
-      // test. Moreover, different implementations are also free to use
-      // different sets of random bytes.
-    } break;
-    default:
-      GPR_ASSERT(false);
-  }
-
+  GPR_ASSERT(response->payload().body() ==
+             grpc::string(kLargeResponseSize, '\0'));
   return true;
 }
 
@@ -237,7 +218,6 @@ bool InteropClient::DoComputeEngineCreds(
   SimpleResponse response;
   request.set_fill_username(true);
   request.set_fill_oauth_scope(true);
-  request.set_response_type(PayloadType::COMPRESSABLE);
 
   if (!PerformLargeUnary(&request, &response)) {
     return false;
@@ -311,7 +291,6 @@ bool InteropClient::DoJwtTokenCreds(const grpc::string& username) {
   SimpleRequest request;
   SimpleResponse response;
   request.set_fill_username(true);
-  request.set_response_type(PayloadType::COMPRESSABLE);
 
   if (!PerformLargeUnary(&request, &response)) {
     return false;
@@ -327,7 +306,6 @@ bool InteropClient::DoLargeUnary() {
   gpr_log(GPR_DEBUG, "Sending a large unary rpc...");
   SimpleRequest request;
   SimpleResponse response;
-  request.set_response_type(PayloadType::COMPRESSABLE);
   if (!PerformLargeUnary(&request, &response)) {
     return false;
   }
@@ -335,32 +313,73 @@ bool InteropClient::DoLargeUnary() {
   return true;
 }
 
-bool InteropClient::DoLargeCompressedUnary() {
-  const bool request_compression[] = {false, true};
-  const PayloadType payload_types[] = {COMPRESSABLE, UNCOMPRESSABLE};
-  for (size_t i = 0; i < GPR_ARRAY_SIZE(payload_types); i++) {
-    for (size_t j = 0; j < GPR_ARRAY_SIZE(request_compression); j++) {
-      char* log_suffix;
-      gpr_asprintf(&log_suffix, "(compression=%s; payload=%s)",
-                   request_compression[j] ? "true" : "false",
-                   PayloadType_Name(payload_types[i]).c_str());
-
-      gpr_log(GPR_DEBUG, "Sending a large compressed unary rpc %s.",
-              log_suffix);
-      SimpleRequest request;
-      SimpleResponse response;
-      request.set_response_type(payload_types[i]);
-      request.set_request_compressed_response(request_compression[j]);
-
-      if (!PerformLargeUnary(&request, &response, CompressionChecks)) {
-        gpr_log(GPR_ERROR, "Large compressed unary failed %s", log_suffix);
-        gpr_free(log_suffix);
-        return false;
-      }
-
-      gpr_log(GPR_DEBUG, "Large compressed unary done %s.", log_suffix);
+bool InteropClient::DoClientCompressedUnary() {
+  // Probing for compression-checks support.
+  ClientContext probe_context;
+  SimpleRequest probe_req;
+  SimpleResponse probe_res;
+
+  probe_context.set_compression_algorithm(GRPC_COMPRESS_NONE);
+  probe_req.mutable_expect_compressed()->set_value(true);  // lies!
+
+  probe_req.set_response_size(kLargeResponseSize);
+  probe_req.mutable_payload()->set_body(grpc::string(kLargeRequestSize, '\0'));
+
+  gpr_log(GPR_DEBUG, "Sending probe for compressed unary request.");
+  const Status s =
+      serviceStub_.Get()->UnaryCall(&probe_context, probe_req, &probe_res);
+  if (s.error_code() != grpc::StatusCode::INVALID_ARGUMENT) {
+    // The server isn't able to evaluate incoming compression, making the rest
+    // of this test moot.
+    gpr_log(GPR_DEBUG, "Compressed unary request probe failed");
+    return false;
+  }
+  gpr_log(GPR_DEBUG, "Compressed unary request probe succeeded. Proceeding.");
+
+  const std::vector<bool> compressions = {true, false};
+  for (size_t i = 0; i < compressions.size(); i++) {
+    char* log_suffix;
+    gpr_asprintf(&log_suffix, "(compression=%s)",
+                 compressions[i] ? "true" : "false");
+
+    gpr_log(GPR_DEBUG, "Sending compressed unary request %s.", log_suffix);
+    SimpleRequest request;
+    SimpleResponse response;
+    request.mutable_expect_compressed()->set_value(compressions[i]);
+    if (!PerformLargeUnary(&request, &response, UnaryCompressionChecks)) {
+      gpr_log(GPR_ERROR, "Compressed unary request failed %s", log_suffix);
       gpr_free(log_suffix);
+      return false;
     }
+
+    gpr_log(GPR_DEBUG, "Compressed unary request done %s", log_suffix);
+    gpr_free(log_suffix);
+  }
+
+  return true;
+}
+
+bool InteropClient::DoServerCompressedUnary() {
+  const std::vector<bool> compressions = {true, false};
+  for (size_t i = 0; i < compressions.size(); i++) {
+    char* log_suffix;
+    gpr_asprintf(&log_suffix, "(compression=%s)",
+                 compressions[i] ? "true" : "false");
+
+    gpr_log(GPR_DEBUG, "Sending unary request for compressed response %s.",
+            log_suffix);
+    SimpleRequest request;
+    SimpleResponse response;
+    request.mutable_response_compressed()->set_value(compressions[i]);
+
+    if (!PerformLargeUnary(&request, &response, UnaryCompressionChecks)) {
+      gpr_log(GPR_ERROR, "Request for compressed unary failed %s", log_suffix);
+      gpr_free(log_suffix);
+      return false;
+    }
+
+    gpr_log(GPR_DEBUG, "Request for compressed unary done %s", log_suffix);
+    gpr_free(log_suffix);
   }
 
   return true;
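The probe at the top of DoClientCompressedUnary() above is the interesting part: the request claims expect_compressed while the call itself is forced to GRPC_COMPRESS_NONE, so a server that actually checks the expectation has to answer INVALID_ARGUMENT; any other status means the feature is unsupported and the test bails out. A condensed sketch of that handshake, where stub stands in for serviceStub_.Get() and error handling is elided:

    ClientContext probe_context;
    SimpleRequest probe_req;
    SimpleResponse probe_res;
    probe_context.set_compression_algorithm(GRPC_COMPRESS_NONE);  // send uncompressed...
    probe_req.mutable_expect_compressed()->set_value(true);       // ...but claim otherwise
    Status s = stub->UnaryCall(&probe_context, probe_req, &probe_res);
    // Only a server that verifies the expectation rejects the mismatch.
    bool checks_supported = (s.error_code() == grpc::StatusCode::INVALID_ARGUMENT);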
@@ -387,7 +406,7 @@ bool InteropClient::DoRequestStreaming() {
       serviceStub_.Get()->StreamingInputCall(&context, &response));
 
   int aggregated_payload_size = 0;
-  for (unsigned int i = 0; i < request_stream_sizes.size(); ++i) {
+  for (size_t i = 0; i < request_stream_sizes.size(); ++i) {
     Payload* payload = request.mutable_payload();
     payload->set_body(grpc::string(request_stream_sizes[i], '\0'));
     if (!stream->Write(request)) {
@@ -396,7 +415,7 @@ bool InteropClient::DoRequestStreaming() {
     }
     aggregated_payload_size += request_stream_sizes[i];
   }
-  stream->WritesDone();
+  GPR_ASSERT(stream->WritesDone());
 
   Status s = stream->Finish();
   if (!AssertStatusOk(s)) {
@@ -446,92 +465,128 @@ bool InteropClient::DoResponseStreaming() {
   return true;
 }
 
-bool InteropClient::DoResponseCompressedStreaming() {
-  const bool request_compression[] = {false, true};
-  const PayloadType payload_types[] = {COMPRESSABLE, UNCOMPRESSABLE};
-  for (size_t i = 0; i < GPR_ARRAY_SIZE(payload_types); i++) {
-    for (size_t j = 0; j < GPR_ARRAY_SIZE(request_compression); j++) {
-      ClientContext context;
-      InteropClientContextInspector inspector(context);
-      StreamingOutputCallRequest request;
-
-      char* log_suffix;
-      gpr_asprintf(&log_suffix, "(compression=%s; payload=%s)",
-                   request_compression[j] ? "true" : "false",
-                   PayloadType_Name(payload_types[i]).c_str());
-
-      gpr_log(GPR_DEBUG, "Receiving response streaming rpc %s.", log_suffix);
-
-      request.set_response_type(payload_types[i]);
-      request.set_request_compressed_response(request_compression[j]);
-
-      for (size_t k = 0; k < response_stream_sizes.size(); ++k) {
-        ResponseParameters* response_parameter =
-            request.add_response_parameters();
-        response_parameter->set_size(response_stream_sizes[k]);
-      }
-      StreamingOutputCallResponse response;
-
-      std::unique_ptr<ClientReader<StreamingOutputCallResponse>> stream(
-          serviceStub_.Get()->StreamingOutputCall(&context, request));
-
-      size_t k = 0;
-      while (stream->Read(&response)) {
-        // Payload related checks.
-        GPR_ASSERT(response.payload().type() == request.response_type());
-        switch (response.payload().type()) {
-          case PayloadType::COMPRESSABLE:
-            GPR_ASSERT(response.payload().body() ==
-                       grpc::string(response_stream_sizes[k], '\0'));
-            break;
-          case PayloadType::UNCOMPRESSABLE:
-            break;
-          default:
-            GPR_ASSERT(false);
-        }
-
-        // Compression related checks.
-        if (request.request_compressed_response()) {
-          GPR_ASSERT(inspector.GetCallCompressionAlgorithm() >
-                     GRPC_COMPRESS_NONE);
-          if (request.response_type() == PayloadType::COMPRESSABLE) {
-            // requested compression and compressable response => results should
-            // always be compressed.
-            GPR_ASSERT(inspector.GetMessageFlags() &
-                       GRPC_WRITE_INTERNAL_COMPRESS);
-          }
-        } else {
-          // requested *no* compression.
-          GPR_ASSERT(
-              !(inspector.GetMessageFlags() & GRPC_WRITE_INTERNAL_COMPRESS));
-        }
-
-        ++k;
-      }
-
-      gpr_log(GPR_DEBUG, "Response streaming done %s.", log_suffix);
-      gpr_free(log_suffix);
+bool InteropClient::DoClientCompressedStreaming() {
+  // Probing for compression-checks support.
+  ClientContext probe_context;
+  StreamingInputCallRequest probe_req;
+  StreamingInputCallResponse probe_res;
+
+  probe_context.set_compression_algorithm(GRPC_COMPRESS_NONE);
+  probe_req.mutable_expect_compressed()->set_value(true);  // lies!
+  probe_req.mutable_payload()->set_body(grpc::string(27182, '\0'));
 
-      if (k < response_stream_sizes.size()) {
-        // stream->Read() failed before reading all the expected messages. This
-        // is most likely due to a connection failure.
-        gpr_log(GPR_ERROR,
-                "DoResponseCompressedStreaming(): Responses read (k=%" PRIuPTR
-                ") is "
-                "less than the expected messages (i.e "
-                "response_stream_sizes.size() (%" PRIuPTR ")). (i=%" PRIuPTR
-                ", j=%" PRIuPTR ")",
-                k, response_stream_sizes.size(), i, j);
-        return TransientFailureOrAbort();
-      }
-
-      Status s = stream->Finish();
-      if (!AssertStatusOk(s)) {
-        return false;
-      }
+  gpr_log(GPR_DEBUG, "Sending probe for compressed streaming request.");
+
+  std::unique_ptr<ClientWriter<StreamingInputCallRequest>> probe_stream(
+      serviceStub_.Get()->StreamingInputCall(&probe_context, &probe_res));
+
+  if (!probe_stream->Write(probe_req)) {
+    gpr_log(GPR_ERROR, "%s(): stream->Write() failed", __func__);
+    return TransientFailureOrAbort();
+  }
+  Status s = probe_stream->Finish();
+  if (s.error_code() != grpc::StatusCode::INVALID_ARGUMENT) {
+    // The server isn't able to evaluate incoming compression, making the rest
+    // of this test moot.
+    gpr_log(GPR_DEBUG, "Compressed streaming request probe failed");
+    return false;
+  }
+  gpr_log(GPR_DEBUG,
+          "Compressed streaming request probe succeeded. Proceeding.");
+
+  ClientContext context;
+  StreamingInputCallRequest request;
+  StreamingInputCallResponse response;
+
+  context.set_compression_algorithm(GRPC_COMPRESS_GZIP);
+  std::unique_ptr<ClientWriter<StreamingInputCallRequest>> stream(
+      serviceStub_.Get()->StreamingInputCall(&context, &response));
+
+  request.mutable_payload()->set_body(grpc::string(27182, '\0'));
+  request.mutable_expect_compressed()->set_value(true);
+  gpr_log(GPR_DEBUG, "Sending streaming request with compression enabled");
+  if (!stream->Write(request)) {
+    gpr_log(GPR_ERROR, "%s(): stream->Write() failed", __func__);
+    return TransientFailureOrAbort();
+  }
+
+  WriteOptions wopts;
+  wopts.set_no_compression();
+  request.mutable_payload()->set_body(grpc::string(45904, '\0'));
+  request.mutable_expect_compressed()->set_value(false);
+  gpr_log(GPR_DEBUG, "Sending streaming request with compression disabled");
+  if (!stream->Write(request, wopts)) {
+    gpr_log(GPR_ERROR, "%s(): stream->Write() failed", __func__);
+    return TransientFailureOrAbort();
+  }
+  GPR_ASSERT(stream->WritesDone());
+
+  s = stream->Finish();
+  if (!AssertStatusOk(s)) {
+    return false;
+  }
+
+  return true;
+}
+
+bool InteropClient::DoServerCompressedStreaming() {
+  const std::vector<bool> compressions = {true, false};
+  const std::vector<int> sizes = {31415, 92653};
+
+  ClientContext context;
+  InteropClientContextInspector inspector(context);
+  StreamingOutputCallRequest request;
+
+  GPR_ASSERT(compressions.size() == sizes.size());
+  for (size_t i = 0; i < sizes.size(); i++) {
+    char* log_suffix;
+    gpr_asprintf(&log_suffix, "(compression=%s; size=%d)",
+                 compressions[i] ? "true" : "false", sizes[i]);
+
+    gpr_log(GPR_DEBUG, "Adding response parameter %s.", log_suffix);
+    gpr_free(log_suffix);
+
+    ResponseParameters* const response_parameter =
+        request.add_response_parameters();
+    response_parameter->mutable_compressed()->set_value(compressions[i]);
+    response_parameter->set_size(sizes[i]);
+  }
+  std::unique_ptr<ClientReader<StreamingOutputCallResponse>> stream(
+      serviceStub_.Get()->StreamingOutputCall(&context, request));
+
+  size_t k = 0;
+  StreamingOutputCallResponse response;
+  while (stream->Read(&response)) {
+    // Payload size checks.
+    GPR_ASSERT(response.payload().body() ==
+               grpc::string(request.response_parameters(k).size(), '\0'));
+
+    // Compression checks.
+    GPR_ASSERT(request.response_parameters(k).has_compressed());
+    if (request.response_parameters(k).compressed().value()) {
+      GPR_ASSERT(inspector.GetCallCompressionAlgorithm() > GRPC_COMPRESS_NONE);
+      GPR_ASSERT(inspector.GetMessageFlags() & GRPC_WRITE_INTERNAL_COMPRESS);
+    } else {
+      // requested *no* compression.
+      GPR_ASSERT(!(inspector.GetMessageFlags() & GRPC_WRITE_INTERNAL_COMPRESS));
     }
+    ++k;
+  }
+
+  if (k < sizes.size()) {
+    // stream->Read() failed before reading all the expected messages. This
+    // is most likely due to a connection failure.
+    gpr_log(GPR_ERROR,
+            "%s(): Responses read (k=%" PRIuPTR
+            ") is less than the expected number of messages (%" PRIuPTR ").",
+            __func__, k, sizes.size());
+    return TransientFailureOrAbort();
   }
 
+  Status s = stream->Finish();
+  if (!AssertStatusOk(s)) {
+    return false;
+  }
   return true;
 }
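DoClientCompressedStreaming() above also exercises the per-message compression control added in this change: the call-level algorithm is GZIP, and the second write opts out through WriteOptions. Reduced to its core (stream is a ClientWriter<StreamingInputCallRequest> opened with ctx, as in the code above):

    ClientContext ctx;
    ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);  // call-level default
    StreamingInputCallRequest msg;
    msg.mutable_payload()->set_body(grpc::string(1024, '\0'));
    stream->Write(msg);                   // compressed with the call's algorithm
    WriteOptions no_compression;
    no_compression.set_no_compression();  // per-message override
    stream->Write(msg, no_compression);   // this message goes out uncompressed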
 
@@ -632,7 +687,6 @@ bool InteropClient::DoPingPong() {
       stream(serviceStub_.Get()->FullDuplexCall(&context));
 
   StreamingOutputCallRequest request;
-  request.set_response_type(PayloadType::COMPRESSABLE);
   ResponseParameters* response_parameter = request.add_response_parameters();
   Payload* payload = request.mutable_payload();
   StreamingOutputCallResponse response;
@@ -699,7 +753,6 @@ bool InteropClient::DoCancelAfterFirstResponse() {
       stream(serviceStub_.Get()->FullDuplexCall(&context));
 
   StreamingOutputCallRequest request;
-  request.set_response_type(PayloadType::COMPRESSABLE);
   ResponseParameters* response_parameter = request.add_response_parameters();
   response_parameter->set_size(31415);
   request.mutable_payload()->set_body(grpc::string(27182, '\0'));
@@ -839,7 +892,6 @@ bool InteropClient::DoCustomMetadata() {
         stream(serviceStub_.Get()->FullDuplexCall(&context));
 
     StreamingOutputCallRequest request;
-    request.set_response_type(PayloadType::COMPRESSABLE);
     ResponseParameters* response_parameter = request.add_response_parameters();
     response_parameter->set_size(kLargeResponseSize);
     grpc::string payload(kLargeRequestSize, '\0');
diff --git a/test/cpp/interop/interop_client.h b/test/cpp/interop/interop_client.h
index c8d6810c12e00c463731f61a0560a2525012a6ce..eb886fcb7e2c3126d0557e8c4af022bf29f8a2f2 100644
--- a/test/cpp/interop/interop_client.h
+++ b/test/cpp/interop/interop_client.h
@@ -64,12 +64,14 @@ class InteropClient {
 
   bool DoEmpty();
   bool DoLargeUnary();
-  bool DoLargeCompressedUnary();
+  bool DoServerCompressedUnary();
+  bool DoClientCompressedUnary();
   bool DoPingPong();
   bool DoHalfDuplex();
   bool DoRequestStreaming();
   bool DoResponseStreaming();
-  bool DoResponseCompressedStreaming();
+  bool DoServerCompressedStreaming();
+  bool DoClientCompressedStreaming();
   bool DoResponseStreamingWithSlowConsumer();
   bool DoCancelAfterBegin();
   bool DoCancelAfterFirstResponse();
diff --git a/test/cpp/interop/server_main.cc b/test/cpp/interop/interop_server.cc
similarity index 72%
rename from test/cpp/interop/server_main.cc
rename to test/cpp/interop/interop_server.cc
index 062857f3d6bc6ecbb35fb990dafb6e10fd7e594f..ebef0002a3c5127a3618f4fccb18f01e51850541 100644
--- a/test/cpp/interop/server_main.cc
+++ b/test/cpp/interop/interop_server.cc
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
 #include <grpc/support/log.h>
 #include <grpc/support/useful.h>
 
+#include "src/core/lib/transport/byte_stream.h"
 #include "src/proto/grpc/testing/empty.grpc.pb.h"
 #include "src/proto/grpc/testing/messages.grpc.pb.h"
 #include "src/proto/grpc/testing/test.grpc.pb.h"
@@ -64,10 +65,10 @@ using grpc::ServerCredentials;
 using grpc::ServerReader;
 using grpc::ServerReaderWriter;
 using grpc::ServerWriter;
+using grpc::WriteOptions;
 using grpc::SslServerCredentialsOptions;
 using grpc::testing::InteropServerContextInspector;
 using grpc::testing::Payload;
-using grpc::testing::PayloadType;
 using grpc::testing::SimpleRequest;
 using grpc::testing::SimpleResponse;
 using grpc::testing::StreamingInputCallRequest;
@@ -78,7 +79,6 @@ using grpc::testing::TestService;
 using grpc::Status;
 
 static bool got_sigint = false;
-static const char* kRandomFile = "test/cpp/interop/rnd.dat";
 
 const char kEchoInitialMetadataKey[] = "x-grpc-test-echo-initial";
 const char kEchoTrailingBinMetadataKey[] = "x-grpc-test-echo-trailing-bin";
@@ -110,34 +110,41 @@ void MaybeEchoMetadata(ServerContext* context) {
   }
 }
 
-bool SetPayload(PayloadType response_type, int size, Payload* payload) {
-  payload->set_type(response_type);
-  switch (response_type) {
-    case PayloadType::COMPRESSABLE: {
-      std::unique_ptr<char[]> body(new char[size]());
-      payload->set_body(body.get(), size);
-    } break;
-    case PayloadType::UNCOMPRESSABLE: {
-      std::unique_ptr<char[]> body(new char[size]());
-      std::ifstream rnd_file(kRandomFile);
-      GPR_ASSERT(rnd_file.good());
-      rnd_file.read(body.get(), size);
-      GPR_ASSERT(!rnd_file.eof());  // Requested more rnd bytes than available
-      payload->set_body(body.get(), size);
-    } break;
-    default:
-      GPR_ASSERT(false);
-  }
+bool SetPayload(int size, Payload* payload) {
+  std::unique_ptr<char[]> body(new char[size]());
+  payload->set_body(body.get(), size);
   return true;
 }
 
-template <typename RequestType>
-void SetResponseCompression(ServerContext* context,
-                            const RequestType& request) {
-  if (request.request_compressed_response()) {
-    // Any level would do, let's go for HIGH because we are overachievers.
-    context->set_compression_level(GRPC_COMPRESS_LEVEL_HIGH);
+bool CheckExpectedCompression(const ServerContext& context,
+                              const bool compression_expected) {
+  const InteropServerContextInspector inspector(context);
+  const grpc_compression_algorithm received_compression =
+      inspector.GetCallCompressionAlgorithm();
+
+  if (compression_expected) {
+    if (received_compression == GRPC_COMPRESS_NONE) {
+      // Expected some compression, got NONE. This is an error.
+      gpr_log(GPR_ERROR,
+              "Expected compression but got uncompressed request from client.");
+      return false;
+    }
+    if (!(inspector.GetMessageFlags() & GRPC_WRITE_INTERNAL_COMPRESS)) {
+      gpr_log(GPR_ERROR,
+              "Failure: Expected a compressed request, but the compression "
+              "bit in the message flags is not set.");
+      return false;
+    }
+  } else {
+    // Didn't expect compression -> make sure the request is uncompressed
+    if (inspector.GetMessageFlags() & GRPC_WRITE_INTERNAL_COMPRESS) {
+      gpr_log(GPR_ERROR,
+              "Failure: Did not expect a compressed request, but the "
+              "compression bit in the message flags is set.");
+      return false;
+    }
   }
+  return true;
 }
 
 class TestServiceImpl : public TestService::Service {
@@ -151,11 +158,26 @@ class TestServiceImpl : public TestService::Service {
   Status UnaryCall(ServerContext* context, const SimpleRequest* request,
                    SimpleResponse* response) {
     MaybeEchoMetadata(context);
-    SetResponseCompression(context, *request);
+    if (request->has_response_compressed()) {
+      const bool compression_requested = request->response_compressed().value();
+      gpr_log(GPR_DEBUG, "Request for compression (%s) present for %s",
+              compression_requested ? "enabled" : "disabled", __func__);
+      if (compression_requested) {
+        // Any level would do, let's go for HIGH because we are overachievers.
+        context->set_compression_level(GRPC_COMPRESS_LEVEL_HIGH);
+      } else {
+        context->set_compression_level(GRPC_COMPRESS_LEVEL_NONE);
+      }
+    }
+    if (!CheckExpectedCompression(*context,
+                                  request->expect_compressed().value())) {
+      return Status(grpc::StatusCode::INVALID_ARGUMENT,
+                    "Compressed request expectation not met.");
+    }
     if (request->response_size() > 0) {
-      if (!SetPayload(request->response_type(), request->response_size(),
-                      response->mutable_payload())) {
-        return Status(grpc::StatusCode::INTERNAL, "Error creating payload.");
+      if (!SetPayload(request->response_size(), response->mutable_payload())) {
+        return Status(grpc::StatusCode::INVALID_ARGUMENT,
+                      "Error creating payload.");
       }
     }
 
@@ -171,15 +193,26 @@ class TestServiceImpl : public TestService::Service {
   Status StreamingOutputCall(
       ServerContext* context, const StreamingOutputCallRequest* request,
       ServerWriter<StreamingOutputCallResponse>* writer) {
-    SetResponseCompression(context, *request);
     StreamingOutputCallResponse response;
     bool write_success = true;
     for (int i = 0; write_success && i < request->response_parameters_size();
          i++) {
-      if (!SetPayload(request->response_type(),
-                      request->response_parameters(i).size(),
+      if (!SetPayload(request->response_parameters(i).size(),
                       response.mutable_payload())) {
-        return Status(grpc::StatusCode::INTERNAL, "Error creating payload.");
+        return Status(grpc::StatusCode::INVALID_ARGUMENT,
+                      "Error creating payload.");
+      }
+      WriteOptions wopts;
+      if (request->response_parameters(i).has_compressed()) {
+        // Compress by default. Disabled on a per-message basis.
+        context->set_compression_level(GRPC_COMPRESS_LEVEL_HIGH);
+        const bool compression_requested =
+            request->response_parameters(i).compressed().value();
+        gpr_log(GPR_DEBUG, "Request for compression (%s) present for %s",
+                compression_requested ? "enabled" : "disabled", __func__);
+        if (!compression_requested) {
+          wopts.set_no_compression();
+        }  // else, compression is already enabled via the context.
       }
       int time_us;
       if ((time_us = request->response_parameters(i).interval_us()) > 0) {
@@ -189,7 +222,7 @@ class TestServiceImpl : public TestService::Service {
                          gpr_time_from_micros(time_us, GPR_TIMESPAN));
         gpr_sleep_until(sleep_time);
       }
-      write_success = writer->Write(response);
+      write_success = writer->Write(response, wopts);
     }
     if (write_success) {
       return Status::OK;
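The server side mirrors the client: the StreamingOutputCall hunk above enables compression for the whole call and then disables it per response through WriteOptions whenever the client asked for an uncompressed message. A hedged sketch of that shape (the helper name and the compress flag are illustrative only):

    Status WriteOneResponse(ServerContext* context,
                            ServerWriter<StreamingOutputCallResponse>* writer,
                            bool compress) {
      context->set_compression_level(GRPC_COMPRESS_LEVEL_HIGH);  // compress by default
      StreamingOutputCallResponse response;
      WriteOptions wopts;
      if (!compress) {
        wopts.set_no_compression();  // opt this single Write() out
      }
      writer->Write(response, wopts);
      return Status::OK;
    }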
@@ -204,6 +237,11 @@ class TestServiceImpl : public TestService::Service {
     StreamingInputCallRequest request;
     int aggregated_payload_size = 0;
     while (reader->Read(&request)) {
+      if (!CheckExpectedCompression(*context,
+                                    request.expect_compressed().value())) {
+        return Status(grpc::StatusCode::INVALID_ARGUMENT,
+                      "Compressed request expectation not met.");
+      }
       if (request.has_payload()) {
         aggregated_payload_size += request.payload().body().size();
       }
@@ -221,7 +259,6 @@ class TestServiceImpl : public TestService::Service {
     StreamingOutputCallResponse response;
     bool write_success = true;
     while (write_success && stream->Read(&request)) {
-      SetResponseCompression(context, request);
       if (request.response_parameters_size() != 0) {
         response.mutable_payload()->set_type(request.payload().type());
         response.mutable_payload()->set_body(
diff --git a/test/cpp/interop/metrics_client.cc b/test/cpp/interop/metrics_client.cc
index 7a0cb994df50e9b9c17c68a1f12947c754b5dde8..179de30805c89f4dbddeb5ead6de97764a908dc1 100644
--- a/test/cpp/interop/metrics_client.cc
+++ b/test/cpp/interop/metrics_client.cc
@@ -56,6 +56,9 @@ using grpc::testing::GaugeResponse;
 using grpc::testing::MetricsService;
 using grpc::testing::MetricsServiceImpl;
 
+// Do not log anything
+void BlackholeLogger(gpr_log_func_args* args) {}
+
 // Prints the values of all Gauges (unless total_only is set to 'true' in which
 // case this only prints the sum of all gauge values).
 bool PrintMetrics(std::unique_ptr<MetricsService::Stub> stub, bool total_only,
@@ -76,21 +79,21 @@ bool PrintMetrics(std::unique_ptr<MetricsService::Stub> stub, bool total_only,
   while (reader->Read(&gauge_response)) {
     if (gauge_response.value_case() == GaugeResponse::kLongValue) {
       if (!total_only) {
-        gpr_log(GPR_INFO, "%s: %lld", gauge_response.name().c_str(),
-                gauge_response.long_value());
+        std::cout << gauge_response.name() << ": "
+                  << gauge_response.long_value() << std::endl;
       }
       overall_qps += gauge_response.long_value();
     } else {
-      gpr_log(GPR_INFO, "Gauge %s is not a long value",
-              gauge_response.name().c_str());
+      std::cout << "Gauge '" << gauge_response.name() << "' is not long valued"
+                << std::endl;
     }
   }
 
-  gpr_log(GPR_INFO, "%ld", overall_qps);
+  std::cout << overall_qps << std::endl;
 
   const grpc::Status status = reader->Finish();
   if (!status.ok()) {
-    gpr_log(GPR_ERROR, "Error in getting metrics from the client");
+    std::cout << "Error in getting metrics from the client" << std::endl;
   }
 
   return status.ok();
@@ -99,14 +102,10 @@ bool PrintMetrics(std::unique_ptr<MetricsService::Stub> stub, bool total_only,
 int main(int argc, char** argv) {
   grpc::testing::InitTest(&argc, &argv, true);
 
-  // Make sure server_addresses flag is not empty
-  if (FLAGS_metrics_server_address.empty()) {
-    gpr_log(
-        GPR_ERROR,
-        "Cannot connect to the Metrics server. Please pass the address of the"
-        "metrics server to connect to via the 'metrics_server_address' flag");
-    return 1;
-  }
+  // The output of the metrics client is in some cases parsed programmatically
+  // (for example by the stress test framework), so we do not want any logging
+  // from the gRPC library to appear on stdout.
+  gpr_set_log_function(BlackholeLogger);
 
   std::shared_ptr<grpc::Channel> channel(grpc::CreateChannel(
       FLAGS_metrics_server_address, grpc::InsecureChannelCredentials()));
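Silencing the library logger is the whole point of BlackholeLogger above: the stress framework scrapes this binary's stdout, and gpr_log() routes every message through whatever handler gpr_set_log_function() installed. In short, using only calls that appear in the hunk above:

    gpr_set_log_function(BlackholeLogger);               // library logging is dropped
    gpr_log(GPR_ERROR, "this message goes nowhere");     // handled by BlackholeLogger
    std::cout << "overall_qps: " << 1234 << std::endl;   // parsed output, unaffected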
diff --git a/test/cpp/interop/rnd.dat b/test/cpp/interop/rnd.dat
deleted file mode 100644
index 8c7f38f9e0e01b33798cbe743bd5f873b6e0a1af..0000000000000000000000000000000000000000
Binary files a/test/cpp/interop/rnd.dat and /dev/null differ
diff --git a/test/cpp/interop/server_helper.cc b/test/cpp/interop/server_helper.cc
index c6d891ad71b711ce270ae8a8af62c45c4b7012ce..8b0b511bcb89d62d4e5642266b8aaaa83bf9e6b2 100644
--- a/test/cpp/interop/server_helper.cc
+++ b/test/cpp/interop/server_helper.cc
@@ -72,6 +72,10 @@ uint32_t InteropServerContextInspector::GetEncodingsAcceptedByClient() const {
   return grpc_call_test_only_get_encodings_accepted_by_peer(context_.call_);
 }
 
+uint32_t InteropServerContextInspector::GetMessageFlags() const {
+  return grpc_call_test_only_get_message_flags(context_.call_);
+}
+
 std::shared_ptr<const AuthContext>
 InteropServerContextInspector::GetAuthContext() const {
   return context_.auth_context();
diff --git a/test/cpp/interop/server_helper.h b/test/cpp/interop/server_helper.h
index 12865e403247906a4e260ea219611cd952c78b86..a1da14a4c8da5de623c9f75c03e9d1f8c90812e8 100644
--- a/test/cpp/interop/server_helper.h
+++ b/test/cpp/interop/server_helper.h
@@ -54,6 +54,7 @@ class InteropServerContextInspector {
   bool IsCancelled() const;
   grpc_compression_algorithm GetCallCompressionAlgorithm() const;
   uint32_t GetEncodingsAcceptedByClient() const;
+  uint32_t GetMessageFlags() const;
 
  private:
   const ::grpc::ServerContext& context_;
diff --git a/test/cpp/interop/stress_interop_client.cc b/test/cpp/interop/stress_interop_client.cc
index aa95682e7413598b360b06312fbc99175c0eabf0..1d5fc80cf26cd135c8b10eeed5dbd3d6eaa84f06 100644
--- a/test/cpp/interop/stress_interop_client.cc
+++ b/test/cpp/interop/stress_interop_client.cc
@@ -138,8 +138,12 @@ bool StressTestInteropClient::RunTest(TestCaseType test_case) {
       is_success = interop_client_->DoLargeUnary();
       break;
     }
-    case LARGE_COMPRESSED_UNARY: {
-      is_success = interop_client_->DoLargeCompressedUnary();
+    case CLIENT_COMPRESSED_UNARY: {
+      is_success = interop_client_->DoClientCompressedUnary();
+      break;
+    }
+    case CLIENT_COMPRESSED_STREAMING: {
+      is_success = interop_client_->DoClientCompressedStreaming();
       break;
     }
     case CLIENT_STREAMING: {
@@ -150,8 +154,12 @@ bool StressTestInteropClient::RunTest(TestCaseType test_case) {
       is_success = interop_client_->DoResponseStreaming();
       break;
     }
+    case SERVER_COMPRESSED_UNARY: {
+      is_success = interop_client_->DoServerCompressedUnary();
+      break;
+    }
     case SERVER_COMPRESSED_STREAMING: {
-      is_success = interop_client_->DoResponseCompressedStreaming();
+      is_success = interop_client_->DoServerCompressedStreaming();
       break;
     }
     case SLOW_CONSUMER: {
diff --git a/test/cpp/interop/stress_interop_client.h b/test/cpp/interop/stress_interop_client.h
index aa93b58b4a75b233d0c10ce55df4c2c3c9dbdbd1..cf6a7134739ae693286ec6ecdbf4ccb736d3766e 100644
--- a/test/cpp/interop/stress_interop_client.h
+++ b/test/cpp/interop/stress_interop_client.h
@@ -51,29 +51,33 @@ using std::vector;
 
 enum TestCaseType {
   UNKNOWN_TEST = -1,
-  EMPTY_UNARY = 0,
-  LARGE_UNARY = 1,
-  LARGE_COMPRESSED_UNARY = 2,
-  CLIENT_STREAMING = 3,
-  SERVER_STREAMING = 4,
-  SERVER_COMPRESSED_STREAMING = 5,
-  SLOW_CONSUMER = 6,
-  HALF_DUPLEX = 7,
-  PING_PONG = 8,
-  CANCEL_AFTER_BEGIN = 9,
-  CANCEL_AFTER_FIRST_RESPONSE = 10,
-  TIMEOUT_ON_SLEEPING_SERVER = 11,
-  EMPTY_STREAM = 12,
-  STATUS_CODE_AND_MESSAGE = 13,
-  CUSTOM_METADATA = 14
+  EMPTY_UNARY,
+  LARGE_UNARY,
+  CLIENT_COMPRESSED_UNARY,
+  CLIENT_COMPRESSED_STREAMING,
+  CLIENT_STREAMING,
+  SERVER_STREAMING,
+  SERVER_COMPRESSED_UNARY,
+  SERVER_COMPRESSED_STREAMING,
+  SLOW_CONSUMER,
+  HALF_DUPLEX,
+  PING_PONG,
+  CANCEL_AFTER_BEGIN,
+  CANCEL_AFTER_FIRST_RESPONSE,
+  TIMEOUT_ON_SLEEPING_SERVER,
+  EMPTY_STREAM,
+  STATUS_CODE_AND_MESSAGE,
+  CUSTOM_METADATA
 };
 
 const vector<pair<TestCaseType, grpc::string>> kTestCaseList = {
     {EMPTY_UNARY, "empty_unary"},
     {LARGE_UNARY, "large_unary"},
-    {LARGE_COMPRESSED_UNARY, "large_compressed_unary"},
+    {CLIENT_COMPRESSED_UNARY, "client_compressed_unary"},
+    {CLIENT_COMPRESSED_STREAMING, "client_compressed_streaming"},
     {CLIENT_STREAMING, "client_streaming"},
     {SERVER_STREAMING, "server_streaming"},
+    {SERVER_COMPRESSED_UNARY, "server_compressed_unary"},
     {SERVER_COMPRESSED_STREAMING, "server_compressed_streaming"},
     {SLOW_CONSUMER, "slow_consumer"},
     {HALF_DUPLEX, "half_duplex"},
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index 3d98ab09390e37b1e9eb89b49c57adad977c2020..1507d1e3d6678e5489ecb1d49a2290b25b475e9d 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -180,14 +180,14 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
 
     using namespace std::placeholders;
     int t = 0;
-    for (int i = 0; i < config.outstanding_rpcs_per_channel(); i++) {
-      for (int ch = 0; ch < config.client_channels(); ch++) {
+    for (int ch = 0; ch < config.client_channels(); ch++) {
+      for (int i = 0; i < config.outstanding_rpcs_per_channel(); i++) {
         auto* cq = cli_cqs_[t].get();
         auto ctx =
             setup_ctx(channels_[ch].get_stub(), next_issuers_[t], request_);
         ctx->Start(cq);
-        t = (t + 1) % cli_cqs_.size();
       }
+      t = (t + 1) % cli_cqs_.size();
     }
   }
   virtual ~AsyncClient() {
diff --git a/test/cpp/util/slice_test.cc b/test/cpp/util/slice_test.cc
index de7ff031ab8593ecbc63908cc18f60cd9623bac4..45799ae157c3d2104a1aff5fc814bc932449b3c4 100644
--- a/test/cpp/util/slice_test.cc
+++ b/test/cpp/util/slice_test.cc
@@ -68,6 +68,16 @@ TEST_F(SliceTest, Empty) {
   CheckSlice(empty_slice, "");
 }
 
+TEST_F(SliceTest, Cslice) {
+  gpr_slice s = gpr_slice_from_copied_string(kContent);
+  Slice spp(s, Slice::STEAL_REF);
+  CheckSlice(spp, kContent);
+  gpr_slice c_slice = spp.c_slice();
+  EXPECT_EQ(GPR_SLICE_START_PTR(s), GPR_SLICE_START_PTR(c_slice));
+  EXPECT_EQ(GPR_SLICE_END_PTR(s), GPR_SLICE_END_PTR(c_slice));
+  gpr_slice_unref(c_slice);
+}
+
 }  // namespace
 }  // namespace grpc
 
diff --git a/third_party/protobuf b/third_party/protobuf
index d4d13a4349e4e59d67f311185ddcc1890d956d7a..bdeb215cab2985195325fcd5e70c3fa751f46e0f 160000
--- a/third_party/protobuf
+++ b/third_party/protobuf
@@ -1 +1 @@
-Subproject commit d4d13a4349e4e59d67f311185ddcc1890d956d7a
+Subproject commit bdeb215cab2985195325fcd5e70c3fa751f46e0f
diff --git a/tools/distrib/python/docgen.py b/tools/distrib/python/docgen.py
index 72c65ad14a885a05e05edc746760db9202d9c955..f5e89f1da67cefa82078b8145670d94f4f6630dd 100755
--- a/tools/distrib/python/docgen.py
+++ b/tools/distrib/python/docgen.py
@@ -70,7 +70,6 @@ environment.update({
 })
 
 subprocess_arguments_list = [
-    {'args': ['make'], 'cwd': PROJECT_ROOT},
     {'args': ['virtualenv', VIRTUALENV_DIR], 'env': environment},
     {'args': [VIRTUALENV_PIP_PATH, 'install', '-r', REQUIREMENTS_PATH],
      'env': environment},
diff --git a/tools/distrib/python/grpcio_tools/.gitignore b/tools/distrib/python/grpcio_tools/.gitignore
index 979704d970bf3dc61d4f5e968d693fe43809ceee..9f3a7360ee6d404884842320f436cb1ccf94b792 100644
--- a/tools/distrib/python/grpcio_tools/.gitignore
+++ b/tools/distrib/python/grpcio_tools/.gitignore
@@ -5,3 +5,4 @@ grpc_root/
 *.c
 *.cpp
 *.egg-info
+*.so
diff --git a/tools/distrib/python/grpcio_tools/README.rst b/tools/distrib/python/grpcio_tools/README.rst
index e9f137493ba97bb46ad09c04bb513f867c3cc586..8946a1d5b31be350b90ea864e47c26efd158968c 100644
--- a/tools/distrib/python/grpcio_tools/README.rst
+++ b/tools/distrib/python/grpcio_tools/README.rst
@@ -53,7 +53,7 @@ GCC-like stuff, but you may end up having a bad time.
 ::
 
   $ export REPO_ROOT=grpc  # REPO_ROOT can be any directory of your choice
-  $ git clone https://github.com/grpc/grpc.git $REPO_ROOT
+  $ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc $REPO_ROOT
   $ cd $REPO_ROOT
   $ git submodule update --init
 
diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/protoc_compiler.pyx b/tools/distrib/python/grpcio_tools/grpc/tools/_protoc_compiler.pyx
similarity index 100%
rename from tools/distrib/python/grpcio_tools/grpc/tools/protoc_compiler.pyx
rename to tools/distrib/python/grpcio_tools/grpc/tools/_protoc_compiler.pyx
diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/command.py b/tools/distrib/python/grpcio_tools/grpc/tools/command.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccf38b7d569e066f314b057214903356f144a84b
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/grpc/tools/command.py
@@ -0,0 +1,70 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+
+import setuptools
+
+from grpc.tools import protoc
+
+
+class BuildProtoModules(setuptools.Command):
+  """Command to generate project *_pb2.py modules from proto files."""
+
+  description = 'build grpc protobuf modules'
+  user_options = []
+
+  def initialize_options(self):
+    pass
+
+  def finalize_options(self):
+    pass
+
+  def run(self):
+    # Due to limitations of the proto generator, we require that only *one*
+    # directory is provided as an 'include' directory. We assume it's the ''
+    # key to `self.distribution.package_dir` (and get a KeyError if it's not
+    # there).
+    proto_files = []
+    inclusion_root = os.path.abspath(self.distribution.package_dir[''])
+    for root, _, files in os.walk(inclusion_root):
+      for filename in files:
+        if filename.endswith('.proto'):
+          proto_files.append(os.path.abspath(os.path.join(root, filename)))
+
+    for proto_file in proto_files:
+      command = [
+          'grpc.tools.protoc',
+          '--proto_path={}'.format(inclusion_root),
+          '--python_out={}'.format(inclusion_root),
+          '--grpc_python_out={}'.format(inclusion_root),
+      ] + [proto_file]
+      if protoc.main(command) != 0:
+        sys.stderr.write('warning: {} failed\n'.format(command))
diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/main.cc b/tools/distrib/python/grpcio_tools/grpc/tools/main.cc
index 81675b4e6fce381b6d8c67d16c2dfae18a99f314..83918395135d5090676f12973ca1b11e74ccdc1e 100644
--- a/tools/distrib/python/grpcio_tools/grpc/tools/main.cc
+++ b/tools/distrib/python/grpcio_tools/grpc/tools/main.cc
@@ -45,7 +45,6 @@ int protoc_main(int argc, char* argv[]) {
 
   // gRPC Python
   grpc_python_generator::GeneratorConfiguration grpc_py_config;
-  grpc_py_config.beta_package_root = "grpc.beta";
   grpc_python_generator::PythonGrpcGenerator grpc_py_generator(grpc_py_config);
   cli.RegisterGenerator("--grpc_python_out", &grpc_py_generator,
                         "Generate Python source file.");
diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/protoc.py b/tools/distrib/python/grpcio_tools/grpc/tools/protoc.py
index 1c69e78920c65da7d1193011fa5aa31d37e7dc96..e1256a7dd9bd86542fb155755ee83aa09d3d68e5 100644
--- a/tools/distrib/python/grpcio_tools/grpc/tools/protoc.py
+++ b/tools/distrib/python/grpcio_tools/grpc/tools/protoc.py
@@ -32,10 +32,18 @@
 import pkg_resources
 import sys
 
-from grpc.tools import protoc_compiler
+from grpc.tools import _protoc_compiler
 
+def main(command_arguments):
+  """Run the protocol buffer compiler with the given command-line arguments.
+
+  Args:
+    command_arguments: a list of strings representing command line arguments to
+        `protoc`.
+  """
+  command_arguments = [argument.encode() for argument in command_arguments]
+  return _protoc_compiler.run_main(command_arguments)
 
 if __name__ == '__main__':
   proto_include = pkg_resources.resource_filename('grpc.tools', '_proto')
-  protoc_compiler.run_main(
-      sys.argv + ['-I{}'.format(proto_include)])
+  sys.exit(main(sys.argv + ['-I{}'.format(proto_include)]))
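With this change, grpc.tools.protoc is importable as an API: main() encodes the arguments and returns the compiler's exit status, and the bundled .proto definitions live under the 'grpc.tools' '_proto' resource, exactly as the __main__ block above adds them. A usage sketch, with a hypothetical input file:

    # Sketch only: helloworld.proto is a hypothetical input file.
    import pkg_resources
    from grpc.tools import protoc

    bundled_protos = pkg_resources.resource_filename('grpc.tools', '_proto')
    exit_code = protoc.main([
        'protoc',                          # argv[0]; the value itself is not significant
        '-I.',                             # resolve imports relative to the working directory
        '-I{}'.format(bundled_protos),     # bundled .proto definitions shipped with the package
        '--python_out=.',
        '--grpc_python_out=.',
        'helloworld.proto',
    ])
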
diff --git a/tools/distrib/python/grpcio_tools/grpc_version.py b/tools/distrib/python/grpcio_tools/grpc_version.py
index 1267d0e45dc784cede565b9231b89aa2c12de668..79c40717ddf32044221186c018b180346703bc00 100644
--- a/tools/distrib/python/grpcio_tools/grpc_version.py
+++ b/tools/distrib/python/grpcio_tools/grpc_version.py
@@ -29,4 +29,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
 
-VERSION='0.15.0.dev0'
+VERSION='1.1.0.dev0'
diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py
index fbe69f43d56cd65c24946e32da31f91e87cc5a7b..d804f34fc60b2ac824aea2e9c62d5e31ab816d73 100644
--- a/tools/distrib/python/grpcio_tools/setup.py
+++ b/tools/distrib/python/grpcio_tools/setup.py
@@ -31,9 +31,12 @@ from distutils import extension
 import errno
 import os
 import os.path
+import pkg_resources
+import platform
 import shlex
 import shutil
 import sys
+import sysconfig
 
 import setuptools
 from setuptools.command import build_ext
@@ -43,6 +46,11 @@ from setuptools.command import build_ext
 os.chdir(os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(0, os.path.abspath('.'))
 
+import protoc_lib_deps
+import grpc_version
+
+PY3 = sys.version_info.major == 3
+
 # There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
 # entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
 # We use these environment variables to thus get around that without locking
@@ -56,8 +64,18 @@ EXTRA_LINK_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_LDFLAGS',
 GRPC_PYTHON_TOOLS_PACKAGE = 'grpc.tools'
 GRPC_PYTHON_PROTO_RESOURCES_NAME = '_proto'
 
-import protoc_lib_deps
-import grpc_version
+DEFINE_MACROS = (('HAVE_PTHREAD', 1),)
+if "win32" in sys.platform and '64bit' in platform.architecture()[0]:
+  DEFINE_MACROS += (('MS_WIN64', 1),)
+
+# By default, Python 3 distutils enforces compatibility of C extension
+# modules (.so files) with the OS X version that Python 3 was built against.
+# For Python 3.4 that is OS X 10.6, but we need thread-local storage (__thread).
+if 'darwin' in sys.platform and PY3:
+  mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
+  if mac_target and (pkg_resources.parse_version(mac_target) <
+                     pkg_resources.parse_version('10.9.0')):
+    os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
 
 def package_data():
   tools_path = GRPC_PYTHON_TOOLS_PACKAGE.replace('.', os.path.sep)
@@ -86,8 +104,8 @@ def protoc_ext_module():
       os.path.join(protoc_lib_deps.CC_INCLUDE, cc_file)
       for cc_file in protoc_lib_deps.CC_FILES]
   plugin_ext = extension.Extension(
-      name='grpc.tools.protoc_compiler',
-      sources=['grpc/tools/protoc_compiler.pyx'] + plugin_sources,
+      name='grpc.tools._protoc_compiler',
+      sources=['grpc/tools/_protoc_compiler.pyx'] + plugin_sources,
       include_dirs=[
           '.',
           'grpc_root',
@@ -95,9 +113,9 @@ def protoc_ext_module():
           protoc_lib_deps.CC_INCLUDE,
       ],
       language='c++',
-      define_macros=[('HAVE_PTHREAD', 1)],
-      extra_compile_args=EXTRA_COMPILE_ARGS,
-      extra_link_args=EXTRA_LINK_ARGS,
+      define_macros=list(DEFINE_MACROS),
+      extra_compile_args=list(EXTRA_COMPILE_ARGS),
+      extra_link_args=list(EXTRA_LINK_ARGS),
   )
   return plugin_ext
 
@@ -116,7 +134,7 @@ setuptools.setup(
   namespace_packages=['grpc'],
   install_requires=[
     'protobuf>=3.0.0a3',
-    'grpcio>=0.14.0',
+    'grpcio>=0.15.0',
   ],
   package_data=package_data(),
 )
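The new setup.py logic is purely platform-conditional. A standalone sketch of those checks, runnable outside the build and mirroring the values in the hunks above (the variable names here are illustrative only):

    # Sketch of the platform checks added to setup.py above.
    import os
    import platform
    import sys
    import sysconfig

    import pkg_resources

    define_macros = [('HAVE_PTHREAD', 1)]
    if 'win32' in sys.platform and '64bit' in platform.architecture()[0]:
        # 64-bit Windows builds of the extension expect MS_WIN64 to be defined.
        define_macros.append(('MS_WIN64', 1))

    if 'darwin' in sys.platform and sys.version_info.major == 3:
        # Raise the minimum OS X target so __thread (thread-local storage) is available.
        mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
        if mac_target and (pkg_resources.parse_version(str(mac_target)) <
                           pkg_resources.parse_version('10.9.0')):
            os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
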
diff --git a/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile b/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile
index 4ae4ebdb06dd573f8d158fea19b727e866892d4e..669e3557b8923a825b808a155d0eee29896f2b4c 100644
--- a/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile
+++ b/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile
@@ -76,7 +76,7 @@ RUN apt-get update && apt-get install -y \
 
 RUN pip install pip --upgrade
 RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 tox
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2
 
 
 ##################
diff --git a/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile b/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile
index 9c2fd52eee1a4f87569408d16052cb3387c295d3..860b8f4fb94b0ac33ea70380487f00d58a0302d7 100644
--- a/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile
+++ b/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile
@@ -76,7 +76,7 @@ RUN apt-get update && apt-get install -y \
 
 RUN pip install pip --upgrade
 RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 tox
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2
 
 
 ##################
diff --git a/tools/dockerfile/grpc_check_generated_pb_files/Dockerfile b/tools/dockerfile/grpc_check_generated_pb_files/Dockerfile
index 7658991462d040f190639e76c07318c7952f9e61..d19bc67120fc069752828694dbe3eeeb7cba7c16 100644
--- a/tools/dockerfile/grpc_check_generated_pb_files/Dockerfile
+++ b/tools/dockerfile/grpc_check_generated_pb_files/Dockerfile
@@ -67,11 +67,6 @@ RUN apt-get update && apt-get install -y time && apt-get clean
 # C++ dependencies
 RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
-
 RUN mkdir /var/local/jenkins
 
 # Define the default command.
diff --git a/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh b/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh
index eab7611b3f3ef3a04c675caa10ce3ae2cbe43980..462c65ab5e9be20dcc582c7916b3391020b83cb7 100755
--- a/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh
+++ b/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh
@@ -31,7 +31,7 @@
 set -e
 
 # directories to run against
-DIRS="src/core/lib src/core/ext src/cpp test/core test/cpp include"
+DIRS="src/core/lib src/core/ext src/cpp test/core test/cpp include src/compiler"
 
 # file matching patterns to check
 GLOB="*.h *.c *.cc"
diff --git a/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile
index baab2f5638040b5d19b6382f0c9eec1def4e2690..e3d52f0cb5355780fd1630d2201101e5e69125ef 100644
--- a/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #================
 # C# dependencies
 
@@ -88,10 +103,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile
index 2bbccca9e51d3189f6de5cbee6ece257747bee8a..aa77d5f1272492d446c25eabfccded7ea4bd858a 100644
--- a/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #=================
 # C++ dependencies
 RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
@@ -75,10 +90,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile
index ec71a53c2d3a5c3188bff193868d425cd9242bfa..05e963d1e67eb121d8eda30188dbc1c46f09d605 100644
--- a/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile
@@ -32,5 +32,20 @@ FROM golang:1.5
 # Using login shell removes Go from path, so we add it.
 RUN ln -s /usr/local/go/bin/go /usr/local/bin
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 # Define the default command.
 CMD ["bash"]
diff --git a/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile
index ec71a53c2d3a5c3188bff193868d425cd9242bfa..05e963d1e67eb121d8eda30188dbc1c46f09d605 100644
--- a/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile
@@ -32,5 +32,20 @@ FROM golang:1.5
 # Using login shell removes Go from path, so we add it.
 RUN ln -s /usr/local/go/bin/go /usr/local/bin
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 # Define the default command.
 CMD ["bash"]
diff --git a/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile
index 252c9bc928629e05d4d32ecdaaf6978f80a25974..b5fe54f99185e8b9e1fad67e6628c0601cdc2c8f 100644
--- a/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile
@@ -47,6 +47,21 @@ ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
 ENV PATH $PATH:$JAVA_HOME/bin
 
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 
 # Trigger download of as many Gradle artifacts as possible.
 RUN git clone --recursive --depth 1 https://github.com/grpc/grpc-java.git && \
diff --git a/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile
index 2a8d35a5dc144691935bed3c7ef212a4f99fc6fa..d9a75018295323351de516df0149f8f838e50013 100644
--- a/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #==================
 # Node dependencies
 
@@ -82,10 +97,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile
index e27a6a23013b9bb9bff2c46f0aa061b04757f2b7..65a8334269af391b4b35e3d9a14f2428210f0b3e 100644
--- a/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #==================
 # Ruby dependencies
 
@@ -100,10 +115,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile
index 071fb2c93b139292026f106e8802535f4ab7e7d6..10a88916ada1219735e89ac7a10fef9f72ba00d5 100644
--- a/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile
@@ -76,7 +76,7 @@ RUN apt-get update && apt-get install -y \
 # Install Python packages from PyPI
 RUN pip install pip --upgrade
 RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 tox
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
 
 # Prepare ccache
 RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
@@ -86,10 +86,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
index df8eef54385292da68685464894d61b56cb60af4..dae64e5c8cb199cce992a3220ce4be28a763d4cb 100644
--- a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #==================
 # Ruby dependencies
 
@@ -86,10 +101,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile
index 823fe948fb29a0dcfbb8fea970f5f5977d985164..81e3fdc38040e265d3d1ab58ab51f5994afb2891 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 # Prepare ccache
 RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
 RUN ln -s /usr/bin/ccache /usr/local/bin/g++
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile
index 556a26ee13905fd2ff24598394b57dbccbe687d9..e082da648b3b69dca13b37b0bc9ed9b7dbf1d2aa 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 # Prepare ccache
 RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
 RUN ln -s /usr/bin/ccache /usr/local/bin/g++
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile
index 2a875f59f19075b3d4dd7960395e427dfd1ac125..1e2b7d8c67197da899ffeed69bd41803f13ddce9 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile
@@ -34,6 +34,21 @@ RUN apt-get update && apt-get install -y python-pip && apt-get clean
 RUN pip install --upgrade google-api-python-client
 
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 # Using login shell removes Go from path, so we add it.
 RUN ln -s /usr/local/go/bin/go /usr/local/bin
 
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile
index 69bef1480c878d500ff2c147df0d04cf32c30e80..0c17ff595ef9ce9bfe7bfd842147022a88ef6d9d 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 # Prepare ccache
 RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
 RUN ln -s /usr/bin/ccache /usr/local/bin/g++
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
index 4fd7cc29a3c7f422c7a4539c18b917d6bf9e5ce3..0594f69a5b8645cf82b37251ecab578dceae115b 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #==================
 # Node dependencies
 
@@ -87,10 +102,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile
index c29aaf7c3f3fb80f3579ba46761357f4d5436dee..bbbdd4a1518261fec39416f0fad9d9ead40c97c7 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #==================
 # Ruby dependencies
 
@@ -105,10 +120,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile
index 606b7654576f93fef443c2893b743136614c3b4c..20d2d3f57bedf12a72a0208660a53db61d2563ad 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile
@@ -93,7 +93,7 @@ RUN apt-get update && apt-get install -y \
 # Install Python packages from PyPI
 RUN pip install pip --upgrade
 RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 tox
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
 
 
 RUN pip install coverage
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile
index 36b54ddafe38eb22d8c7314fc1a82757c9fcbf30..f459153fe5420c4c717d0f253eb828a8c98910ee 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 # Prepare ccache
 RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
 RUN ln -s /usr/bin/ccache /usr/local/bin/g++
diff --git a/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile b/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..25c6fe6ec6aade7e52094895d2372be185596d2f
--- /dev/null
+++ b/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile
@@ -0,0 +1,125 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+FROM debian:jessie
+
+# Install Git and basic packages.
+RUN apt-get update && apt-get install -y \
+  autoconf \
+  autotools-dev \
+  build-essential \
+  bzip2 \
+  ccache \
+  curl \
+  gcc \
+  gcc-multilib \
+  git \
+  golang \
+  gyp \
+  lcov \
+  libc6 \
+  libc6-dbg \
+  libc6-dev \
+  libgtest-dev \
+  libtool \
+  make \
+  perl \
+  strace \
+  python-dev \
+  python-setuptools \
+  python-yaml \
+  telnet \
+  unzip \
+  wget \
+  zip && apt-get clean
+
+#================
+# Build profiling
+RUN apt-get update && apt-get install -y time && apt-get clean
+
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
+#================
+# C# dependencies
+
+# Update to a newer version of mono
+RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
+RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list
+RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
+RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
+RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libtiff-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
+
+# Install dependencies
+RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y \
+    mono-devel \
+    ca-certificates-mono \
+    nuget \
+    && apt-get clean
+
+
+# Install dotnet SDK based on https://www.microsoft.com/net/core#debian
+RUN apt-get update && apt-get install -y curl libunwind8 gettext
+RUN curl -sSL -o dotnet.tar.gz https://go.microsoft.com/fwlink/?LinkID=809130
+RUN mkdir -p /opt/dotnet && tar zxf dotnet.tar.gz -C /opt/dotnet
+RUN ln -s /opt/dotnet/dotnet /usr/local/bin
+
+# Trigger the population of the local package cache
+ENV NUGET_XMLDOC_MODE skip
+RUN mkdir warmup \
+    && cd warmup \
+    && dotnet new \
+    && cd .. \
+    && rm -rf warmup
+
+# Prepare ccache
+RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
+RUN ln -s /usr/bin/ccache /usr/local/bin/g++
+RUN ln -s /usr/bin/ccache /usr/local/bin/cc
+RUN ln -s /usr/bin/ccache /usr/local/bin/c++
+RUN ln -s /usr/bin/ccache /usr/local/bin/clang
+RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
+
+
+RUN mkdir /var/local/jenkins
+
+# Define the default command.
+CMD ["bash"]
diff --git a/tools/dockerfile/test/csharp_jessie_x64/Dockerfile b/tools/dockerfile/test/csharp_jessie_x64/Dockerfile
index baab2f5638040b5d19b6382f0c9eec1def4e2690..e3d52f0cb5355780fd1630d2201101e5e69125ef 100644
--- a/tools/dockerfile/test/csharp_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/csharp_jessie_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #================
 # C# dependencies
 
@@ -88,10 +103,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/cxx_jessie_x64/Dockerfile b/tools/dockerfile/test/cxx_jessie_x64/Dockerfile
index 649215892994b77c33ffa008ac3028131296cbfb..67cee19914d123ad478969cc61874a05bae346ba 100644
--- a/tools/dockerfile/test/cxx_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_jessie_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #=================
 # C++ dependencies
 RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
@@ -108,10 +123,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/cxx_jessie_x86/Dockerfile b/tools/dockerfile/test/cxx_jessie_x86/Dockerfile
index 92c9c4ce86e25cbeb6ebe4234a4010414245171b..bee0849c67f5cd7824777bc8260b5b4cea17f722 100644
--- a/tools/dockerfile/test/cxx_jessie_x86/Dockerfile
+++ b/tools/dockerfile/test/cxx_jessie_x86/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #=================
 # C++ dependencies
 RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
@@ -75,10 +90,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile b/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile
index 5982c9783e08b341fccbbf9550a3280a03499fb1..2b3f4af3e6de978e3fe927ff325787a22adc5c9e 100644
--- a/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile
@@ -63,14 +63,25 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #=================
 # C++ dependencies
 RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile b/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile
index d356433163e7a419eeab4ad82b06954241993148..2d282276d31fd0b8fa73494e27316b24e9406427 100644
--- a/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #=================
 # C++ dependencies
 RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
@@ -75,10 +90,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile b/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile
index dd9a79b1ed5acfca56bdec12fafc49d2556f44b2..c25033387f9f452a032e139521065b2c2288e089 100644
--- a/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile
+++ b/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #=================
 # C++ dependencies
 RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
@@ -86,10 +101,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/fuzzer/Dockerfile b/tools/dockerfile/test/fuzzer/Dockerfile
index 6ba31114ab6620d82efd3c28645f7034b96bb5ab..bd04f07cea516ebcb14a9c6c67977190866ead20 100644
--- a/tools/dockerfile/test/fuzzer/Dockerfile
+++ b/tools/dockerfile/test/fuzzer/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #=================
 # C++ dependencies
 RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
@@ -108,10 +123,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
index 5c3f77405ea15d82159d2da9f57711f2f5c7f7ef..92c8436851bb3046dbfae0ab1fcc38af3d780393 100644
--- a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
@@ -137,7 +137,7 @@ RUN apt-get update && apt-get install -y \
 # Install Python packages from PyPI
 RUN pip install pip --upgrade
 RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 tox
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
 
 # Prepare ccache
 RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
@@ -147,10 +147,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/node_jessie_x64/Dockerfile b/tools/dockerfile/test/node_jessie_x64/Dockerfile
index 2a8d35a5dc144691935bed3c7ef212a4f99fc6fa..d9a75018295323351de516df0149f8f838e50013 100644
--- a/tools/dockerfile/test/node_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/node_jessie_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #==================
 # Node dependencies
 
@@ -82,10 +97,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/php_jessie_x64/Dockerfile b/tools/dockerfile/test/php_jessie_x64/Dockerfile
index d8d27846c16d0ab19a98b2188dc12a8623d6f852..2ef6e1d47fa74ad6df8a7b4abf348ec1f18d3fb1 100644
--- a/tools/dockerfile/test/php_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/php_jessie_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #=================
 # PHP dependencies
 
@@ -85,10 +100,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/python_jessie_x64/Dockerfile b/tools/dockerfile/test/python_jessie_x64/Dockerfile
index 071fb2c93b139292026f106e8802535f4ab7e7d6..10a88916ada1219735e89ac7a10fef9f72ba00d5 100644
--- a/tools/dockerfile/test/python_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/python_jessie_x64/Dockerfile
@@ -76,7 +76,7 @@ RUN apt-get update && apt-get install -y \
 # Install Python packages from PyPI
 RUN pip install pip --upgrade
 RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 tox
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
 
 # Prepare ccache
 RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
@@ -86,10 +86,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/python_pyenv_x64/Dockerfile b/tools/dockerfile/test/python_pyenv_x64/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..abb5f3c89b3ae94e15ae4f5b914ee23d996a5fee
--- /dev/null
+++ b/tools/dockerfile/test/python_pyenv_x64/Dockerfile
@@ -0,0 +1,112 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+FROM debian:jessie
+
+# Install Git and basic packages.
+RUN apt-get update && apt-get install -y \
+  autoconf \
+  autotools-dev \
+  build-essential \
+  bzip2 \
+  ccache \
+  curl \
+  gcc \
+  gcc-multilib \
+  git \
+  golang \
+  gyp \
+  lcov \
+  libc6 \
+  libc6-dbg \
+  libc6-dev \
+  libgtest-dev \
+  libtool \
+  make \
+  perl \
+  strace \
+  python-dev \
+  python-setuptools \
+  python-yaml \
+  telnet \
+  unzip \
+  wget \
+  zip && apt-get clean
+
+#================
+# Build profiling
+RUN apt-get update && apt-get install -y time && apt-get clean
+
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
+# Install dependencies for pyenv
+RUN apt-get update && apt-get install -y \
+  libbz2-dev \
+  libncurses5-dev \
+  libncursesw5-dev \
+  libreadline-dev \
+  libsqlite3-dev \
+  libssl-dev \
+  llvm \
+  mercurial \
+  zlib1g-dev && apt-get clean
+
+# Install Pyenv and dev Python versions 3.5 and 3.6
+RUN curl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash
+RUN pyenv update
+RUN pyenv install 3.5-dev
+RUN pyenv install 3.6-dev
+RUN pyenv local 3.5-dev 3.6-dev
+
+# Prepare ccache
+RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
+RUN ln -s /usr/bin/ccache /usr/local/bin/g++
+RUN ln -s /usr/bin/ccache /usr/local/bin/cc
+RUN ln -s /usr/bin/ccache /usr/local/bin/c++
+RUN ln -s /usr/bin/ccache /usr/local/bin/clang
+RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
+
+
+RUN mkdir /var/local/jenkins
+
+# Define the default command.
+CMD ["bash"]
diff --git a/tools/dockerfile/test/ruby_jessie_x64/Dockerfile b/tools/dockerfile/test/ruby_jessie_x64/Dockerfile
index df8eef54385292da68685464894d61b56cb60af4..dae64e5c8cb199cce992a3220ce4be28a763d4cb 100644
--- a/tools/dockerfile/test/ruby_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/ruby_jessie_x64/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #==================
 # Ruby dependencies
 
@@ -86,10 +101,6 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/c++
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang
 RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
 
 RUN mkdir /var/local/jenkins
 
diff --git a/tools/dockerfile/test/sanity/Dockerfile b/tools/dockerfile/test/sanity/Dockerfile
index 70a32c5586ad37dd6229523e9cb3ec73918b2122..f4b4831a649ee512460088b63657db06bd39f74d 100644
--- a/tools/dockerfile/test/sanity/Dockerfile
+++ b/tools/dockerfile/test/sanity/Dockerfile
@@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \
 # Build profiling
 RUN apt-get update && apt-get install -y time && apt-get clean
 
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
+
 #========================
 # Sanity test dependencies
 RUN apt-get update && apt-get install -y \
diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++
index 7f9d2df6f6c026aacc53181ecf8ddbca6a61d65f..a2415e1217c70ceaf58cc260230a82c0806471c0 100644
--- a/tools/doxygen/Doxyfile.c++
+++ b/tools/doxygen/Doxyfile.c++
@@ -40,7 +40,7 @@ PROJECT_NAME           = "GRPC C++"
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 0.15.0-dev
+PROJECT_NUMBER         = 1.1.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index dcf1a4c8c4076ae644c70c3047d854796a53403b..945298b964c3c7a8206d35d227cd7109026fe65a 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -40,7 +40,7 @@ PROJECT_NAME           = "GRPC C++"
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 0.15.0-dev
+PROJECT_NUMBER         = 1.1.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
diff --git a/tools/doxygen/Doxyfile.c++.internal.orig b/tools/doxygen/Doxyfile.c++.internal.orig
deleted file mode 100644
index c214b3d3c84929bbf38905b56126e688549ca688..0000000000000000000000000000000000000000
--- a/tools/doxygen/Doxyfile.c++.internal.orig
+++ /dev/null
@@ -1,2505 +0,0 @@
-
-
-# Doxyfile 1.8.9.1
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed in
-# front of the TAG it is preceding.
-#
-# All text after a single hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists, items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (\" \").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all text
-# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
-# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
-# for the list of possible encodings.
-# The default value is: UTF-8.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
-# double-quotes, unless you are using Doxywizard) that should identify the
-# project for which the documentation is generated. This name is used in the
-# title of most generated pages and in a few other places.
-# The default value is: My Project.
-
-PROJECT_NAME           = "GRPC C++"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
-# could be handy for archiving the generated documentation or if some version
-# control system is used.
-
-PROJECT_NUMBER         = 0.15.0-dev
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer a
-# quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          =
-
-# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
-# in the documentation. The maximum height of the logo should not exceed 55
-# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
-# the logo to the output directory.
-
-PROJECT_LOGO           =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
-# into which the generated documentation will be written. If a relative path is
-# entered, it will be relative to the location where doxygen was started. If
-# left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = doc/ref/c++.internal
-
-# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
-# directories (in 2 levels) under the output directory of each output format and
-# will distribute the generated files over these directories. Enabling this
-# option can be useful when feeding doxygen a huge amount of source files, where
-# putting all generated files in the same directory would otherwise causes
-# performance problems for the file system.
-# The default value is: NO.
-
-CREATE_SUBDIRS         = NO
-
-# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
-# characters to appear in the names of generated files. If set to NO, non-ASCII
-# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
-# U+3044.
-# The default value is: NO.
-
-ALLOW_UNICODE_NAMES    = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
-# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
-# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
-# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
-# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
-# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
-# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
-# Ukrainian and Vietnamese.
-# The default value is: English.
-
-OUTPUT_LANGUAGE        = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
-# descriptions after the members that are listed in the file and class
-# documentation (similar to Javadoc). Set to NO to disable this.
-# The default value is: YES.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
-# description of a member or function before the detailed description
-#
-# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-# The default value is: YES.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator that is
-# used to form the text in various listings. Each string in this list, if found
-# as the leading text of the brief description, will be stripped from the text
-# and the result, after processing the whole list, is used as the annotated
-# text. Otherwise, the brief description is used as-is. If left blank, the
-# following values are used ($name is automatically replaced with the name of
-# the entity):The $name class, The $name widget, The $name file, is, provides,
-# specifies, contains, represents, a, an and the.
-
-ABBREVIATE_BRIEF       =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# doxygen will generate a detailed section even if there is only a brief
-# description.
-# The default value is: NO.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-# The default value is: NO.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
-# before file names in the file list and in the header files. If set to NO the
-# shortest path that makes the file name unique will be used.
-# The default value is: YES.
-
-FULL_PATH_NAMES        = YES
-
-# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
-# Stripping is only done if one of the specified strings matches the left-hand
-# part of the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the path to
-# strip.
-#
-# Note that you can specify absolute paths here, but also relative paths, which
-# will be relative from the directory where doxygen is started.
-# This tag requires that the tag FULL_PATH_NAMES is set to YES.
-
-STRIP_FROM_PATH        =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
-# path mentioned in the documentation of a class, which tells the reader which
-# header file to include in order to use a class. If left blank only the name of
-# the header file containing the class definition is used. Otherwise one should
-# specify the list of include paths that are normally passed to the compiler
-# using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
-# less readable) file names. This can be useful if your file system doesn't
-# support long names, like on DOS, Mac, or CD-ROM.
-# The default value is: NO.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
-# first line (until the first dot) of a Javadoc-style comment as the brief
-# description. If set to NO, the Javadoc-style will behave just like regular Qt-
-# style comments (thus requiring an explicit @brief command for a brief
-# description.)
-# The default value is: NO.
-
-JAVADOC_AUTOBRIEF      = YES
-
-# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
-# line (until the first dot) of a Qt-style comment as the brief description. If
-# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
-# requiring an explicit \brief command for a brief description.)
-# The default value is: NO.
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
-# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
-# a brief description. This used to be the default behavior. The new default is
-# to treat a multi-line C++ comment block as a detailed description. Set this
-# tag to YES if you prefer the old behavior instead.
-#
-# Note that setting this tag to YES also means that Rational Rose comments are
-# not recognized any more.
-# The default value is: NO.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
-# documentation from any documented member that it re-implements.
-# The default value is: YES.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
-# page for each member. If set to NO, the documentation of a member will be part
-# of the file/class/namespace that contains it.
-# The default value is: NO.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
-# uses this value to replace tabs by spaces in code fragments.
-# Minimum value: 1, maximum value: 16, default value: 4.
-
-TAB_SIZE               = 2
-
-# This tag can be used to specify a number of aliases that act as commands in
-# the documentation. An alias has the form:
-# name=value
-# For example adding
-# "sideeffect=@par Side Effects:\n"
-# will allow you to put the command \sideeffect (or @sideeffect) in the
-# documentation, which will result in a user-defined paragraph with heading
-# "Side Effects:". You can put \n's in the value part of an alias to insert
-# newlines.
-
-ALIASES                =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
-# only. Doxygen will then generate output that is more tailored for C. For
-# instance, some of the names that are used will be different. The list of all
-# members will be omitted, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_FOR_C  = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
-# Python sources only. Doxygen will then generate output that is more tailored
-# for that language. For instance, namespaces will be presented as packages,
-# qualified scopes will look different, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources. Doxygen will then generate output that is tailored for Fortran.
-# The default value is: NO.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for VHDL.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension, and
-# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
-# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
-# Fortran. In the latter case the parser tries to guess whether the code is fixed
-# or free formatted code, this is the default for Fortran type files), VHDL. For
-# instance to make doxygen treat .inc files as Fortran files (default is PHP),
-# and .f files as C (default is Fortran), use: inc=Fortran f=C.
-#
-# Note: For files without extension you can use no_extension as a placeholder.
-#
-# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
-# according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you can
-# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
-# case of backward compatibility issues.
-# The default value is: YES.
-
-MARKDOWN_SUPPORT       = YES
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by putting a % sign in front of the word or
-# globally by setting AUTOLINK_SUPPORT to NO.
-# The default value is: YES.
-
-AUTOLINK_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should set this
-# tag to YES in order to let doxygen match function declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string);
-# versus func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-# The default value is: NO.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-# The default value is: NO.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
-# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
-# will parse them like normal C++ but will assume all classes use public instead
-# of private inheritance when no explicit protection keyword is present.
-# The default value is: NO.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES will make
-# doxygen replace the get and set methods with a property in the documentation.
-# This will only work if the methods are indeed getting or setting a simple
-# type. If this is not the case, or you want to show the methods anyway, you
-# should set this option to NO.
-# The default value is: YES.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-# The default value is: NO.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# Set the SUBGROUPING tag to YES to allow class member groups of the same type
-# (for instance a group of public functions) to be put as a subgroup of that
-# type (e.g. under the Public Functions section). Set it to NO to prevent
-# subgrouping. Alternatively, this can be done per class using the
-# \nosubgrouping command.
-# The default value is: YES.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
-# are shown inside the group in which they are included (e.g. using \ingroup)
-# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
-# and RTF).
-#
-# Note that this feature does not work in combination with
-# SEPARATE_MEMBER_PAGES.
-# The default value is: NO.
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
-# with only public data fields or simple typedef fields will be shown inline in
-# the documentation of the scope in which they are defined (i.e. file,
-# namespace, or group documentation), provided this scope is documented. If set
-# to NO, structs, classes, and unions are shown on a separate page (for HTML and
-# Man pages) or section (for LaTeX and RTF).
-# The default value is: NO.
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
-# enum is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically be
-# useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-# The default value is: NO.
-
-TYPEDEF_HIDES_STRUCT   = NO
-
-# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
-# cache is used to resolve symbols given their name and scope. Since this can be
-# an expensive process and often the same symbol appears multiple times in the
-# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
-# doxygen will become slower. If the cache is too large, memory is wasted. The
-# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
-# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
-# symbols. At the end of a run doxygen will report the cache usage and suggest
-# the optimal cache size from a speed point of view.
-# Minimum value: 0, maximum value: 9, default value: 0.
-
-LOOKUP_CACHE_SIZE      = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
-# documentation are documented, even if no documentation was available. Private
-# class members and static file members will be hidden unless the
-# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
-# Note: This will also disable the warnings about undocumented members that are
-# normally produced when WARNINGS is set to YES.
-# The default value is: NO.
-
-EXTRACT_ALL            = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
-# be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PRIVATE        = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
-# scope will be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
-# included in the documentation.
-# The default value is: NO.
-
-EXTRACT_STATIC         = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO,
-# only classes defined in header files are included. Does not have any effect
-# for Java sources.
-# The default value is: YES.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. If set to YES, local methods,
-# which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO, only methods in the interface are
-# included.
-# The default value is: NO.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base name of
-# the file that contains the anonymous namespace. By default anonymous namespaces
-# are hidden.
-# The default value is: NO.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
-# undocumented members inside documented classes or files. If set to NO these
-# members will be included in the various overviews, but no documentation
-# section is generated. This option has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_MEMBERS     = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy. If set
-# to NO, these classes will be included in the various overviews. This option
-# has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_CLASSES     = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO, these declarations will be
-# included in the documentation.
-# The default value is: NO.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO, these
-# blocks will be appended to the function's detailed documentation block.
-# The default value is: NO.
-
-HIDE_IN_BODY_DOCS      = NO
-
-# The INTERNAL_DOCS tag determines if documentation that is typed after a
-# \internal command is included. If the tag is set to NO then the documentation
-# will be excluded. Set it to YES to include the internal documentation.
-# The default value is: NO.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES, upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-# The default value is: system dependent.
-
-CASE_SENSE_NAMES       = NO
-
-# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES, the
-# scope will be hidden.
-# The default value is: NO.
-
-HIDE_SCOPE_NAMES       = NO
-
-# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
-# append additional text to a page's title, such as Class Reference. If set to
-# YES the compound reference will be hidden.
-# The default value is: NO.
-
-HIDE_COMPOUND_REFERENCE= NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
-# the files that are included by a file in the documentation of that file.
-# The default value is: YES.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
-# grouped member an include statement to the documentation, telling the reader
-# which file to include in order to use the member.
-# The default value is: NO.
-
-SHOW_GROUPED_MEMB_INC  = NO
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
-# files with double quotes in the documentation rather than with sharp brackets.
-# The default value is: NO.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
-# documentation for inline members.
-# The default value is: YES.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
-# (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO, the members will appear in declaration order.
-# The default value is: YES.
-
-SORT_MEMBER_DOCS       = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
-# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO, the members will appear in declaration order. Note that
-# this will also influence the order of the classes in the class list.
-# The default value is: NO.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
-# (brief and detailed) documentation of class members so that constructors and
-# destructors are listed first. If set to NO the constructors will appear in the
-# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
-# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
-# member documentation.
-# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
-# detailed member documentation.
-# The default value is: NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
-# of group names into alphabetical order. If set to NO the group names will
-# appear in their defined order.
-# The default value is: NO.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
-# fully-qualified names, including namespaces. If set to NO, the class list will
-# be sorted only by class name, not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the alphabetical
-# list.
-# The default value is: NO.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
-# type resolution of all parameters of a function it will reject a match between
-# the prototype and the implementation of a member function even if there is
-# only one candidate or it is obvious which candidate to choose by doing a
-# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
-# accept a match between prototype and implementation in such cases.
-# The default value is: NO.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
-# list. This list is created by putting \todo commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TODOLIST      = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
-# list. This list is created by putting \test commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
-# list. This list is created by putting \bug commands in the documentation.
-# The default value is: YES.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
-# the deprecated list. This list is created by putting \deprecated commands in
-# the documentation.
-# The default value is: YES.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional documentation
-# sections, marked by \if <section_label> ... \endif and \cond <section_label>
-# ... \endcond blocks.
-
-ENABLED_SECTIONS       =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
-# initial value of a variable or macro / define can have for it to appear in the
-# documentation. If the initializer consists of more lines than specified here
-# it will be hidden. Use a value of 0 to hide initializers completely. The
-# appearance of the value of individual variables and macros / defines can be
-# controlled using \showinitializer or \hideinitializer command in the
-# documentation regardless of this setting.
-# Minimum value: 0, maximum value: 10000, default value: 30.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES, the
-# list will mention the files that were used to generate the documentation.
-# The default value is: YES.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
-# will remove the Files entry from the Quick Index and from the Folder Tree View
-# (if specified).
-# The default value is: YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
-# page. This will remove the Namespaces entry from the Quick Index and from the
-# Folder Tree View (if specified).
-# The default value is: YES.
-
-SHOW_NAMESPACES        = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of the
-# FILE_VERSION_FILTER tag, and <input-file> is the name of an input file provided
-# by doxygen. Whatever the program writes to standard output is used as the file
-# version. For an example see the documentation.
-
-FILE_VERSION_FILTER    =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option. You can
-# optionally specify a file name after the option, if omitted DoxygenLayout.xml
-# will be used as the name of the layout file.
-#
-# Note that if you run doxygen from a directory containing a file called
-# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
-# tag is left empty.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
-# the reference definitions. This must be a list of .bib files. The .bib
-# extension is automatically appended if omitted. This requires the bibtex tool
-# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
-# For LaTeX the style of the bibliography can be controlled using
-# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
-# search path. See also \cite for info how to create references.
-
-CITE_BIB_FILES         =
-
-#---------------------------------------------------------------------------
-# Configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated to
-# standard output by doxygen. If QUIET is set to YES this implies that the
-# messages are off.
-# The default value is: NO.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
-# this implies that the warnings are on.
-#
-# Tip: Turn warnings on while writing the documentation.
-# The default value is: YES.
-
-WARNINGS               = YES
-
-# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
-# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
-# will automatically be disabled.
-# The default value is: YES.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some parameters
-# in a documented function, or documenting parameters that don't exist or using
-# markup commands wrongly.
-# The default value is: YES.
-
-WARN_IF_DOC_ERROR      = YES
-
-# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
-# are documented, but have no documentation for their parameters or return
-# value. If set to NO, doxygen will only warn about wrong or incomplete
-# parameter documentation, but not about the absence of documentation.
-# The default value is: NO.
-
-WARN_NO_PARAMDOC       = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that doxygen
-# can produce. The string should contain the $file, $line, and $text tags, which
-# will be replaced by the file and line number from which the warning originated
-# and the warning text. Optionally the format may contain $version, which will
-# be replaced by the version of the file (if it could be obtained via
-# FILE_VERSION_FILTER)
-# The default value is: $file:$line: $text.
-
-WARN_FORMAT            = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning and error
-# messages should be written. If left blank the output is written to standard
-# error (stderr).
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag is used to specify the files and/or directories that contain
-# documented source files. You may enter file names like myfile.cpp or
-# directories like /usr/src/myproject. Separate the files or directories with
-# spaces.
-# Note: If this tag is empty the current directory is searched.
-
-INPUT                  = include/grpc++/alarm.h \
-include/grpc++/channel.h \
-include/grpc++/client_context.h \
-include/grpc++/completion_queue.h \
-include/grpc++/create_channel.h \
-include/grpc++/generic/async_generic_service.h \
-include/grpc++/generic/generic_stub.h \
-include/grpc++/grpc++.h \
-include/grpc++/impl/call.h \
-include/grpc++/impl/client_unary_call.h \
-include/grpc++/impl/codegen/core_codegen.h \
-include/grpc++/impl/grpc_library.h \
-include/grpc++/impl/method_handler_impl.h \
-include/grpc++/impl/rpc_method.h \
-include/grpc++/impl/rpc_service_method.h \
-include/grpc++/impl/serialization_traits.h \
-include/grpc++/impl/server_builder_option.h \
-include/grpc++/impl/server_builder_plugin.h \
-include/grpc++/impl/server_initializer.h \
-include/grpc++/impl/service_type.h \
-include/grpc++/impl/sync.h \
-include/grpc++/impl/sync_cxx11.h \
-include/grpc++/impl/sync_no_cxx11.h \
-include/grpc++/impl/thd.h \
-include/grpc++/impl/thd_cxx11.h \
-include/grpc++/impl/thd_no_cxx11.h \
-include/grpc++/security/auth_context.h \
-include/grpc++/security/auth_metadata_processor.h \
-include/grpc++/security/credentials.h \
-include/grpc++/security/server_credentials.h \
-include/grpc++/server.h \
-include/grpc++/server_builder.h \
-include/grpc++/server_context.h \
-include/grpc++/support/async_stream.h \
-include/grpc++/support/async_unary_call.h \
-include/grpc++/support/byte_buffer.h \
-include/grpc++/support/channel_arguments.h \
-include/grpc++/support/config.h \
-include/grpc++/support/slice.h \
-include/grpc++/support/status.h \
-include/grpc++/support/status_code_enum.h \
-include/grpc++/support/string_ref.h \
-include/grpc++/support/stub_options.h \
-include/grpc++/support/sync_stream.h \
-include/grpc++/support/time.h \
-include/grpc++/impl/codegen/async_stream.h \
-include/grpc++/impl/codegen/async_unary_call.h \
-include/grpc++/impl/codegen/call.h \
-include/grpc++/impl/codegen/call_hook.h \
-include/grpc++/impl/codegen/channel_interface.h \
-include/grpc++/impl/codegen/client_context.h \
-include/grpc++/impl/codegen/client_unary_call.h \
-include/grpc++/impl/codegen/completion_queue.h \
-include/grpc++/impl/codegen/completion_queue_tag.h \
-include/grpc++/impl/codegen/config.h \
-include/grpc++/impl/codegen/core_codegen_interface.h \
-include/grpc++/impl/codegen/create_auth_context.h \
-include/grpc++/impl/codegen/grpc_library.h \
-include/grpc++/impl/codegen/method_handler_impl.h \
-include/grpc++/impl/codegen/rpc_method.h \
-include/grpc++/impl/codegen/rpc_service_method.h \
-include/grpc++/impl/codegen/security/auth_context.h \
-include/grpc++/impl/codegen/serialization_traits.h \
-include/grpc++/impl/codegen/server_context.h \
-include/grpc++/impl/codegen/server_interface.h \
-include/grpc++/impl/codegen/service_type.h \
-include/grpc++/impl/codegen/status.h \
-include/grpc++/impl/codegen/status_code_enum.h \
-include/grpc++/impl/codegen/string_ref.h \
-include/grpc++/impl/codegen/stub_options.h \
-include/grpc++/impl/codegen/sync.h \
-include/grpc++/impl/codegen/sync_cxx11.h \
-include/grpc++/impl/codegen/sync_no_cxx11.h \
-include/grpc++/impl/codegen/sync_stream.h \
-include/grpc++/impl/codegen/time.h \
-include/grpc/impl/codegen/byte_buffer.h \
-include/grpc/impl/codegen/byte_buffer_reader.h \
-include/grpc/impl/codegen/compression_types.h \
-include/grpc/impl/codegen/connectivity_state.h \
-include/grpc/impl/codegen/grpc_types.h \
-include/grpc/impl/codegen/propagation_bits.h \
-include/grpc/impl/codegen/status.h \
-include/grpc/impl/codegen/alloc.h \
-include/grpc/impl/codegen/atm.h \
-include/grpc/impl/codegen/atm_gcc_atomic.h \
-include/grpc/impl/codegen/atm_gcc_sync.h \
-include/grpc/impl/codegen/atm_windows.h \
-include/grpc/impl/codegen/log.h \
-include/grpc/impl/codegen/port_platform.h \
-include/grpc/impl/codegen/slice.h \
-include/grpc/impl/codegen/slice_buffer.h \
-include/grpc/impl/codegen/sync.h \
-include/grpc/impl/codegen/sync_generic.h \
-include/grpc/impl/codegen/sync_posix.h \
-include/grpc/impl/codegen/sync_windows.h \
-include/grpc/impl/codegen/time.h \
-include/grpc++/impl/codegen/config_protobuf.h \
-include/grpc++/support/config_protobuf.h \
-src/cpp/client/secure_credentials.h \
-src/cpp/common/secure_auth_context.h \
-src/cpp/server/secure_server_credentials.h \
-src/cpp/client/create_channel_internal.h \
-src/cpp/server/dynamic_thread_pool.h \
-src/cpp/server/thread_pool_interface.h \
-src/cpp/client/secure_credentials.cc \
-src/cpp/common/auth_property_iterator.cc \
-src/cpp/common/secure_auth_context.cc \
-src/cpp/common/secure_channel_arguments.cc \
-src/cpp/common/secure_create_auth_context.cc \
-src/cpp/server/secure_server_credentials.cc \
-src/cpp/client/channel.cc \
-src/cpp/client/client_context.cc \
-src/cpp/client/create_channel.cc \
-src/cpp/client/create_channel_internal.cc \
-src/cpp/client/credentials.cc \
-src/cpp/client/generic_stub.cc \
-src/cpp/client/insecure_credentials.cc \
-src/cpp/common/channel_arguments.cc \
-src/cpp/common/completion_queue.cc \
-src/cpp/common/core_codegen.cc \
-src/cpp/common/rpc_method.cc \
-src/cpp/server/async_generic_service.cc \
-src/cpp/server/create_default_thread_pool.cc \
-src/cpp/server/dynamic_thread_pool.cc \
-src/cpp/server/insecure_server_credentials.cc \
-src/cpp/server/server.cc \
-src/cpp/server/server_builder.cc \
-src/cpp/server/server_context.cc \
-src/cpp/server/server_credentials.cc \
-src/cpp/util/byte_buffer.cc \
-src/cpp/util/slice.cc \
-src/cpp/util/status.cc \
-src/cpp/util/string_ref.cc \
-src/cpp/util/time.cc \
-src/cpp/codegen/codegen_init.cc
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
-# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
-# documentation (see: http://www.gnu.org/software/libiconv) for the list of
-# possible encodings.
-# The default value is: UTF-8.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank the
-# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
-# *.qsf, *.as and *.js.
-
-FILE_PATTERNS          =
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories should
-# be searched for input files as well.
-# The default value is: NO.
-
-RECURSIVE              = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-#
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-# The default value is: NO.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories.
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories use the pattern */test/*
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or directories
-# that contain example code fragments that are included (see the \include
-# command).
-
-EXAMPLE_PATH           =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank all
-# files are included.
-
-EXAMPLE_PATTERNS       =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude commands
-# irrespective of the value of the RECURSIVE tag.
-# The default value is: NO.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or directories
-# that contain images that are to be included in the documentation (see the
-# \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command:
-#
-# <filter> <input-file>
-#
-# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
-# name of an input file. Doxygen will then use the output that the filter
-# program writes to standard output. If FILTER_PATTERNS is specified, this tag
-# will be ignored.
-#
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form: pattern=filter
-# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
-# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
-# patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS        =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will also be used to filter the input files that are used for
-# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
-# The default value is: NO.
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
-# it is also possible to disable source filtering for a specific pattern using
-# *.ext= (so without naming a filter).
-# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project hosted on, for instance,
-# GitHub and want to reuse the introduction page for the doxygen output as well.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
-# generated. Documented entities will be cross-referenced with these sources.
-#
-# Note: To get rid of all source code in the generated output, make sure that
-# also VERBATIM_HEADERS is set to NO.
-# The default value is: NO.
-
-SOURCE_BROWSER         = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body of functions,
-# classes and enums directly into the documentation.
-# The default value is: NO.
-
-INLINE_SOURCES         = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
-# special comment blocks from generated source code fragments. Normal C, C++ and
-# Fortran comments will always remain visible.
-# The default value is: YES.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
-# function all documented functions referencing it will be listed.
-# The default value is: NO.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES then for each documented function
-# all documented entities called/used by that function will be listed.
-# The default value is: NO.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES then the hyperlinks from functions in REFERENCES_RELATION and
-# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
-# link to the documentation.
-# The default value is: YES.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
-# source code will show a tooltip with additional information such as prototype,
-# brief description and links to the definition and documentation. Since this
-# will make the HTML file larger and loading of large files a bit slower, you
-# can opt to disable this feature.
-# The default value is: YES.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-SOURCE_TOOLTIPS        = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code will
-# point to the HTML generated by the htags(1) tool instead of doxygen built-in
-# source browser. The htags tool is part of GNU's global source tagging system
-# (see http://www.gnu.org/software/global/global.html). You will need version
-# 4.8.6 or higher.
-#
-# To use it do the following:
-# - Install the latest version of global
-# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
-# - Make sure the INPUT points to the root of the source tree
-# - Run doxygen as normal
-#
-# Doxygen will invoke htags (and that will in turn invoke gtags), so these
-# tools must be available from the command line (i.e. in the search path).
-#
-# The result: instead of the source browser generated by doxygen, the links to
-# source code will now point to the output of htags.
-# The default value is: NO.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
-# verbatim copy of the header file for each class for which an include is
-# specified. Set to NO to disable this.
-# See also: Section \class.
-# The default value is: YES.
-
-VERBATIM_HEADERS       = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
-# compounds will be generated. Enable this if the project contains a lot of
-# classes, structs, unions or interfaces.
-# The default value is: YES.
-
-ALPHABETICAL_INDEX     = YES
-
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all classes will
-# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
-# can be used to specify a prefix (or a list of prefixes) that should be ignored
-# while generating the index headers.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-IGNORE_PREFIX          =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
-# The default value is: YES.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_OUTPUT            = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
-# generated HTML page (for example: .htm, .php, .asp).
-# The default value is: .html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
-# each generated HTML page. If the tag is left blank doxygen will generate a
-# standard header.
-#
-# For the HTML to be valid, the header file must include any scripts and style
-# sheets that doxygen needs, which depends on the configuration options used
-# (e.g. the setting GENERATE_TREEVIEW). It is highly recommended to start from a
-# default header generated with
-# doxygen -w html new_header.html new_footer.html new_stylesheet.css
-# YourConfigFile
-# and then modify the file new_header.html. See also section "Doxygen usage"
-# for information on how to generate the default header that doxygen normally
-# uses.
-# Note: The header is subject to change so you typically have to regenerate the
-# default header when upgrading to a newer version of doxygen. For a description
-# of the possible markers and block names see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
-# generated HTML page. If the tag is left blank doxygen will generate a standard
-# footer. See HTML_HEADER for more information on how to generate a default
-# footer and what special commands can be used inside the footer. See also
-# section "Doxygen usage" for information on how to generate the default footer
-# that doxygen normally uses.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
-# sheet that is used by each HTML page. It can be used to fine-tune the look of
-# the HTML output. If left blank doxygen will generate a default style sheet.
-# See also section "Doxygen usage" for information on how to generate the style
-# sheet that doxygen normally uses.
-# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
-# it is more robust and this tag (HTML_STYLESHEET) will in the future become
-# obsolete.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
-# cascading style sheets that are included after the standard style sheets
-# created by doxygen. Using this option one can overrule certain style aspects.
-# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefore more robust against future updates.
-# Doxygen will copy the style sheet files to the output directory.
-# Note: The order of the extra style sheet files is of importance (e.g. the last
-# style sheet in the list overrules the setting of the previous ones in the
-# list). For an example see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_STYLESHEET  =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
-# files will be copied as-is; there are no commands or markers available.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the style sheet and background images according to
-# this color. Hue is specified as an angle on a colorwheel, see
-# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
-# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 is
-# purple, and 360 is red again.
-# Minimum value: 0, maximum value: 359, default value: 220.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
-# in the HTML output. For a value of 0 the output will use grayscales only. A
-# value of 255 will produce the most vivid colors.
-# Minimum value: 0, maximum value: 255, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
-# luminance component of the colors in the HTML output. Values below 100
-# gradually make the output lighter, whereas values above 100 make the output
-# darker. The value divided by 100 is the actual gamma applied, so 80 represents
-# a gamma of 0.8. The value 220 represents a gamma of 2.2, and 100 does not
-# change the gamma.
-# Minimum value: 40, maximum value: 240, default value: 80.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP         = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_DYNAMIC_SECTIONS  = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
-# shown in the various tree structured indices initially; the user can expand
-# and collapse entries dynamically later on. Doxygen will expand the tree to
-# such a level that at most the specified number of entries are visible (unless
-# a fully collapsed tree already exceeds this amount). So setting the number of
-# entries to 1 will produce a fully collapsed tree by default. 0 is a special
-# value representing an infinite number of entries and will result in a fully
-# expanded tree by default.
-# Minimum value: 0, maximum value: 9999, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files will be
-# generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: http://developer.apple.com/tools/xcode/), introduced with
-# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
-# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_DOCSET        = NO
-
-# This tag determines the name of the docset feed. A documentation feed provides
-# an umbrella under which multiple documentation sets from a single provider
-# (such as a company or product suite) can be grouped.
-# The default value is: Doxygen generated docs.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_FEEDNAME        = "Doxygen generated docs"
-
-# This tag specifies a string that should uniquely identify the documentation
-# set bundle. This should be a reverse domain-name style string, e.g.
-# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_BUNDLE_ID       = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-# The default value is: org.doxygen.Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-# The default value is: Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_NAME  = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
-# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
-# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
-#
-# The HTML Help Workshop contains a compiler that can convert all HTML output
-# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
-# files are now used as the Windows 98 help format, and will replace the old
-# Windows help format (.hlp) on all Windows platforms in the future. Compressed
-# HTML files also contain an index, a table of contents, and you can search for
-# words in the documentation. The HTML workshop also contains a viewer for
-# compressed HTML files.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_HTMLHELP      = NO
-
-# The CHM_FILE tag can be used to specify the file name of the resulting .chm
-# file. You can add a path in front of the file if the result should not be
-# written to the html output directory.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_FILE               =
-
-# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler (hhc.exe). If non-empty,
-# doxygen will try to run the HTML help compiler on the generated index.hhp.
-# The file has to be specified with full path.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-HHC_LOCATION           =
-
-# The GENERATE_CHI flag controls if a separate .chi index file is generated
-# (YES) or that it should be included in the master .chm file (NO).
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-GENERATE_CHI           = NO
-
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
-# and project file content.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_INDEX_ENCODING     =
-
-# The BINARY_TOC flag controls whether a binary table of contents is generated
-# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
-# enables the Previous and Next buttons.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members to
-# the table of contents of the HTML help documentation and to the tree view.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
-# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
-# (.qch) of the generated HTML documentation.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
-# the file name of the resulting .qch file. The path specified is relative to
-# the HTML output folder.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
-# Project output. For more information please see Qt Help Project / Namespace
-# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_NAMESPACE          = org.doxygen.Project
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
-# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
-# folders).
-# The default value is: doc.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
-# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches. Qt Help Project / Filter Attributes (see:
-# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
-# generated, together with the HTML files, they form an Eclipse help plugin. To
-# install this plugin and make it available under the help contents menu in
-# Eclipse, the contents of the directory containing the HTML and XML files needs
-# to be copied into the plugins directory of eclipse. The name of the directory
-# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
-# After copying Eclipse needs to be restarted before the help appears.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the Eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have this
-# name. Each documentation set should have its own identifier.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
-
-ECLIPSE_DOC_ID         = org.doxygen.Project
-
-# If you want full control over the layout of the generated HTML pages it might
-# be necessary to disable the index and replace it with your own. The
-# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
-# of each HTML page. A value of NO enables the index and the value YES disables
-# it. Since the tabs in the index contain the same information as the navigation
-# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information. If the tag
-# value is set to YES, a side panel will be generated containing a tree-like
-# index structure (just like the one that is generated for HTML Help). For this
-# to work a browser that supports JavaScript, DHTML, CSS and frames is required
-# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_TREEVIEW      = NO
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
-# doxygen will group on one line in the generated HTML documentation.
-#
-# Note that a value of 0 will completely suppress the enum values from appearing
-# in the overview section.
-# Minimum value: 0, maximum value: 20, default value: 4.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
-# to set the initial width (in pixels) of the frame in which the tree is shown.
-# Minimum value: 0, maximum value: 1500, default value: 250.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-TREEVIEW_WIDTH         = 250
-
-# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
-# external symbols imported via tag files in a separate window.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of LaTeX formulas included as images in
-# the HTML documentation. When you change the font size after a successful
-# doxygen run you need to manually remove any form_*.png images from the HTML
-# output directory to force them to be regenerated.
-# Minimum value: 8, maximum value: 50, default value: 10.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
-# installed or if you want to formulas look prettier in the HTML output. When
-# enabled you may also need to install MathJax separately and configure the path
-# to it using the MATHJAX_RELPATH option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
-# Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
-# The default value is: HTML-CSS.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_FORMAT         = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the HTML
-# output directory using the MATHJAX_RELPATH option. The destination directory
-# should contain the MathJax.js script. For instance, if the mathjax directory
-# is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
-# Content Delivery Network so you can quickly see the result without installing
-# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from http://www.mathjax.org before deployment.
-# The default value is: http://cdn.mathjax.org/mathjax/latest.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering. For example
-# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_EXTENSIONS     =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
-# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
-# example see the documentation.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_CODEFILE       =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
-# the HTML output. The underlying search engine uses javascript and DHTML and
-# should work on any modern browser. Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the javascript based search engine can be slow, then
-# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
-# key> to jump into the search results window, the results can be navigated
-# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
-# the search. The filter options can be selected when the cursor is inside the
-# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
-# to select a filter and <Enter> or <escape> to activate or cancel the filter
-# option.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-SEARCHENGINE           = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript. There
-# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
-# setting. When disabled, doxygen will generate a PHP script for searching and
-# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
-# and searching needs to be provided by external tools. See the section
-# "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SERVER_BASED_SEARCH    = NO
-
-# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
-# script for searching. Instead the search results are written to an XML file
-# which needs to be processed by an external indexer. Doxygen will invoke an
-# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
-# search results.
-#
-# Doxygen ships with an example indexer (doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/).
-#
-# See the section "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH        = NO
-
-# The SEARCHENGINE_URL should point to a search engine hosted by a web server
-# which will return the search results when EXTERNAL_SEARCH is enabled.
-#
-# Doxygen ships with an example indexer (doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/). See the section "External Indexing and
-# Searching" for details.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHENGINE_URL       =
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
-# search data is written to a file for indexing by an external tool. With the
-# SEARCHDATA_FILE tag the name of this file can be specified.
-# The default file is: searchdata.xml.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHDATA_FILE        = searchdata.xml
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
-# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
-# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
-# projects and redirect the results back to the right project.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH_ID     =
-
-# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
-# projects other than the one defined by this configuration file, but that are
-# all added to the same external search index. Each project needs to have a
-# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
-# to a relative location where the documentation can be found. The format is:
-# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTRA_SEARCH_MAPPINGS  =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
-# The default value is: YES.
-
-GENERATE_LATEX         = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked.
-#
-# Note that when enabling USE_PDFLATEX this option is only used for generating
-# bitmaps for formulas in the HTML output, but not in the Makefile that is
-# written to the output directory.
-# The default file is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_CMD_NAME         = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
-# index for LaTeX.
-# The default file is: makeindex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used by the
-# printer.
-# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
-# 14 inches) and executive (7.25 x 10.5 inches).
-# The default value is: a4.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PAPER_TYPE             = a4
-
-# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
-# that should be included in the LaTeX output. To get the times font for
-# instance you can specify
-# EXTRA_PACKAGES=times
-# If left blank no extra packages will be included.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
-# generated LaTeX document. The header should contain everything until the first
-# chapter. If it is left blank doxygen will generate a standard header. See
-# section "Doxygen usage" for information on how to let doxygen write the
-# default header to a separate file.
-#
-# Note: Only use a user-defined header if you know what you are doing! The
-# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
-# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
-# string, for the replacement values of the other commands the user is referred
-# to HTML_HEADER.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
-# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer. See
-# LATEX_HEADER for more information on how to generate a default footer and what
-# special commands can be used inside the footer.
-#
-# Note: Only use a user-defined footer if you know what you are doing!
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_FOOTER           =
-
-# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
-# LaTeX style sheets that are included after the standard style sheets created
-# by doxygen. Using this option one can overrule certain style aspects. Doxygen
-# will copy the style sheet files to the output directory.
-# Note: The order of the extra style sheet files is of importance (e.g. the last
-# style sheet in the list overrules the setting of the previous ones in the
-# list).
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_EXTRA_STYLESHEET =
-
-# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the LATEX_OUTPUT output
-# directory. Note that the files will be copied as-is; there are no commands or
-# markers available.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_EXTRA_FILES      =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
-# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
-# contain links (just like the HTML output) instead of page references. This
-# makes the output suitable for online browsing using a PDF viewer.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PDF_HYPERLINKS         = YES
-
-# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES, to get a
-# higher quality PDF documentation.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-USE_PDFLATEX           = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep running
-# if errors occur, instead of asking the user for help. This option is also used
-# when generating formulas in HTML.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BATCHMODE        = NO
-
-# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
-# index chapters (such as File Index, Compound Index, etc.) in the output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HIDE_INDICES     = NO
-
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. See
-# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
-# The default value is: plain.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BIB_STYLE        = plain
-
-#---------------------------------------------------------------------------
-# Configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
-# RTF output is optimized for Word 97 and may not look too pretty with other RTF
-# readers/editors.
-# The default value is: NO.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: rtf.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
-# contain hyperlink fields. The RTF file will contain links (just like the HTML
-# output) instead of page references. This makes the output suitable for online
-# browsing using Word or some other Word compatible readers that support those
-# fields.
-#
-# Note: WordPad (write) and others do not support links.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_HYPERLINKS         = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's config
-# file, i.e. a series of assignments. You only have to provide replacements,
-# missing definitions are set to their default value.
-#
-# See also section "Doxygen usage" for information on how to generate the
-# default style sheet that doxygen normally uses.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an RTF document. Syntax is
-# similar to doxygen's config file. A template extensions file can be generated
-# using doxygen -e rtf extensionFile.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_EXTENSIONS_FILE    =
-
-# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
-# with syntax highlighting in the RTF output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_SOURCE_CODE        = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
-# classes and files.
-# The default value is: NO.
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it. A directory man3 will be created inside the directory specified by
-# MAN_OUTPUT.
-# The default directory is: man.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to the generated
-# man pages. In case the manual section does not start with a number, the number
-# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
-# optional.
-# The default value is: .3.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_EXTENSION          = .3
-
-# The MAN_SUBDIR tag determines the name of the directory created within
-# MAN_OUTPUT in which the man pages are placed. If defaults to man followed by
-# MAN_EXTENSION with the initial . removed.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_SUBDIR             =
-
-# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
-# will generate one additional man file for each entity documented in the real
-# man page(s). These additional files only source the real man page, but without
-# them the man command would be unable to find the correct page.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
-# captures the structure of the code including all documentation.
-# The default value is: NO.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: xml.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_OUTPUT             = xml
-
-# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
-# listings (including syntax highlighting and cross-referencing information) to
-# the XML output. Note that enabling this will significantly increase the size
-# of the XML output.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the DOCBOOK output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
-# that can be used to generate PDF.
-# The default value is: NO.
-
-GENERATE_DOCBOOK       = NO
-
-# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
-# front of it.
-# The default directory is: docbook.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_OUTPUT         = docbook
-
-# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
-# program listings (including syntax highlighting and cross-referencing
-# information) to the DOCBOOK output. Note that enabling this will significantly
-# increase the size of the DOCBOOK output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_PROGRAMLISTING = NO
-
-#---------------------------------------------------------------------------
-# Configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
-# AutoGen Definitions (see http://autogen.sf.net) file that captures the
-# structure of the code including all documentation. Note that this feature is
-# still experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
-# file that captures the structure of the code including all documentation.
-#
-# Note that this feature is still experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
-# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
-# output from the Perl module output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
-# formatted so it can be parsed by a human reader. This is useful if you want to
-# understand what is going on. On the other hand, if this tag is set to NO, the
-# size of the Perl module output will be much smaller and Perl will parse it
-# just the same.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file are
-# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
-# so different doxyrules.make files included by the same Makefile don't
-# overwrite each other's variables.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
-# C-preprocessor directives found in the sources and include files.
-# The default value is: YES.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
-# in the source code. If set to NO, only conditional compilation will be
-# performed. Macro expansion can be done in a controlled way by setting
-# EXPAND_ONLY_PREDEF to YES.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-MACRO_EXPANSION        = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
-# the macro expansion is limited to the macros specified with the PREDEFINED and
-# EXPAND_AS_DEFINED tags.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_ONLY_PREDEF     = NO
-
-# If the SEARCH_INCLUDES tag is set to YES, the include files in the
-# INCLUDE_PATH will be searched if a #include is found.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by the
-# preprocessor.
-# This tag requires that the tag SEARCH_INCLUDES is set to YES.
-
-INCLUDE_PATH           =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will be
-# used.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-INCLUDE_FILE_PATTERNS  =
-
-# The PREDEFINED tag can be used to specify one or more macro names that are
-# defined before the preprocessor is started (similar to the -D option of e.g.
-# gcc). The argument of the tag is a list of macros of the form: name or
-# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
-# is assumed. To prevent a macro definition from being undefined via #undef or
-# recursively expanded use the := operator instead of the = operator.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-PREDEFINED             = GRPC_FINAL= GRPC_OVERIDE=
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
-# tag can be used to specify a list of macro names that should be expanded. The
-# macro definition that is found in the sources will be used. Use the PREDEFINED
-# tag if you want to use a different macro definition that overrules the
-# definition found in the source code.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_AS_DEFINED      =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
-# remove all references to function-like macros that are alone on a line, have
-# an all uppercase name, and do not end with a semicolon. Such function macros
-# are typically used for boiler-plate code, and will confuse the parser if not
-# removed.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SKIP_FUNCTION_MACROS   = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES tag can be used to specify one or more tag files. For each tag
-# file the location of the external documentation should be added. The format of
-# a tag file without this location is as follows:
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where loc1 and loc2 can be relative or absolute paths or URLs. See the
-# section "Linking to external documentation" for more information about the use
-# of tag files.
-# Note: Each tag file must have a unique name (where the name does NOT include
-# the path). If a tag file is not located in the directory in which doxygen is
-# run, you must also specify the path to the tagfile here.
-
-TAGFILES               =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
-# tag file that is based on the input files it reads. See section "Linking to
-# external documentation" for more information about the usage of tag files.
-
-GENERATE_TAGFILE       =
-
-# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
-# the class index. If set to NO, only the inherited external classes will be
-# listed.
-# The default value is: NO.
-
-ALLEXTERNALS           = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will be
-# listed.
-# The default value is: YES.
-
-EXTERNAL_GROUPS        = YES
-
-# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
-# the related pages index. If set to NO, only the current project's pages will
-# be listed.
-# The default value is: YES.
-
-EXTERNAL_PAGES         = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of 'which perl').
-# The default file (with absolute path) is: /usr/bin/perl.
-
-PERL_PATH              = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
-# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
-# NO turns the diagrams off. Note that this option also works with HAVE_DOT
-# disabled, but it is recommended to install and use dot, since it yields more
-# powerful graphs.
-# The default value is: YES.
-
-CLASS_DIAGRAMS         = YES
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see:
-# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
-# You can include diagrams made with dia in doxygen documentation. Doxygen will
-# then run dia to produce the diagram and insert it in the documentation. The
-# DIA_PATH tag allows you to specify the directory where the dia binary resides.
-# If left empty dia is assumed to be found in the default search path.
-
-DIA_PATH               =
-
-# If set to YES the inheritance and collaboration graphs will hide inheritance
-# and usage relations if the target is undocumented or is not a class.
-# The default value is: YES.
-
-HIDE_UNDOC_RELATIONS   = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz (see:
-# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
-# Bell Labs. The other options in this section have no effect if this option is
-# set to NO
-# The default value is: NO.
-
-HAVE_DOT               = YES
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
-# to run in parallel. When set to 0 doxygen will base this on the number of
-# processors available in the system. You can set it explicitly to a value
-# larger than 0 to get control over the balance between CPU load and processing
-# speed.
-# Minimum value: 0, maximum value: 32, default value: 0.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_NUM_THREADS        = 0
-
-# When you want a differently looking font in the dot files that doxygen
-# generates you can specify the font name using DOT_FONTNAME. You need to make
-# sure dot is able to find the font, which can be done by putting it in a
-# standard location or by setting the DOTFONTPATH environment variable or by
-# setting DOT_FONTPATH to the directory containing the font.
-# The default value is: Helvetica.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTNAME           = Helvetica
-
-# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
-# dot graphs.
-# Minimum value: 4, maximum value: 24, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTSIZE           = 10
-
-# By default doxygen will tell dot to use the default font as specified with
-# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
-# the path where dot can find it using this tag.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTPATH           =
-
-# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
-# each documented class showing the direct and indirect inheritance relations.
-# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CLASS_GRAPH            = NO
-
-# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
-# graph for each documented class showing the direct and indirect implementation
-# dependencies (inheritance, containment, and class references variables) of the
-# class with other documented classes.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-COLLABORATION_GRAPH    = NO
-
-# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
-# groups, showing the direct groups dependencies.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GROUP_GRAPHS           = NO
-
-# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-UML_LOOK               = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
-# class node. If there are many fields or methods and many nodes the graph may
-# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
-# number of items for each type to make the size more manageable. Set this to 0
-# for no limit. Note that the threshold may be exceeded by 50% before the limit
-# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
-# but if the number exceeds 15, the total amount of fields shown is limited to
-# 10.
-# Minimum value: 0, maximum value: 100, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-UML_LIMIT_NUM_FIELDS   = 10
-
-# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
-# collaboration graphs will show the relations between templates and their
-# instances.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-TEMPLATE_RELATIONS     = NO
-
-# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
-# YES then doxygen will generate a graph for each documented file showing the
-# direct and indirect include dependencies of the file with other documented
-# files.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INCLUDE_GRAPH          = NO
-
-# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
-# set to YES then doxygen will generate a graph for each documented file showing
-# the direct and indirect include dependencies of the file with other documented
-# files.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INCLUDED_BY_GRAPH      = NO
-
-# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
-# dependency graph for every global function or class method.
-#
-# Note that enabling this option will significantly increase the time of a run.
-# So in most cases it will be better to enable call graphs for selected
-# functions only using the \callgraph command.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CALL_GRAPH             = NO
-
-# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
-# dependency graph for every global function or class method.
-#
-# Note that enabling this option will significantly increase the time of a run.
-# So in most cases it will be better to enable caller graphs for selected
-# functions only using the \callergraph command.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CALLER_GRAPH           = NO
-
-# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
-# hierarchy of all classes instead of a textual one.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GRAPHICAL_HIERARCHY    = NO
-
-# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
-# dependencies a directory has on other directories in a graphical way. The
-# dependency relations are determined by the #include relations between the
-# files in the directories.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DIRECTORY_GRAPH        = NO
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot.
-# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
-# to make the SVG files visible in IE 9+ (other browsers do not have this
-# requirement).
-# Possible values are: png, jpg, gif and svg.
-# The default value is: png.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_IMAGE_FORMAT       = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-#
-# Note that this requires a modern browser other than Internet Explorer. Tested
-# and working are Firefox, Chrome, Safari, and Opera.
-# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
-# the SVG files visible. Older versions of IE do not have SVG support.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INTERACTIVE_SVG        = NO
-
-# The DOT_PATH tag can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_PATH               =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the \dotfile
-# command).
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOTFILE_DIRS           =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the \mscfile
-# command).
-
-MSCFILE_DIRS           =
-
-# The DIAFILE_DIRS tag can be used to specify one or more directories that
-# contain dia files that are included in the documentation (see the \diafile
-# command).
-
-DIAFILE_DIRS           =
-
-# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
-# path where java can find the plantuml.jar file. If left blank, it is assumed
-# PlantUML is not used or called during a preprocessing step. Doxygen will
-# generate a warning when it encounters a \startuml command in this case and
-# will not generate output for the diagram.
-
-PLANTUML_JAR_PATH      =
-
-# When using plantuml, the specified paths are searched for files specified by
-# the !include statement in a plantuml block.
-
-PLANTUML_INCLUDE_PATH  =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
-# that will be shown in the graph. If the number of nodes in a graph becomes
-# larger than this value, doxygen will truncate the graph, which is visualized
-# by representing a node as a red box. Note that doxygen if the number of direct
-# children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
-# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-# Minimum value: 0, maximum value: 10000, default value: 50.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_GRAPH_MAX_NODES    = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
-# generated by dot. A depth value of 3 means that only nodes reachable from the
-# root by following a path via at most 3 edges will be shown. Nodes that lay
-# further from the root node will be omitted. Note that setting this option to 1
-# or 2 may greatly reduce the computation time needed for large code bases. Also
-# note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-# Minimum value: 0, maximum value: 1000, default value: 0.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-MAX_DOT_GRAPH_DEPTH    = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not seem
-# to support this out of the box.
-#
-# Warning: Depending on the platform used, enabling this option may lead to
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
-# read).
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_TRANSPARENT        = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10) support
-# this, this feature is disabled by default.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_MULTI_TARGETS      = NO
-
-# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
-# explaining the meaning of the various boxes and arrows in the dot generated
-# graphs.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GENERATE_LEGEND        = YES
-
-# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
-# files that are used to generate the various graphs.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_CLEANUP            = YES
-
diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core
index 72102b2fc5026a05f398fd9192342e359434fcd1..e631c962b3d5155f73fcae2136abe295946db0b6 100644
--- a/tools/doxygen/Doxyfile.core
+++ b/tools/doxygen/Doxyfile.core
@@ -40,7 +40,7 @@ PROJECT_NAME           = "GRPC Core"
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 0.15.0-dev
+PROJECT_NUMBER         = 1.1.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index 8fa00dc5a44ed091dd2c1a2904e4960f48737cee..38f5fa65f45234fda05d38b74df29980b37dd379 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -40,7 +40,7 @@ PROJECT_NAME           = "GRPC Core"
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 0.15.0-dev
+PROJECT_NUMBER         = 1.1.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -808,6 +808,7 @@ src/core/lib/iomgr/closure.h \
 src/core/lib/iomgr/endpoint.h \
 src/core/lib/iomgr/endpoint_pair.h \
 src/core/lib/iomgr/error.h \
+src/core/lib/iomgr/ev_epoll_linux.h \
 src/core/lib/iomgr/ev_poll_and_epoll_posix.h \
 src/core/lib/iomgr/ev_poll_posix.h \
 src/core/lib/iomgr/ev_posix.h \
@@ -818,6 +819,7 @@ src/core/lib/iomgr/iomgr.h \
 src/core/lib/iomgr/iomgr_internal.h \
 src/core/lib/iomgr/iomgr_posix.h \
 src/core/lib/iomgr/load_file.h \
+src/core/lib/iomgr/network_status_tracker.h \
 src/core/lib/iomgr/polling_entity.h \
 src/core/lib/iomgr/pollset.h \
 src/core/lib/iomgr/pollset_set.h \
@@ -859,7 +861,6 @@ src/core/lib/surface/event_string.h \
 src/core/lib/surface/init.h \
 src/core/lib/surface/lame_client.h \
 src/core/lib/surface/server.h \
-src/core/lib/surface/surface_trace.h \
 src/core/lib/transport/byte_stream.h \
 src/core/lib/transport/connectivity_state.h \
 src/core/lib/transport/metadata.h \
@@ -964,6 +965,7 @@ src/core/lib/iomgr/endpoint.c \
 src/core/lib/iomgr/endpoint_pair_posix.c \
 src/core/lib/iomgr/endpoint_pair_windows.c \
 src/core/lib/iomgr/error.c \
+src/core/lib/iomgr/ev_epoll_linux.c \
 src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
 src/core/lib/iomgr/ev_poll_posix.c \
 src/core/lib/iomgr/ev_posix.c \
@@ -974,6 +976,7 @@ src/core/lib/iomgr/iomgr.c \
 src/core/lib/iomgr/iomgr_posix.c \
 src/core/lib/iomgr/iomgr_windows.c \
 src/core/lib/iomgr/load_file.c \
+src/core/lib/iomgr/network_status_tracker.c \
 src/core/lib/iomgr/polling_entity.c \
 src/core/lib/iomgr/pollset_set_windows.c \
 src/core/lib/iomgr/pollset_windows.c \
diff --git a/tools/gce/linux_performance_worker_init.sh b/tools/gce/linux_performance_worker_init.sh
index 9b8d1d1eb7430c8132e444fd5848a6fe9f97e6ef..23a8f082b26ecc78606c446eca2bb14aa3801b72 100755
--- a/tools/gce/linux_performance_worker_init.sh
+++ b/tools/gce/linux_performance_worker_init.sh
@@ -90,13 +90,11 @@ sudo apt-get install -y libgflags-dev libgtest-dev libc++-dev clang
 # Python dependencies
 sudo pip install tabulate
 sudo pip install google-api-python-client
-sudo pip install tox
 
 curl -O https://bootstrap.pypa.io/get-pip.py
 sudo pypy get-pip.py
 sudo pypy -m pip install tabulate
 sudo pip install google-api-python-client
-sudo pip install tox
 
 # Node dependencies (nvm has to be installed under user jenkins)
 touch .profile
diff --git a/tools/profiling/latency_profile/run_latency_profile.sh b/tools/profiling/latency_profile/run_latency_profile.sh
index 54a25a9cb73ff23c34135c29298ab1fb7a77698d..618db202dc4b5bd83f600fa20962b4e0086809c3 100755
--- a/tools/profiling/latency_profile/run_latency_profile.sh
+++ b/tools/profiling/latency_profile/run_latency_profile.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2015, Google Inc.
+# Copyright 2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -28,17 +28,61 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# format argument via
+# $ echo '{...}' | python -mjson.tool
+read -r -d '' SCENARIOS_JSON_ARG <<'EOF'
+{
+    "scenarios": [
+        {
+            "benchmark_seconds": 5,
+            "client_config": {
+                "client_channels": 1,
+                "client_type": "SYNC_CLIENT",
+                "histogram_params": {
+                    "max_possible": 60000000000.0,
+                    "resolution": 0.01
+                },
+                "load_params": {
+                    "closed_loop": {}
+                },
+                "outstanding_rpcs_per_channel": 1,
+                "payload_config": {
+                    "simple_params": {
+                        "req_size": 0,
+                        "resp_size": 0
+                    }
+                },
+                "rpc_type": "UNARY",
+                "security_params": {
+                    "server_host_override": "foo.test.google.fr",
+                    "use_test_ca": true
+                }
+            },
+            "name": "cpp_protobuf_sync_unary_ping_pong_secure",
+            "num_clients": 1,
+            "num_servers": 1,
+            "server_config": {
+                "core_limit": 1,
+                "security_params": {
+                    "server_host_override": "foo.test.google.fr",
+                    "use_test_ca": true
+                },
+                "server_type": "SYNC_SERVER"
+            },
+            "spawn_local_worker_count": 2,
+            "warmup_seconds": 5
+        }
+    ]
+}
+
+EOF
+
 set -ex
 
 cd $(dirname $0)/../../..
 
-BINS="sync_unary_ping_pong_test sync_streaming_ping_pong_test"
 CPUS=`python -c 'import multiprocessing; print multiprocessing.cpu_count()'`
 
-make CONFIG=basicprof -j$CPUS $BINS
-
-mkdir -p reports
-
 # try to use pypy for generating reports
 # each trace dumps 7-8gig of text to disk, and processing this into a report is
 # heavyweight - so any speed boost is worthwhile
@@ -49,35 +93,14 @@ else
   PYTHON=python2.7
 fi
 
-# start processes, interleaving report index generation
-echo '<html><head></head><body>' > reports/index.html
-for bin in $BINS
-do
-  bins/basicprof/$bin
-  mv latency_trace.txt $bin.trace
-  echo "<a href='$bin.txt'>$bin</a><br/>" >> reports/index.html
-done
-pids=""
-# generate report pages... this will take some time
-# run them in parallel: they take 1 cpu each
-for bin in $BINS
-do
-  $PYTHON tools/profiling/latency_profile/profile_analyzer.py \
-    --source=$bin.trace --fmt=simple > reports/$bin.txt &
-  pids+=" $!"
-done
-echo '</body></html>' >> reports/index.html
+make CONFIG=basicprof -j$CPUS qps_json_driver
 
-# make sure we kill the report generation if something goes wrong
-trap "kill $pids || true" 0
+mkdir -p reports
+bins/basicprof/qps_json_driver --scenarios_json="$SCENARIOS_JSON_ARG"
 
-# finally, wait for the background report generation to finish
-for pid in $pids
-do
-	if wait $pid
-	then
-		echo "Finished $pid"
-	else
-		exit 1
-	fi
-done
+echo '<html><head></head><body>Latency profile for:<br/>' > reports/index.html
+echo "<p><pre>${SCENARIOS_JSON_ARG}</pre></p>" >> reports/index.html
+echo '<p><pre>' >> reports/index.html
+$PYTHON tools/profiling/latency_profile/profile_analyzer.py \
+    --source=latency_trace.txt --fmt=simple >> reports/index.html
+echo '</pre></p></body></html>' >> reports/index.html
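
Editor's note: the rewritten run_latency_profile.sh above drives a single qps_json_driver run from an inline scenarios JSON heredoc instead of looping over dedicated ping-pong binaries. The following is a small, hypothetical helper (not part of the patch or of gRPC's tooling) for sanity-checking such a scenarios blob before handing it to the driver; it mirrors the "python -mjson.tool" hint in the script's comment, and the specific field checks are assumptions made for illustration only.

    import json
    import sys

    def check_scenarios(raw):
        """Parse a scenarios JSON string and do a few basic sanity checks."""
        data = json.loads(raw)  # raises ValueError on malformed JSON
        for scenario in data.get('scenarios', []):
            # Illustrative assumption: each scenario should at least name itself
            # and carry client/server configs, as in the blob embedded above.
            assert 'name' in scenario
            assert 'client_config' in scenario and 'server_config' in scenario
        return data

    if __name__ == '__main__':
        blob = sys.stdin.read()
        # Pretty-print the validated blob, similar to `python -mjson.tool`.
        print(json.dumps(check_scenarios(blob), indent=4, sort_keys=True))

Usage would be along the lines of `cat scenarios.json | python check_scenarios.py`, failing fast if the JSON is malformed or a scenario is missing an expected section.
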
diff --git a/tools/run_tests/artifact_targets.py b/tools/run_tests/artifact_targets.py
index bd1269ceb723542128db82d6c46d5b7fab608b52..e9267be58b4054ab4f976ca653c6f234934d22d1 100644
--- a/tools/run_tests/artifact_targets.py
+++ b/tools/run_tests/artifact_targets.py
@@ -40,7 +40,7 @@ def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
   environ['RUN_COMMAND'] = shell_command
 
   docker_args=[]
-  for k,v in environ.iteritems():
+  for k,v in environ.items():
     docker_args += ['-e', '%s=%s' % (k, v)]
   docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
                 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
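
Editor's note: the artifact_targets.py hunk above swaps dict.iteritems() for dict.items() so create_docker_jobspec also works under Python 3, where iteritems() no longer exists. A minimal sketch of the resulting behaviour, with illustrative environ values that are not taken from the patch:

    # Sketch: building docker "-e" arguments from an environment dict.
    # dict.items() exists on both Python 2 and Python 3; dict.iteritems()
    # was removed in Python 3, which is why the patch drops it.
    environ = {'RUN_COMMAND': 'make', 'CONFIG': 'opt'}  # illustrative values
    docker_args = []
    for k, v in environ.items():
        docker_args += ['-e', '%s=%s' % (k, v)]
    # docker_args == ['-e', 'RUN_COMMAND=make', '-e', 'CONFIG=opt'] (order may vary on Python 2)
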
diff --git a/tools/run_tests/build_artifact_python.bat b/tools/run_tests/build_artifact_python.bat
index fea0275426fad12b93d4f54f2ff270161d549816..7c8c2aa12d74211759beadf47f8de17b48ec5690 100644
--- a/tools/run_tests/build_artifact_python.bat
+++ b/tools/run_tests/build_artifact_python.bat
@@ -28,38 +28,24 @@
 @rem OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
-set NUGET=C:\nuget\nuget.exe
-%NUGET% restore vsprojects\grpc.sln || goto :error
-
-
-@call vsprojects\build_vs2013.bat vsprojects\grpc.sln /t:grpc_dll /p:Configuration=Release /p:PlatformToolset=v120 /p:Platform=Win32 || goto :error
-@call vsprojects\build_vs2013.bat vsprojects\grpc.sln /t:grpc_dll /p:Configuration=Release /p:PlatformToolset=v120 /p:Platform=x64   || goto :error
-
-mkdir src\python\grpcio\grpc\_cython\_windows
-
-copy /Y vsprojects\Release\grpc_dll.dll src\python\grpcio\grpc\_cython\_windows\grpc_c.32.python || goto :error
-copy /Y vsprojects\x64\Release\grpc_dll.dll src\python\grpcio\grpc\_cython\_windows\grpc_c.64.python || goto :error
-
-
 set PATH=C:\%1;C:\%1\scripts;C:\msys64\mingw%2\bin;%PATH%
 
 pip install --upgrade six
 pip install --upgrade setuptools
 pip install -rrequirements.txt
 
-set GRPC_PYTHON_USE_CUSTOM_BDIST=0
-set GRPC_PYTHON_BUILD_WITH_CYTHON=1
-
-@rem TODO(atash): maybe we could avoid the grpc_c.(32|64).python shim above if
-@rem this used the right python build?
-python setup.py bdist_wheel
-
-@rem Build gRPC Python tools
-@rem
 @rem Because this is Windows and *everything seems to hate Windows*, we have to
 @rem set all of these flags ourselves since Python won't help us (see the
 @rem setup.py of the grpcio_tools project).
 set GRPC_PYTHON_CFLAGS=-fno-wrapv -frtti -std=c++11
+
+@rem See https://sourceforge.net/p/mingw-w64/bugs/363/
+if %2 == 32 (
+  set GRPC_PYTHON_CFLAGS=%GRPC_PYTHON_CFLAGS% -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s
+) else (
+  set GRPC_PYTHON_CFLAGS=%GRPC_PYTHON_CFLAGS% -D_ftime=_ftime64 -D_timeb=__timeb64
+)
+
 @rem Further confusing things, MSYS2's mingw64 tries to dynamically link
 @rem libgcc, libstdc++, and winpthreads. We have to override this or our
 @rem extensions end up linking to MSYS2 DLLs, which the normal Python on
@@ -70,12 +56,19 @@ set GRPC_PYTHON_CFLAGS=-fno-wrapv -frtti -std=c++11
 python -c "from distutils.cygwinccompiler import get_msvcr; print(get_msvcr()[0])" > temp.txt
 set /p PYTHON_MSVCR=<temp.txt
 set GRPC_PYTHON_LDFLAGS=-static-libgcc -static-libstdc++ -mcrtdll=%PYTHON_MSVCR% -static -lpthread
+
+set GRPC_PYTHON_BUILD_WITH_CYTHON=1
+
+
+@rem Set up gRPC Python tools
 python tools\distrib\python\make_grpcio_tools.py
-if %2 == 32 (
-  python tools\distrib\python\grpcio_tools\setup.py build_ext -c mingw32
-) else (
-  python tools\distrib\python\grpcio_tools\setup.py build_ext -c mingw32 -DMS_WIN64
-)
+
+@rem Build gRPC Python extensions
+python setup.py build_ext -c mingw32
+python tools\distrib\python\grpcio_tools\setup.py build_ext -c mingw32
+
+@rem Build gRPC Python distributions
+python setup.py bdist_wheel
 python tools\distrib\python\grpcio_tools\setup.py bdist_wheel
 
 mkdir artifacts
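
The batch script asks the target CPython which MSVC runtime it links against and feeds the result into -mcrtdll=... so the MinGW-built extensions pull in the matching CRT; the _ftime/_timeb defines work around the mingw-w64 header bug referenced above. A hedged Python sketch of the same derivation (flag strings mirror the script; the helper names are illustrative, and get_msvcr() is only meaningful for an MSVC-built CPython on Windows):

```python
# Sketch of the flag derivation above: ask the interpreter for its MSVC runtime
# (e.g. get_msvcr() -> ['msvcr90'] for CPython 2.7), then build the MinGW flags
# the script exports.
from distutils.cygwinccompiler import get_msvcr

def grpc_python_ldflags():
    msvcr = get_msvcr()[0]
    return ('-static-libgcc -static-libstdc++ '
            '-mcrtdll=%s -static -lpthread' % msvcr)

def grpc_python_cflags(bits):
    flags = '-fno-wrapv -frtti -std=c++11'
    # See https://sourceforge.net/p/mingw-w64/bugs/363/
    if bits == 32:
        flags += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
    else:
        flags += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
    return flags
```
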
diff --git a/tools/run_tests/build_csharp_coreclr.bat b/tools/run_tests/build_csharp_coreclr.bat
new file mode 100644
index 0000000000000000000000000000000000000000..cead6d0e02c56427e5cd20924f6b0edb664d5ca8
--- /dev/null
+++ b/tools/run_tests/build_csharp_coreclr.bat
@@ -0,0 +1,44 @@
+@rem Copyright 2016, Google Inc.
+@rem All rights reserved.
+@rem
+@rem Redistribution and use in source and binary forms, with or without
+@rem modification, are permitted provided that the following conditions are
+@rem met:
+@rem
+@rem     * Redistributions of source code must retain the above copyright
+@rem notice, this list of conditions and the following disclaimer.
+@rem     * Redistributions in binary form must reproduce the above
+@rem copyright notice, this list of conditions and the following disclaimer
+@rem in the documentation and/or other materials provided with the
+@rem distribution.
+@rem     * Neither the name of Google Inc. nor the names of its
+@rem contributors may be used to endorse or promote products derived from
+@rem this software without specific prior written permission.
+@rem
+@rem THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+@rem "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+@rem LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+@rem A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+@rem OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+@rem SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+@rem LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+@rem DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+@rem THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+@rem (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+@rem OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+setlocal
+
+cd /d %~dp0\..\..\src\csharp
+
+dotnet restore . || goto :error
+
+dotnet build -f netstandard1.5 --configuration %MSBUILD_CONFIG% "**/project.json" || goto :error
+
+endlocal
+
+goto :EOF
+
+:error
+echo Failed!
+exit /b %errorlevel%
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/__init__.py b/tools/run_tests/build_csharp_coreclr.sh
old mode 100644
new mode 100755
similarity index 88%
rename from src/python/grpcio/tests/unit/framework/interfaces/__init__.py
rename to tools/run_tests/build_csharp_coreclr.sh
index 708651910607ffb686d781713f6893567821b9fd..733b1a2083cd438aa2d67a9eee0acacbb0cf1b2b
--- a/src/python/grpcio/tests/unit/framework/interfaces/__init__.py
+++ b/tools/run_tests/build_csharp_coreclr.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 # Copyright 2015, Google Inc.
 # All rights reserved.
 #
@@ -27,4 +28,11 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+set -ex
 
+cd $(dirname $0)/../../src/csharp
+
+# TODO(jtattermusch): introduce caching
+dotnet restore .
+
+dotnet build -f netstandard1.5 --configuration $MSBUILD_CONFIG '**/project.json'
diff --git a/src/python/grpcio/grpc/_adapter/_common.py b/tools/run_tests/build_package_csharp_coreclr.sh
old mode 100644
new mode 100755
similarity index 51%
rename from src/python/grpcio/grpc/_adapter/_common.py
rename to tools/run_tests/build_package_csharp_coreclr.sh
index 492849f4cbbdc0ac28074ccc0c5f28db37c9ea0d..e1c363da50bfc1221589b936ec1ab665789c57f6
--- a/src/python/grpcio/grpc/_adapter/_common.py
+++ b/tools/run_tests/build_package_csharp_coreclr.sh
@@ -1,4 +1,5 @@
-# Copyright 2015, Google Inc.
+#!/bin/bash
+# Copyright 2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -27,50 +28,32 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""State used by both invocation-side and service-side code."""
+set -ex
 
-import enum
+cd $(dirname $0)/../..
 
+mkdir -p artifacts/
 
-@enum.unique
-class HighWrite(enum.Enum):
-  """The possible categories of high-level write state."""
+cd src/csharp
 
-  OPEN = 'OPEN'
-  CLOSED = 'CLOSED'
+# IMPORTANT: NuGet packages generated by dotnet CLI are considered experimental.
+# The official nugets are generated by src/csharp/build_packages.bat
 
+mkdir -p nativelibs/windows_x86 nativelibs/windows_x64 \
+    nativelibs/linux_x86 nativelibs/linux_x64 \
+    nativelibs/macosx_x86 nativelibs/macosx_x64
 
-class WriteState(object):
-  """A description of the state of writing to an RPC.
+cp $EXTERNAL_GIT_ROOT/architecture=x86,language=csharp,platform=windows/artifacts/* nativelibs/windows_x86 || true
+cp $EXTERNAL_GIT_ROOT/architecture=x64,language=csharp,platform=windows/artifacts/* nativelibs/windows_x64 || true
+cp $EXTERNAL_GIT_ROOT/architecture=x86,language=csharp,platform=linux/artifacts/* nativelibs/linux_x86 || true
+cp $EXTERNAL_GIT_ROOT/architecture=x64,language=csharp,platform=linux/artifacts/* nativelibs/linux_x64 || true
+cp $EXTERNAL_GIT_ROOT/architecture=x86,language=csharp,platform=macos/artifacts/* nativelibs/macosx_x86 || true
+cp $EXTERNAL_GIT_ROOT/architecture=x64,language=csharp,platform=macos/artifacts/* nativelibs/macosx_x64 || true
 
-  Attributes:
-    low: A side-specific value describing the low-level state of writing.
-    high: A HighWrite value describing the high-level state of writing.
-    pending: A list of bytestrings for the RPC waiting to be written to the
-      other side of the RPC.
-  """
+dotnet restore .
 
-  def __init__(self, low, high, pending):
-    self.low = low
-    self.high = high
-    self.pending = pending
+dotnet pack --configuration Release Grpc.Core/project.json --output ../../artifacts
+dotnet pack --configuration Release Grpc.Auth/project.json --output ../../artifacts
+dotnet pack --configuration Release Grpc.HealthCheck/project.json --output ../../artifacts
 
-
-class CommonRPCState(object):
-  """A description of an RPC's state.
-
-  Attributes:
-    write: A WriteState describing the state of writing to the RPC.
-    sequence_number: The lowest-unused sequence number for use in generating
-      tickets locally describing the progress of the RPC.
-    deserializer: The behavior to be used to deserialize payload bytestreams
-      taken off the wire.
-    serializer: The behavior to be used to serialize payloads to be sent on the
-      wire.
-  """
-
-  def __init__(self, write, sequence_number, deserializer, serializer):
-    self.write = write
-    self.sequence_number = sequence_number
-    self.deserializer = deserializer
-    self.serializer = serializer
+tar -czf ../../artifacts/csharp_nugets_experimental.tar.gz ../../artifacts/*.nupkg
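
The packaging script stages prebuilt native libraries by copying from per-configuration artifact directories named architecture=<arch>,language=csharp,platform=<plat> into nativelibs/<plat>_<arch>, tolerating missing inputs the same way the `|| true` copies do. A sketch of that mapping, assuming the directory layout is exactly as the script spells it:

```python
# Sketch of the nativelibs staging done above; EXTERNAL_GIT_ROOT and the
# 'architecture=...,language=csharp,platform=...' names come from the script,
# the helper itself is illustrative.
import glob
import os
import shutil

PLATFORMS = {'windows': 'windows', 'linux': 'linux', 'macos': 'macosx'}

def stage_nativelibs(external_git_root, dest_root='nativelibs'):
    for plat, dest_plat in PLATFORMS.items():
        for arch in ('x86', 'x64'):
            src = os.path.join(
                external_git_root,
                'architecture=%s,language=csharp,platform=%s' % (arch, plat),
                'artifacts')
            dest = os.path.join(dest_root, '%s_%s' % (dest_plat, arch))
            if not os.path.isdir(dest):
                os.makedirs(dest)
            # Like `|| true` in the script, missing artifacts are skipped.
            for f in glob.glob(os.path.join(src, '*')):
                shutil.copy(f, dest)
```
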
diff --git a/tools/run_tests/build_package_node.sh b/tools/run_tests/build_package_node.sh
index 6f7211b53fe2727c67a5e0f006e353c64adecf90..ff4cfdb8bf97590b4ad128edb69a12bd5d4ce861 100755
--- a/tools/run_tests/build_package_node.sh
+++ b/tools/run_tests/build_package_node.sh
@@ -86,6 +86,7 @@ for arch in {x86,x64}; do
     cp $input_dir/protoc* bin/
     cp $input_dir/grpc_node_plugin* bin/
     mkdir -p bin/google/protobuf
+    mkdir -p bin/google/protobuf/compiler  # needed for plugin.proto
     for proto in "${well_known_protos[@]}"; do
       cp $base/third_party/protobuf/src/google/protobuf/$proto.proto bin/google/protobuf/$proto.proto
     done
diff --git a/tools/run_tests/build_package_ruby.sh b/tools/run_tests/build_package_ruby.sh
index 4116e29debbf10219e91a3940b8b3f4cf0666f12..0a755bddb0b36626728c02a9c4d7036fdffe8891 100755
--- a/tools/run_tests/build_package_ruby.sh
+++ b/tools/run_tests/build_package_ruby.sh
@@ -59,6 +59,7 @@ for arch in {x86,x64}; do
     input_dir="$EXTERNAL_GIT_ROOT/architecture=$arch,language=protoc,platform=$plat/artifacts"
     output_dir="$base/src/ruby/tools/bin/${ruby_arch}-${plat}"
     mkdir -p $output_dir/google/protobuf
+    mkdir -p $output_dir/google/protobuf/compiler  # needed for plugin.proto
     cp $input_dir/protoc* $output_dir/
     cp $input_dir/grpc_ruby_plugin* $output_dir/
     for proto in "${well_known_protos[@]}"; do
diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/build_python.sh
index b1c90df824c495e83cd9dc380eeee7edec9e2d02..a3fa8200d5db6338b94c865bba4f6630a0c994a0 100755
--- a/tools/run_tests/build_python.sh
+++ b/tools/run_tests/build_python.sh
@@ -33,44 +33,151 @@ set -ex
 # change to grpc repo root
 cd $(dirname $0)/../..
 
-TOX_PYTHON_ENV="$1"
-PY_VERSION="${TOX_PYTHON_ENV: -2}"
+##########################
+# Portability operations #
+##########################
+
+PLATFORM=`uname -s`
+
+function is_mingw() {
+  if [ "${PLATFORM/MINGW}" != "$PLATFORM" ]; then
+    echo true
+  else
+    exit 1
+  fi
+}
+
+function is_darwin() {
+  if [ "${PLATFORM/Darwin}" != "$PLATFORM" ]; then
+    echo true
+  else
+    exit 1
+  fi
+}
+
+function is_linux() {
+  if [ "${PLATFORM/Linux}" != "$PLATFORM" ]; then
+    echo true
+  else
+    exit 1
+  fi
+}
+
+# Associated virtual environment name for the given python command.
+function venv() {
+  $1 -c "import sys; print('py{}{}'.format(*sys.version_info[:2]))"
+}
+
+# Path to python executable within a virtual environment depending on the
+# system.
+function venv_relative_python() {
+  if [ $(is_mingw) ]; then
+    echo 'Scripts/python.exe'
+  else
+    echo 'bin/python'
+  fi
+}
+
+# Distutils toolchain to use depending on the system.
+function toolchain() {
+  if [ $(is_mingw) ]; then
+    echo 'mingw32'
+  else
+    echo 'unix'
+  fi
+}
+
+# Command to invoke the linux command `realpath` or equivalent.
+function script_realpath() {
+  # Find `realpath`
+  if [ -x "$(command -v realpath)" ]; then
+    realpath "$@"
+  elif [ -x "$(command -v grealpath)" ]; then
+    grealpath "$@"
+  else
+    exit 1
+  fi
+}
+
+####################
+# Script Arguments #
+####################
+
+PYTHON=${1:-python2.7}
+VENV=${2:-$(venv $PYTHON)}
+VENV_RELATIVE_PYTHON=${3:-$(venv_relative_python)}
+TOOLCHAIN=${4:-$(toolchain)}
 
 ROOT=`pwd`
-export LD_LIBRARY_PATH=$ROOT/libs/$CONFIG
-export DYLD_LIBRARY_PATH=$ROOT/libs/$CONFIG
-export PATH=$ROOT/bins/$CONFIG:$ROOT/bins/$CONFIG/protobuf:$PATH
-export CFLAGS="-I$ROOT/include -std=gnu99"
-export LDFLAGS="-L$ROOT/libs/$CONFIG"
+export CFLAGS="-I$ROOT/include -std=gnu99 -fno-wrapv $CFLAGS"
 export GRPC_PYTHON_BUILD_WITH_CYTHON=1
-export GRPC_PYTHON_USE_PRECOMPILED_BINARIES=0
-
-if [ "$CONFIG" = "gcov" ]
-then
-  export GRPC_PYTHON_ENABLE_CYTHON_TRACING=1
-fi
 
-tox -e ${TOX_PYTHON_ENV} --notest
+# Default python on the host, used as a fallback e.g. when instantiating the
+# virtualenv.
+HOST_PYTHON=${HOST_PYTHON:-python}
 
-# We force the .so naming convention in PEP 3149 for side by side installation support
-# Note this is the default in Python3, but explicitly disabled for Darwin, so we only
-# use this hack for our testing environment.
-if [ "$PY_VERSION" -gt "27" ]
-then
-  mv $ROOT/src/python/grpcio/grpc/_cython/cygrpc.so $ROOT/src/python/grpcio/grpc/_cython/cygrpc.so.backup || true
+# If ccache is available on Linux, use it.
+if [ $(is_linux) ]; then
+  # We're not on Darwin (Mac OS X)
+  if [ -x "$(command -v ccache)" ]; then
+    if [ -x "$(command -v gcc)" ]; then
+      export CC='ccache gcc'
+    elif [ -x "$(command -v clang)" ]; then
+      export CC='ccache clang'
+    fi
+  fi
+fi
+# TODO(atash) consider conceptualizing MinGW as a first-class platform and move
+# these flags into our `setup.py`s
+if [ $(is_mingw) ]; then
+  # We're on MinGW, and our CFLAGS and LDFLAGS will be eaten by the void. Use
+  # our work-around environment variables instead.
+  PYTHON_MSVCR=`$PYTHON -c "from distutils.cygwinccompiler import get_msvcr; print(get_msvcr()[0])"`
+  export GRPC_PYTHON_LDFLAGS="-static-libgcc -static-libstdc++ -mcrtdll=$PYTHON_MSVCR -static -lpthread"
+  # See https://sourceforge.net/p/mingw-w64/bugs/363/
+  export GRPC_PYTHON_CFLAGS="-D_ftime=_ftime64 -D_timeb=__timeb64"
+  # TODO(atash) set these flags for only grpcio-tools (they don't do any harm to
+  # grpcio, but they result in noisy warnings).
+  export GRPC_PYTHON_CFLAGS="-frtti -std=c++11 $GRPC_PYTHON_CFLAGS"
 fi
 
-$ROOT/.tox/${TOX_PYTHON_ENV}/bin/python $ROOT/setup.py build
-$ROOT/.tox/${TOX_PYTHON_ENV}/bin/python $ROOT/setup.py build_py
-$ROOT/.tox/${TOX_PYTHON_ENV}/bin/python $ROOT/setup.py build_ext --inplace
-$ROOT/.tox/${TOX_PYTHON_ENV}/bin/python $ROOT/setup.py gather --test
+############################
+# Perform build operations #
+############################
 
-if [ "$PY_VERSION" -gt "27" ]
-then
-  mv $ROOT/src/python/grpcio/grpc/_cython/cygrpc.so $ROOT/src/python/grpcio/grpc/_cython/cygrpc.cpython-${PY_VERSION}m.so || true
-  mv $ROOT/src/python/grpcio/grpc/_cython/cygrpc.so.backup $ROOT/src/python/grpcio/grpc/_cython/cygrpc.so || true
-fi
+# Instantiate the virtualenv, preferring to do so from the relevant python
+# version. Even if these commands fail (e.g. on Windows due to name conflicts),
+# it's possible that the virtualenv is still usable, and we trust the tester to
+# be able to 'figure it out' rather than have us do potentially expensive and
+# unnecessary error recovery by `rm -rf`ing the virtualenv.
+($PYTHON -m virtualenv $VENV ||
+ $HOST_PYTHON -m virtualenv -p $PYTHON $VENV ||
+ true)
+VENV_PYTHON=`script_realpath -s "$VENV/$VENV_RELATIVE_PYTHON"`
+
+# pip-installs the directory specified. Used because on MSYS the vanilla Windows
+# Python gets confused when parsing paths.
+pip_install_dir() {
+  PWD=`pwd`
+  cd $1
+  ($VENV_PYTHON setup.py build_ext -c $TOOLCHAIN || true)
+  # install the dependencies
+  $VENV_PYTHON -m pip install --upgrade .
+  # ensure that we've reinstalled the test packages
+  $VENV_PYTHON -m pip install --upgrade --force-reinstall --no-deps .
+  cd $PWD
+}
 
-# Build the health checker
-$ROOT/.tox/${TOX_PYTHON_ENV}/bin/python $ROOT/src/python/grpcio_health_checking/setup.py build
-$ROOT/.tox/${TOX_PYTHON_ENV}/bin/python $ROOT/src/python/grpcio_health_checking/setup.py build_py
+$VENV_PYTHON -m pip install --upgrade pip setuptools
+$VENV_PYTHON -m pip install cython
+pip_install_dir $ROOT
+$VENV_PYTHON $ROOT/tools/distrib/python/make_grpcio_tools.py
+pip_install_dir $ROOT/tools/distrib/python/grpcio_tools
+# TODO(atash) figure out namespace packages and grpcio-tools and auditwheel
+# etc...
+pip_install_dir $ROOT
+$VENV_PYTHON $ROOT/src/python/grpcio_health_checking/setup.py preprocess
+pip_install_dir $ROOT/src/python/grpcio_health_checking
+$VENV_PYTHON $ROOT/src/python/grpcio_tests/setup.py preprocess
+$VENV_PYTHON $ROOT/src/python/grpcio_tests/setup.py build_proto_modules
+pip_install_dir $ROOT/src/python/grpcio_tests
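
The venv() helper names each virtualenv after the interpreter's major/minor version (py27, py34, ...), which is the directory name the runner scripts above and below expect (e.g. py27/bin/python). A one-line check of that naming outside the shell wrapper:

```python
# The venv() helper runs exactly this expression in the target interpreter:
#   $1 -c "import sys; print('py{}{}'.format(*sys.version_info[:2]))"
import sys

def venv_name():
    return 'py{}{}'.format(*sys.version_info[:2])

print(venv_name())  # e.g. 'py27' under CPython 2.7, 'py34' under CPython 3.4
```
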
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/__init__.py b/tools/run_tests/build_python_msys2.sh
similarity index 90%
rename from src/python/grpcio/tests/unit/framework/interfaces/face/__init__.py
rename to tools/run_tests/build_python_msys2.sh
index 708651910607ffb686d781713f6893567821b9fd..6e9d3690180d0c83f7f7cbeb049e1ff532594b99 100644
--- a/src/python/grpcio/tests/unit/framework/interfaces/face/__init__.py
+++ b/tools/run_tests/build_python_msys2.sh
@@ -1,4 +1,5 @@
-# Copyright 2015, Google Inc.
+#!/bin/bash
+# Copyright 2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -27,4 +28,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+set -ex
 
+BUILD_PYTHON=`realpath "$(dirname $0)/build_python.sh"`
+export MSYSTEM=$1
+shift 1
+bash --login $BUILD_PYTHON "$@"
diff --git a/tools/run_tests/distribtest_targets.py b/tools/run_tests/distribtest_targets.py
index 1a7aa0bfc805ebd139c2fd8c370010d370feeaed..7930f2a0a4c424b2121436aaa757893606f23972 100644
--- a/tools/run_tests/distribtest_targets.py
+++ b/tools/run_tests/distribtest_targets.py
@@ -41,7 +41,7 @@ def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
   environ['RELATIVE_COPY_PATH'] = 'test/distrib'
 
   docker_args=[]
-  for k,v in environ.iteritems():
+  for k,v in environ.items():
     docker_args += ['-e', '%s=%s' % (k, v)]
   docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
                 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'}
diff --git a/tools/run_tests/dockerjob.py b/tools/run_tests/dockerjob.py
index 326c4faed95629dbc7236eeec4562e0674e769d9..4a7e61b3c4e3be785803b675d5658b04935f8f2d 100755
--- a/tools/run_tests/dockerjob.py
+++ b/tools/run_tests/dockerjob.py
@@ -29,6 +29,8 @@
 
 """Helpers to run docker instances as jobs."""
 
+from __future__ import print_function
+
 import jobset
 import tempfile
 import time
@@ -95,7 +97,7 @@ def remove_image(image, skip_nonexistent=False, max_retries=10):
                        stderr=subprocess.STDOUT) == 0:
       return True
     time.sleep(2)
-  print 'Failed to remove docker image %s' % image
+  print('Failed to remove docker image %s' % image)
   return False
 
 
@@ -104,7 +106,7 @@ class DockerJob:
 
   def __init__(self, spec):
     self._spec = spec
-    self._job = jobset.Job(spec, bin_hash=None, newline_on_success=True, travis=True, add_env={})
+    self._job = jobset.Job(spec, newline_on_success=True, travis=True, add_env={})
     self._container_name = spec.container_name
 
   def mapped_port(self, port):
@@ -118,4 +120,4 @@ class DockerJob:
 
   def is_running(self):
     """Polls a job and returns True if given job is still running."""
-    return self._job.state(jobset.NoCache()) == jobset._RUNNING
+    return self._job.state() == jobset._RUNNING
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index d3259e724dfedacbecf7c32a6e920c34daa64788..b6fb6318e007e156e30ac2e9c3b6929abc947b20 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -29,7 +29,8 @@
 
 """Run a group of subprocesses and then finish."""
 
-import hashlib
+from __future__ import print_function
+
 import multiprocessing
 import os
 import platform
@@ -48,6 +49,12 @@ measure_cpu_costs = False
 _DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
 _MAX_RESULT_SIZE = 8192
 
+def sanitized_environment(env):
+  sanitized = {}
+  for key, value in env.items():
+    sanitized[str(key).encode()] = str(value).encode()
+  return sanitized
+
 def platform_string():
   if platform.system() == 'Windows':
     return 'windows'
@@ -118,8 +125,8 @@ def message(tag, msg, explanatory_text=None, do_newline=False):
   try:
     if platform_string() == 'windows' or not sys.stdout.isatty():
       if explanatory_text:
-        print explanatory_text
-      print '%s: %s' % (tag, msg)
+        print(explanatory_text)
+      print('%s: %s' % (tag, msg))
       return
     sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
         _BEGINNING_OF_LINE,
@@ -149,7 +156,7 @@ def which(filename):
 class JobSpec(object):
   """Specifies what to run for a job."""
 
-  def __init__(self, cmdline, shortname=None, environ=None, hash_targets=None,
+  def __init__(self, cmdline, shortname=None, environ=None,
                cwd=None, shell=False, timeout_seconds=5*60, flake_retries=0,
                timeout_retries=0, kill_handler=None, cpu_cost=1.0,
                verbose_success=False):
@@ -157,19 +164,14 @@ class JobSpec(object):
     Arguments:
       cmdline: a list of arguments to pass as the command line
       environ: a dictionary of environment variables to set in the child process
-      hash_targets: which files to include in the hash representing the jobs version
-                    (or empty, indicating the job should not be hashed)
       kill_handler: a handler that will be called whenever job.kill() is invoked
       cpu_cost: number of cores per second this job needs
     """
     if environ is None:
       environ = {}
-    if hash_targets is None:
-      hash_targets = []
     self.cmdline = cmdline
     self.environ = environ
     self.shortname = cmdline[0] if shortname is None else shortname
-    self.hash_targets = hash_targets or []
     self.cwd = cwd
     self.shell = shell
     self.timeout_seconds = timeout_seconds
@@ -180,7 +182,7 @@ class JobSpec(object):
     self.verbose_success = verbose_success
 
   def identity(self):
-    return '%r %r %r' % (self.cmdline, self.environ, self.hash_targets)
+    return '%r %r' % (self.cmdline, self.environ)
 
   def __hash__(self):
     return hash(self.identity())
@@ -205,9 +207,8 @@ class JobResult(object):
 class Job(object):
   """Manages one job."""
 
-  def __init__(self, spec, bin_hash, newline_on_success, travis, add_env):
+  def __init__(self, spec, newline_on_success, travis, add_env):
     self._spec = spec
-    self._bin_hash = bin_hash
     self._newline_on_success = newline_on_success
     self._travis = travis
     self._add_env = add_env.copy()
@@ -226,6 +227,7 @@ class Job(object):
     env = dict(os.environ)
     env.update(self._spec.environ)
     env.update(self._add_env)
+    env = sanitized_environment(env)
     self._start = time.time()
     cmdline = self._spec.cmdline
     if measure_cpu_costs:
@@ -249,7 +251,7 @@ class Job(object):
       self._process = try_start()
     self._state = _RUNNING
 
-  def state(self, update_cache):
+  def state(self):
     """Poll current state of the job. Prints messages at completion."""
     def stdout(self=self):
       self._tempfile.seek(0)
@@ -293,8 +295,6 @@ class Job(object):
             stdout() if self._spec.verbose_success else None,
             do_newline=self._newline_on_success or self._travis)
         self.result.state = 'PASSED'
-        if self._bin_hash:
-          update_cache.finished(self._spec.identity(), self._bin_hash)
     elif (self._state == _RUNNING and
           self._spec.timeout_seconds is not None and
           time.time() - self._start > self._spec.timeout_seconds):
@@ -329,7 +329,7 @@ class Jobset(object):
   """Manages one run of jobs."""
 
   def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
-               stop_on_failure, add_env, cache):
+               stop_on_failure, add_env):
     self._running = set()
     self._check_cancelled = check_cancelled
     self._cancelled = False
@@ -338,9 +338,7 @@ class Jobset(object):
     self._maxjobs = maxjobs
     self._newline_on_success = newline_on_success
     self._travis = travis
-    self._cache = cache
     self._stop_on_failure = stop_on_failure
-    self._hashes = {}
     self._add_env = add_env
     self.resultset = {}
     self._remaining = None
@@ -367,29 +365,13 @@ class Jobset(object):
       if current_cpu_cost + spec.cpu_cost <= self._maxjobs: break
       self.reap()
     if self.cancelled(): return False
-    if spec.hash_targets:
-      if spec.identity() in self._hashes:
-        bin_hash = self._hashes[spec.identity()]
-      else:
-        bin_hash = hashlib.sha1()
-        for fn in spec.hash_targets:
-          with open(which(fn)) as f:
-            bin_hash.update(f.read())
-        bin_hash = bin_hash.hexdigest()
-        self._hashes[spec.identity()] = bin_hash
-      should_run = self._cache.should_run(spec.identity(), bin_hash)
-    else:
-      bin_hash = None
-      should_run = True
-    if should_run:
-      job = Job(spec,
-                bin_hash,
-                self._newline_on_success,
-                self._travis,
-                self._add_env)
-      self._running.add(job)
-      if not self.resultset.has_key(job.GetSpec().shortname):
-        self.resultset[job.GetSpec().shortname] = []
+    job = Job(spec,
+              self._newline_on_success,
+              self._travis,
+              self._add_env)
+    self._running.add(job)
+    if job.GetSpec().shortname not in self.resultset:
+      self.resultset[job.GetSpec().shortname] = []
     return True
 
   def reap(self):
@@ -397,7 +379,7 @@ class Jobset(object):
     while self._running:
       dead = set()
       for job in self._running:
-        st = job.state(self._cache)
+        st = job.state()
         if st == _RUNNING: continue
         if st == _FAILURE or st == _KILLED:
           self._failures += 1
@@ -450,15 +432,6 @@ def _never_cancelled():
   return False
 
 
-# cache class that caches nothing
-class NoCache(object):
-  def should_run(self, cmdline, bin_hash):
-    return True
-
-  def finished(self, cmdline, bin_hash):
-    pass
-
-
 def tag_remaining(xs):
   staging = []
   for x in xs:
@@ -477,12 +450,10 @@ def run(cmdlines,
         travis=False,
         infinite_runs=False,
         stop_on_failure=False,
-        cache=None,
         add_env={}):
   js = Jobset(check_cancelled,
               maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
-              newline_on_success, travis, stop_on_failure, add_env,
-              cache if cache is not None else NoCache())
+              newline_on_success, travis, stop_on_failure, add_env)
   for cmdline, remaining in tag_remaining(cmdlines):
     if not js.start(cmdline):
       break
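
With the binary-hash cache removed, the other notable addition to jobset.py is sanitized_environment(), which coerces every environment key and value to a byte string before Job.start() hands the dict to subprocess. A short demonstration of what it produces (the function body is copied from the diff; the sample values are made up):

```python
# sanitized_environment() as added to jobset.py: every key and value is coerced
# to a byte string before the dict is passed to subprocess in Job.start().
def sanitized_environment(env):
  sanitized = {}
  for key, value in env.items():
    sanitized[str(key).encode()] = str(value).encode()
  return sanitized

print(sanitized_environment({u'CONFIG': u'opt', 'JOBS': 16}))
# Python 2: {'CONFIG': 'opt', 'JOBS': '16'}   (byte strings)
# Python 3: {b'CONFIG': b'opt', b'JOBS': b'16'}
```
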
diff --git a/tools/run_tests/package_targets.py b/tools/run_tests/package_targets.py
index 820b539b59fcd9988b18387634e05a1fe2d1bfcf..ce3f08dfbc98dc6925b60f7fd114aa5cecf7268f 100644
--- a/tools/run_tests/package_targets.py
+++ b/tools/run_tests/package_targets.py
@@ -39,7 +39,7 @@ def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
   environ['RUN_COMMAND'] = shell_command
 
   docker_args=[]
-  for k,v in environ.iteritems():
+  for k,v in environ.items():
     docker_args += ['-e', '%s=%s' % (k, v)]
   docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
                 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
@@ -71,18 +71,29 @@ def create_jobspec(name, cmdline, environ=None, cwd=None, shell=False,
 class CSharpPackage:
   """Builds C# nuget packages."""
 
-  def __init__(self):
-    self.name = 'csharp_package'
-    self.labels = ['package', 'csharp', 'windows']
+  def __init__(self, use_coreclr=False):
+    self.use_coreclr = use_coreclr
+    self.name = 'csharp_package_coreclr' if use_coreclr else 'csharp_package'
+    self.labels = ['package', 'csharp']
+    if use_coreclr:
+      self.labels += ['linux']
+    else:
+      self.labels += ['windows']
 
   def pre_build_jobspecs(self):
     return []
 
   def build_jobspec(self):
-    return create_jobspec(self.name,
-                          ['build_packages.bat'],
-                          cwd='src\\csharp',
-                          shell=True)
+    if self.use_coreclr:
+      return create_docker_jobspec(
+          self.name,
+          'tools/dockerfile/test/csharp_coreclr_x64',
+          'tools/run_tests/build_package_csharp_coreclr.sh')
+    else:
+      return create_jobspec(self.name,
+                            ['build_packages.bat'],
+                            cwd='src\\csharp',
+                            shell=True)
 
   def __str__(self):
     return self.name
@@ -159,6 +170,7 @@ class PHPPackage:
 def targets():
   """Gets list of supported targets"""
   return [CSharpPackage(),
+          CSharpPackage(use_coreclr=True),
           NodePackage(),
           RubyPackage(),
           PythonPackage(),
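
CSharpPackage now covers both the classic Windows nuget build and the experimental CoreCLR docker build, switching its name, labels, and jobspec on use_coreclr. A stripped-down sketch of just the constructor, to show which labels each variant ends up with:

```python
# Reduced sketch of the new CSharpPackage constructor: only the fields set in
# the diff, to show how the name and labels differ per variant.
class CSharpPackage(object):

  def __init__(self, use_coreclr=False):
    self.use_coreclr = use_coreclr
    self.name = 'csharp_package_coreclr' if use_coreclr else 'csharp_package'
    self.labels = ['package', 'csharp']
    self.labels += ['linux'] if use_coreclr else ['windows']

for pkg in (CSharpPackage(), CSharpPackage(use_coreclr=True)):
  print('%s %s' % (pkg.name, pkg.labels))
# csharp_package ['package', 'csharp', 'windows']
# csharp_package_coreclr ['package', 'csharp', 'linux']
```
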
diff --git a/tools/run_tests/performance/run_worker_python.sh b/tools/run_tests/performance/run_worker_python.sh
index 0da8deda58e909deaacdf88d8cf891e61a862fe1..06cf172d6fece222ea07299f32d5a11d52c927f1 100755
--- a/tools/run_tests/performance/run_worker_python.sh
+++ b/tools/run_tests/performance/run_worker_python.sh
@@ -32,4 +32,4 @@ set -ex
 
 cd $(dirname $0)/../../..
 
-PYTHONPATH=src/python/grpcio:src/python/gens .tox/py27/bin/python src/python/grpcio/tests/qps/qps_worker.py $@
+PYTHONPATH=src/python/grpcio_tests:src/python/gens py27/bin/python src/python/grpcio_tests/tests/qps/qps_worker.py $@
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index 2d5130e1e8682f1fa084cf5a1918ffbbfd97681e..4dfd01fc6636cd57e87b33f309c7607c04fbe322 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -387,45 +387,44 @@ class PythonLanguage:
     return 500
 
   def scenarios(self):
-    # TODO(issue #6522): Empty streaming requests does not work for python
-    #yield _ping_pong_scenario(
-    #    'python_generic_async_streaming_ping_pong', rpc_type='STREAMING',
-    #    client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-    #    use_generic_payload=True,
-    #    categories=[SMOKETEST])
+    yield _ping_pong_scenario(
+        'python_generic_sync_streaming_ping_pong', rpc_type='STREAMING',
+        client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
+        use_generic_payload=True,
+        categories=[SMOKETEST])
 
     yield _ping_pong_scenario(
         'python_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER')
+        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
 
     yield _ping_pong_scenario(
         'python_protobuf_async_unary_ping_pong', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='SYNC_SERVER')
+        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
 
     yield _ping_pong_scenario(
         'python_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
+        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
         categories=[SMOKETEST])
 
     yield _ping_pong_scenario(
         'python_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
+        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
         unconstrained_client='sync')
 
     yield _ping_pong_scenario(
         'python_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
+        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
         unconstrained_client='sync')
 
     yield _ping_pong_scenario(
         'python_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
+        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
         server_language='c++', server_core_limit=1, async_server_threads=1,
         categories=[SMOKETEST])
 
     yield _ping_pong_scenario(
         'python_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
+        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
         server_language='c++', server_core_limit=1, async_server_threads=1)
 
   def __str__(self):
diff --git a/tools/run_tests/port_server.py b/tools/run_tests/port_server.py
index 14e82b601eadbef7062397a2c33c791bff43318c..e9b3f7ff7957c4fdfbea8d1be2dbbbc96ba55176 100755
--- a/tools/run_tests/port_server.py
+++ b/tools/run_tests/port_server.py
@@ -30,8 +30,10 @@
 
 """Manage TCP ports for unit tests; started by run_tests.py"""
 
+from __future__ import print_function
+
 import argparse
-import BaseHTTPServer
+from six.moves import BaseHTTPServer
 import hashlib
 import os
 import socket
@@ -42,11 +44,11 @@ import time
 # increment this number whenever making a change to ensure that
 # the changes are picked up by running CI servers
 # note that all changes must be backwards compatible
-_MY_VERSION = 7
+_MY_VERSION = 9
 
 
 if len(sys.argv) == 2 and sys.argv[1] == 'dump_version':
-  print _MY_VERSION
+  print(_MY_VERSION)
   sys.exit(0)
 
 
@@ -62,7 +64,7 @@ if args.logfile is not None:
   sys.stderr = open(args.logfile, 'w')
   sys.stdout = sys.stderr
 
-print 'port server running on port %d' % args.port
+print('port server running on port %d' % args.port)
 
 pool = []
 in_use = {}
@@ -70,7 +72,7 @@ in_use = {}
 
 def refill_pool(max_timeout, req):
   """Scan for ports not marked for being in use"""
-  for i in range(1025, 32767):
+  for i in range(1025, 32766):
     if len(pool) > 100: break
     if i in in_use:
       age = time.time() - in_use[i]
@@ -110,6 +112,11 @@ keep_running = True
 
 
 class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
+
+  def setup(self):
+    # If the client is unreachable for 5 seconds, close the connection
+    self.timeout = 5
+    BaseHTTPServer.BaseHTTPRequestHandler.setup(self)
 
   def do_GET(self):
     global keep_running
@@ -147,7 +154,7 @@ class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
       self.send_header('Content-Type', 'text/plain')
       self.end_headers()
       now = time.time()
-      self.wfile.write(yaml.dump({'pool': pool, 'in_use': dict((k, now - v) for k, v in in_use.iteritems())}))
+      self.wfile.write(yaml.dump({'pool': pool, 'in_use': dict((k, now - v) for k, v in in_use.items())}))
     elif self.path == '/quitquitquit':
       self.send_response(200)
       self.end_headers()
@@ -159,4 +166,4 @@ while keep_running:
   httpd.handle_request()
   sys.stderr.flush()
 
-print 'done'
+print('done')
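
Besides the print-function and six.moves changes, the port server now overrides setup() so a client that stops responding cannot hold the single-threaded server for more than five seconds; setting self.timeout before delegating to the base class is what makes the socket timeout apply. A standalone sketch of that pattern (the GET handler body is a stand-in, not the real one):

```python
# Sketch of the per-connection timeout pattern added to the port server.
from six.moves import BaseHTTPServer

class Handler(BaseHTTPServer.BaseHTTPRequestHandler):

  def setup(self):
    # If the client is unreachable for 5 seconds, close the connection
    self.timeout = 5
    BaseHTTPServer.BaseHTTPRequestHandler.setup(self)

  def do_GET(self):
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    self.wfile.write(b'ok')

if __name__ == '__main__':
  httpd = BaseHTTPServer.HTTPServer(('', 8080), Handler)  # port is arbitrary here
  httpd.handle_request()
```
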
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 35dcaca3d34e731f22367d19492003bb85fd4bab..5648a694cd0db4881b093891ec5384c9e4f23316 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -29,6 +29,8 @@
 
 """Generate XML and HTML test reports."""
 
+from __future__ import print_function
+
 try:
   from mako.runtime import Context
   from mako.template import Template
@@ -60,7 +62,7 @@ def render_junit_xml_report(resultset, xml_report):
   root = ET.Element('testsuites')
   testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', 
                             name='tests')
-  for shortname, results in resultset.iteritems(): 
+  for shortname, results in resultset.items():
     for result in results:
       xml_test = ET.SubElement(testsuite, 'testcase', name=shortname) 
       if result.elapsed_time:
@@ -83,10 +85,10 @@ def render_interop_html_report(
   try:
     mytemplate = Template(filename=template_file, format_exceptions=True)
   except NameError:
-    print 'Mako template is not installed. Skipping HTML report generation.'
+    print('Mako template is not installed. Skipping HTML report generation.')
     return
   except IOError as e:
-    print 'Failed to find the template %s: %s' % (template_file, e)
+    print('Failed to find the template %s: %s' % (template_file, e))
     return
 
   sorted_test_cases = sorted(test_cases)
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 0787637d75851976d6e7fbe4a6cbe38c6a4b9e66..f9065c5bfd36ad04eea52ffa10ab90ae4da55580 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -30,6 +30,8 @@
 
 """Run interop (cross-language) tests in parallel."""
 
+from __future__ import print_function
+
 import argparse
 import atexit
 import dockerjob
@@ -54,8 +56,13 @@ os.chdir(ROOT)
 
 _DEFAULT_SERVER_PORT=8080
 
-_SKIP_COMPRESSION = ['large_compressed_unary',
-                     'server_compressed_streaming']
+_SKIP_CLIENT_COMPRESSION = ['client_compressed_unary',
+                            'client_compressed_streaming']
+
+_SKIP_SERVER_COMPRESSION = ['server_compressed_unary',
+                            'server_compressed_streaming']
+
+_SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION
 
 _SKIP_ADVANCED = ['custom_metadata', 'status_code_and_message',
                   'unimplemented_method']
@@ -111,7 +118,7 @@ class CSharpLanguage:
     return {}
 
   def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION
+    return _SKIP_SERVER_COMPRESSION
 
   def unimplemented_test_cases_server(self):
     return _SKIP_COMPRESSION
@@ -299,8 +306,11 @@ class PythonLanguage:
 
   def client_cmd(self, args):
     return [
-        'tox -einterop_client --',
-        ' '.join(args)
+        'py27/bin/python',
+        'src/python/grpcio_tests/setup.py',
+        'run_interop',
+        '--client',
+        '--args="{}"'.format(' '.join(args))
     ]
 
   def cloud_to_prod_env(self):
@@ -308,8 +318,11 @@ class PythonLanguage:
 
   def server_cmd(self, args):
     return [
-        'tox -einterop_server --',
-        ' '.join(args) + ' --use_tls=true'
+        'py27/bin/python',
+        'src/python/grpcio_tests/setup.py',
+        'run_interop',
+        '--server',
+        '--args="{}"'.format(' '.join(args) + ' --use_tls=true')
     ]
 
   def global_env(self):
@@ -345,7 +358,8 @@ _TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
                'cancel_after_begin', 'cancel_after_first_response',
                'timeout_on_sleeping_server', 'custom_metadata',
                'status_code_and_message', 'unimplemented_method',
-               'large_compressed_unary', 'server_compressed_streaming']
+               'client_compressed_unary', 'server_compressed_unary',
+               'client_compressed_streaming', 'server_compressed_streaming']
 
 _AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
                     'oauth2_auth_token', 'per_rpc_creds']
@@ -360,7 +374,7 @@ def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
 
   # turn environ into -e docker args
   if environ:
-    for k,v in environ.iteritems():
+    for k,v in environ.items():
       docker_cmdline += ['-e', '%s=%s' % (k,v)]
 
   # set working directory
@@ -464,7 +478,8 @@ def cloud_to_prod_jobspec(language, test_case, server_host_name,
           flake_retries=5 if args.allow_flakes else 0,
           timeout_retries=2 if args.allow_flakes else 0,
           kill_handler=_job_kill_handler)
-  test_job.container_name = container_name
+  if docker_image:
+    test_job.container_name = container_name
   return test_job
 
 
@@ -500,7 +515,8 @@ def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
           flake_retries=5 if args.allow_flakes else 0,
           timeout_retries=2 if args.allow_flakes else 0,
           kill_handler=_job_kill_handler)
-  test_job.container_name = container_name
+  if docker_image:
+    test_job.container_name = container_name
   return test_job
 
 
@@ -660,15 +676,15 @@ servers = set(s for s in itertools.chain.from_iterable(_SERVERS
 
 if args.use_docker:
   if not args.travis:
-    print 'Seen --use_docker flag, will run interop tests under docker.'
-    print
-    print 'IMPORTANT: The changes you are testing need to be locally committed'
-    print 'because only the committed changes in the current branch will be'
-    print 'copied to the docker environment.'
+    print('Seen --use_docker flag, will run interop tests under docker.')
+    print('')
+    print('IMPORTANT: The changes you are testing need to be locally committed')
+    print('because only the committed changes in the current branch will be')
+    print('copied to the docker environment.')
     time.sleep(5)
 
 if not args.use_docker and servers:
-  print 'Running interop servers is only supported with --use_docker option enabled.'
+  print('Running interop servers is only supported with --use_docker option enabled.')
   sys.exit(1)
 
 languages = set(_LANGUAGES[l]
@@ -754,7 +770,7 @@ try:
     (server_host, server_port) = server[1].split(':')
     server_addresses[server_name] = (server_host, server_port)
 
-  for server_name, server_address in server_addresses.iteritems():
+  for server_name, server_address in server_addresses.items():
     (server_host, server_port) = server_address
     server_language = _LANGUAGES.get(server_name, None)
     skip_server = []  # test cases unimplemented by server
@@ -786,7 +802,7 @@ try:
         jobs.append(test_job)
 
   if not jobs:
-    print 'No jobs to run.'
+    print('No jobs to run.')
     for image in docker_images.itervalues():
       dockerjob.remove_image(image, skip_nonexistent=True)
     sys.exit(1)
@@ -800,7 +816,7 @@ try:
 
   report_utils.render_junit_xml_report(resultset, 'report.xml')
 
-  for name, job in resultset.iteritems():
+  for name, job in resultset.items():
     if "http2" in name:
       job[0].http2results = aggregate_http2_results(job[0].message)
 
@@ -812,12 +828,12 @@ try:
 
 finally:
   # Check if servers are still running.
-  for server, job in server_jobs.iteritems():
+  for server, job in server_jobs.items():
     if not job.is_running():
-      print 'Server "%s" has exited prematurely.' % server
+      print('Server "%s" has exited prematurely.' % server)
 
   dockerjob.finish_jobs([j for j in server_jobs.itervalues()])
 
   for image in docker_images.itervalues():
-    print 'Removing docker image %s' % image
+    print('Removing docker image %s' % image)
     dockerjob.remove_image(image)
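
Splitting _SKIP_COMPRESSION into client and server halves lets a language implement one direction without the other; C#, for example, now skips only the server-compression cases on the client side. A quick sketch of how a language's effective case list falls out of those sets (the lists are taken from the diff, with _TEST_CASES truncated; the filtering helper is hypothetical, since the real script builds jobspecs instead):

```python
# Skip lists as split in run_interop_tests.py, plus a hypothetical helper that
# shows which cases remain runnable for a given language.
_SKIP_CLIENT_COMPRESSION = ['client_compressed_unary',
                            'client_compressed_streaming']
_SKIP_SERVER_COMPRESSION = ['server_compressed_unary',
                            'server_compressed_streaming']
_SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION

_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
               'client_compressed_unary', 'server_compressed_unary',
               'client_compressed_streaming', 'server_compressed_streaming']

def runnable_cases(unimplemented):
  return [case for case in _TEST_CASES if case not in unimplemented]

# C# clients after this change skip only the server-compression cases:
print(runnable_cases(_SKIP_SERVER_COMPRESSION))
```
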
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index f037d0d17d9ec403b61b4122d781acdbf74dc5f1..5fdf7a407d90102128a69bb9ba7ffd36da69009b 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -30,6 +30,8 @@
 
 """Run performance tests locally or remotely."""
 
+from __future__ import print_function
+
 import argparse
 import itertools
 import jobset
@@ -61,11 +63,11 @@ class QpsWorkerJob:
     self._spec = spec
     self.language = language
     self.host_and_port = host_and_port
-    self._job = jobset.Job(spec, bin_hash=None, newline_on_success=True, travis=True, add_env={})
+    self._job = jobset.Job(spec, newline_on_success=True, travis=True, add_env={})
 
   def is_running(self):
     """Polls a job and returns True if given job is still running."""
-    return self._job.state(jobset.NoCache()) == jobset._RUNNING
+    return self._job.state() == jobset._RUNNING
 
   def kill(self):
     return self._job.kill()
@@ -310,7 +312,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
                             'in the same scenario')
           if custom_server_lang:
             if not workers_by_lang.get(custom_server_lang, []):
-              print 'Warning: Skipping scenario %s as' % scenario_json['name']
+              print('Warning: Skipping scenario %s as' % scenario_json['name'])
               print('SERVER_LANGUAGE is set to %s yet the language has '
                     'not been selected with -l' % custom_server_lang)
               continue
@@ -319,7 +321,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
               workers[idx] = workers_by_lang[custom_server_lang][idx]
           if custom_client_lang:
             if not workers_by_lang.get(custom_client_lang, []):
-              print 'Warning: Skipping scenario %s as' % scenario_json['name']
+              print('Warning: Skipping scenario %s as' % scenario_json['name'])
               print('CLIENT_LANGUAGE is set to %s yet the language has '
                     'not been selected with -l' % custom_client_lang)
               continue
@@ -344,14 +346,14 @@ def finish_qps_workers(jobs):
   while any(job.is_running() for job in jobs):
     for job in qpsworker_jobs:
       if job.is_running():
-        print 'QPS worker "%s" is still running.' % job.host_and_port
+        print('QPS worker "%s" is still running.' % job.host_and_port)
     if retries > 10:
-      print 'Killing all QPS workers.'
+      print('Killing all QPS workers.')
       for job in jobs:
         job.kill()
     retries += 1
     time.sleep(3)
-  print 'All QPS workers finished.'
+  print('All QPS workers finished.')
 
 
 argp = argparse.ArgumentParser(description='Run performance tests.')
diff --git a/tools/run_tests/run_python.sh b/tools/run_tests/run_python.sh
index 8059059d41b4c08df47ecf5649e6ef5c9fab5f84..17e0186f2a86485d9f2a598b0cce3a05c795a3a5 100755
--- a/tools/run_tests/run_python.sh
+++ b/tools/run_tests/run_python.sh
@@ -33,24 +33,11 @@ set -ex
 # change to grpc repo root
 cd $(dirname $0)/../..
 
-TOX_PYTHON_ENV="$1"
+PYTHON=`realpath -s "${1:-py27/bin/python}"`
 
 ROOT=`pwd`
-export LD_LIBRARY_PATH=$ROOT/libs/$CONFIG
-export DYLD_LIBRARY_PATH=$ROOT/libs/$CONFIG
-export PATH=$ROOT/bins/$CONFIG:$ROOT/bins/$CONFIG/protobuf:$PATH
-export CFLAGS="-I$ROOT/include -std=c89"
-export LDFLAGS="-L$ROOT/libs/$CONFIG"
-export GRPC_PYTHON_BUILD_WITH_CYTHON=1
-export GRPC_PYTHON_USE_PRECOMPILED_BINARIES=0
 
-if [ "$CONFIG" = "gcov" ]
-then
-  export GRPC_PYTHON_ENABLE_CYTHON_TRACING=1
-  tox -e ${TOX_PYTHON_ENV}
-else
-  $ROOT/.tox/${TOX_PYTHON_ENV}/bin/python $ROOT/setup.py test_lite
-fi
+$PYTHON $ROOT/src/python/grpcio_tests/setup.py test_lite
 
 mkdir -p $ROOT/reports
 rm -rf $ROOT/reports/python-coverage
diff --git a/tools/run_tests/run_stress_tests.py b/tools/run_tests/run_stress_tests.py
index e42ee24ffb0fd08226045332a4695f80a4b04d40..de4a22877ca52327bb9df04761cefeaae957812d 100755
--- a/tools/run_tests/run_stress_tests.py
+++ b/tools/run_tests/run_stress_tests.py
@@ -29,6 +29,8 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 """Run stress test in C++"""
 
+from __future__ import print_function
+
 import argparse
 import atexit
 import dockerjob
@@ -93,7 +95,7 @@ def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
 
   # turn environ into -e docker args
   if environ:
-    for k, v in environ.iteritems():
+    for k, v in environ.items():
       docker_cmdline += ['-e', '%s=%s' % (k, v)]
 
   # set working directory
@@ -140,7 +142,7 @@ def cloud_to_cloud_jobspec(language,
       '--num_channels_per_server=%s' % num_channels_per_server,
       '--metrics_port=%s' % metrics_port
   ]))
-  print cmdline
+  print(cmdline)
   cwd = language.client_cwd
   environ = language.global_env()
   if docker_image:
@@ -287,7 +289,7 @@ try:
     (server_host, server_port) = server[1].split(':')
     server_addresses[server_name] = (server_host, server_port)
 
-  for server_name, server_address in server_addresses.iteritems():
+  for server_name, server_address in server_addresses.items():
     (server_host, server_port) = server_address
     for language in languages:
       test_job = cloud_to_cloud_jobspec(
@@ -302,7 +304,7 @@ try:
       jobs.append(test_job)
 
   if not jobs:
-    print 'No jobs to run.'
+    print('No jobs to run.')
     for image in docker_images.itervalues():
       dockerjob.remove_image(image, skip_nonexistent=True)
     sys.exit(1)
@@ -317,12 +319,12 @@ try:
 
 finally:
   # Check if servers are still running.
-  for server, job in server_jobs.iteritems():
+  for server, job in server_jobs.items():
     if not job.is_running():
-      print 'Server "%s" has exited prematurely.' % server
+      print('Server "%s" has exited prematurely.' % server)
 
   dockerjob.finish_jobs([j for j in server_jobs.itervalues()])
 
   for image in docker_images.itervalues():
-    print 'Removing docker image %s' % image
+    print('Removing docker image %s' % image)
     dockerjob.remove_image(image)
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 9ef12c5b07e52937d56a22f74cbb35284e93ce1c..59d2cad856c7cc07a98b7a3a98f9c2110ece0ecd 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -30,14 +30,17 @@
 
 """Run tests in parallel."""
 
+from __future__ import print_function
+
 import argparse
 import ast
+import collections
 import glob
-import hashlib
 import itertools
 import json
 import multiprocessing
 import os
+import os.path
 import platform
 import random
 import re
@@ -47,7 +50,7 @@ import sys
 import tempfile
 import traceback
 import time
-import urllib2
+from six.moves import urllib
 import uuid
 
 import jobset
@@ -63,7 +66,7 @@ _FORCE_ENVIRON_FOR_WRAPPERS = {}
 
 
 _POLLING_STRATEGIES = {
-  'linux': ['poll', 'legacy']
+  'linux': ['epoll', 'poll', 'legacy']
 }
 
 
@@ -78,35 +81,27 @@ class Config(object):
     if environ is None:
       environ = {}
     self.build_config = config
-    self.allow_hashing = (config != 'gcov')
     self.environ = environ
     self.environ['CONFIG'] = config
     self.tool_prefix = tool_prefix
     self.timeout_multiplier = timeout_multiplier
 
-  def job_spec(self, cmdline, hash_targets, timeout_seconds=5*60,
+  def job_spec(self, cmdline, timeout_seconds=5*60,
                shortname=None, environ={}, cpu_cost=1.0, flaky=False):
     """Construct a jobset.JobSpec for a test under this config
 
        Args:
          cmdline:      a list of strings specifying the command line the test
                        would like to run
-         hash_targets: either None (don't do caching of test results), or
-                       a list of strings specifying files to include in a
-                       binary hash to check if a test has changed
-                       -- if used, all artifacts needed to run the test must
-                          be listed
     """
     actual_environ = self.environ.copy()
-    for k, v in environ.iteritems():
+    for k, v in environ.items():
       actual_environ[k] = v
     return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
                           shortname=shortname,
                           environ=actual_environ,
                           cpu_cost=cpu_cost,
                           timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
-                          hash_targets=hash_targets
-                              if self.allow_hashing else None,
                           flake_retries=5 if flaky or args.allow_flakes else 0,
                           timeout_retries=3 if args.allow_flakes else 0)
 
@@ -209,7 +204,7 @@ class CLanguage(object):
                                             flaky=target.get('flaky', False),
                                             environ=env))
         elif self.args.regex == '.*' or self.platform == 'windows':
-          print '\nWARNING: binary not found, skipping', binary
+          print('\nWARNING: binary not found, skipping', binary)
     return sorted(out)
 
   def make_targets(self):
@@ -381,52 +376,42 @@ class PhpLanguage(object):
     return 'php'
 
 
+class PythonConfig(collections.namedtuple('PythonConfig', [
+    'name', 'build', 'run'])):
+  """Tuple of commands (named so that 'what it says on the tin' applies)."""
+
 class PythonLanguage(object):
 
   def configure(self, config, args):
     self.config = config
     self.args = args
-    self._tox_envs = self._get_tox_envs(self.args.compiler)
+    self.pythons = self._get_pythons(self.args)
 
   def test_specs(self):
     # load list of known test suites
-    with open('src/python/grpcio/tests/tests.json') as tests_json_file:
+    with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
       tests_json = json.load(tests_json_file)
     environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
-    environment['PYTHONPATH'] = '{}:{}'.format(
-      os.path.abspath('src/python/gens'),
-      os.path.abspath('src/python/grpcio_health_checking'))
-    if self.config.build_config != 'gcov':
-      return [self.config.job_spec(
-          ['tools/run_tests/run_python.sh', tox_env],
-          None,
-          environ=dict(environment.items() +
-                       [('GRPC_PYTHON_TESTRUNNER_FILTER', suite_name)]),
-          shortname='%s.test.%s' % (tox_env, suite_name),
-          timeout_seconds=5*60)
-          for suite_name in tests_json
-          for tox_env in self._tox_envs]
-    else:
-      return [self.config.job_spec(['tools/run_tests/run_python.sh', tox_env],
-                                   None,
-                                   environ=environment,
-                                   shortname='%s.test.coverage' % tox_env,
-                                   timeout_seconds=15*60)
-                                   for tox_env in self._tox_envs]
-
+    return [self.config.job_spec(
+        config.run,
+        timeout_seconds=5*60,
+        environ=dict(list(environment.items()) +
+                     [('GRPC_PYTHON_TESTRUNNER_FILTER', suite_name)]),
+        shortname='%s.test.%s' % (config.name, suite_name),)
+        for suite_name in tests_json
+        for config in self.pythons]
 
   def pre_build_steps(self):
     return []
 
   def make_targets(self):
-    return ['static_c', 'grpc_python_plugin', 'shared_c']
+    return []
 
   def make_options(self):
     return []
 
   def build_steps(self):
-    return [['tools/run_tests/build_python.sh', tox_env] 
-            for tox_env in self._tox_envs]
+    return [config.build for config in self.pythons]
 
   def post_tests_steps(self):
     return []
@@ -435,18 +420,61 @@ class PythonLanguage(object):
     return 'Makefile'
 
   def dockerfile_dir(self):
-    return 'tools/dockerfile/test/python_jessie_%s' % _docker_arch_suffix(self.args.arch)
-
-  def _get_tox_envs(self, compiler):
-    """Returns name of tox environment based on selected compiler."""
-    if compiler == 'default':
-      return ('py27', 'py34')
-    elif compiler == 'python2.7':
-      return ('py27',)
-    elif compiler == 'python3.4':
-      return ('py34',)
+    return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
+
+  def python_manager_name(self):
+    return 'pyenv' if self.args.compiler in ['python3.5', 'python3.6'] else 'jessie'
+
+  def _get_pythons(self, args):
+    if args.arch == 'x86':
+      bits = '32'
     else:
-      raise Exception('Compiler %s not supported.' % compiler)
+      bits = '64'
+    if os.name == 'nt':
+      shell = ['bash']
+      builder = [os.path.abspath('tools/run_tests/build_python_msys2.sh')]
+      builder_prefix_arguments = ['MINGW{}'.format(bits)]
+      venv_relative_python = ['Scripts/python.exe']
+      toolchain = ['mingw32']
+      python_pattern_function = lambda major, minor, bits: (
+          '/c/Python{major}{minor}/python.exe'.format(major=major, minor=minor, bits=bits)
+          if bits == '64' else
+          '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
+              major=major, minor=minor, bits=bits))
+    else:
+      shell = []
+      builder = [os.path.abspath('tools/run_tests/build_python.sh')]
+      builder_prefix_arguments = []
+      venv_relative_python = ['bin/python']
+      toolchain = ['unix']
+      # Bit-ness is handled by the test machine's environment
+      python_pattern_function = lambda major, minor, bits: 'python{major}.{minor}'.format(major=major, minor=minor)
+    runner = [os.path.abspath('tools/run_tests/run_python.sh')]
+    python_config_generator = lambda name, major, minor, bits: PythonConfig(
+        name,
+        shell + builder + builder_prefix_arguments
+            + [python_pattern_function(major=major, minor=minor, bits=bits)]
+            + [name] + venv_relative_python + toolchain,
+        shell + runner + [os.path.join(name, venv_relative_python[0])])
+    python27_config = python_config_generator(name='py27', major='2', minor='7', bits=bits)
+    python34_config = python_config_generator(name='py34', major='3', minor='4', bits=bits)
+    python35_config = python_config_generator(name='py35', major='3', minor='5', bits=bits)
+    python36_config = python_config_generator(name='py36', major='3', minor='6', bits=bits)
+    if args.compiler == 'default':
+      if os.name == 'nt':
+        return (python27_config,)
+      else:
+        return (python27_config, python34_config,)
+    elif args.compiler == 'python2.7':
+      return (python27_config,)
+    elif args.compiler == 'python3.4':
+      return (python34_config,)
+    elif args.compiler == 'python3.5':
+      return (python35_config,)
+    elif args.compiler == 'python3.6':
+      return (python36_config,)
+    else:
+      raise Exception('Compiler %s not supported.' % args.compiler)
 
   def __str__(self):
     return 'python'
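
The `_get_pythons` helper above replaces the old tox-environment names with per-interpreter `PythonConfig` objects whose `build` and `run` fields are complete command lines. Below is a minimal sketch of the Unix branch of that assembly, assuming `PythonConfig` is a plain named tuple with `name`, `build`, and `run` fields (its definition lies outside this hunk); the helper name `unix_python_config` is illustrative only.

```python
# Minimal sketch (assumption: PythonConfig is a namedtuple defined elsewhere
# in run_tests.py with the fields used below).
import collections
import os

PythonConfig = collections.namedtuple('PythonConfig', ['name', 'build', 'run'])

def unix_python_config(name, major, minor):
    """Builds a config equivalent to what _get_pythons produces when os.name != 'nt'."""
    builder = [os.path.abspath('tools/run_tests/build_python.sh')]
    runner = [os.path.abspath('tools/run_tests/run_python.sh')]
    interpreter = 'python{}.{}'.format(major, minor)   # e.g. python3.5
    venv_python = os.path.join(name, 'bin/python')     # interpreter inside the venv
    return PythonConfig(
        name=name,
        build=builder + [interpreter, name, 'bin/python', 'unix'],
        run=runner + [venv_python])

if __name__ == '__main__':
    cfg = unix_python_config('py35', '3', '5')
    print(cfg.build)  # command list returned from build_steps()
    print(cfg.run)    # command list passed to job_spec() in test_specs()
```
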
@@ -460,7 +488,7 @@ class RubyLanguage(object):
     _check_compiler(self.args.compiler, ['default'])
 
   def test_specs(self):
-    return [self.config.job_spec(['tools/run_tests/run_ruby.sh'], None,
+    return [self.config.job_spec(['tools/run_tests/run_ruby.sh'],
                                  timeout_seconds=10*60,
                                  environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
 
@@ -500,15 +528,23 @@ class CSharpLanguage(object):
     if self.platform == 'windows':
       # Explicitly choosing between x86 and x64 arch doesn't work yet
       _check_arch(self.args.arch, ['default'])
+      # CoreCLR uses the 64-bit runtime by default.
+      arch_option = 'x64' if self.args.compiler == 'coreclr' else self.args.arch
       self._make_options = [_windows_toolset_option(self.args.compiler),
-                            _windows_arch_option(self.args.arch)]
+                            _windows_arch_option(arch_option)]
     else:
-      _check_compiler(self.args.compiler, ['default'])
+      _check_compiler(self.args.compiler, ['default', 'coreclr'])
+      if self.platform == 'linux' and self.args.compiler == 'coreclr':
+        self._docker_distro = 'coreclr'
+      else:
+        self._docker_distro = 'jessie'
+
       if self.platform == 'mac':
-        # On Mac, official distribution of mono is 32bit.
         # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
-        self._make_options = ['EMBED_OPENSSL=true',
-                              'CFLAGS=-m32', 'LDFLAGS=-m32']
+        self._make_options = ['EMBED_OPENSSL=true']
+        if self.args.compiler != 'coreclr':
+          # On Mac, official distribution of mono is 32bit.
+          self._make_options += ['CFLAGS=-m32', 'LDFLAGS=-m32']
       else:
         self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
 
@@ -517,17 +553,33 @@ class CSharpLanguage(object):
       tests_by_assembly = json.load(f)
 
     msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
-    nunit_args = ['--labels=All',
-                  '--noresult',
-                  '--workers=1']
-    if self.platform == 'windows':
+    nunit_args = ['--labels=All']
+    assembly_subdir = 'bin/%s' % msbuild_config
+    assembly_extension = '.exe'
+
+    if self.args.compiler == 'coreclr':
+      if self.platform == 'linux':
+        assembly_subdir += '/netstandard1.5/debian.8-x64'
+        assembly_extension = ''
+      elif self.platform == 'mac':
+        assembly_subdir += '/netstandard1.5/osx.10.11-x64'
+        assembly_extension = ''
+      else:
+        assembly_subdir += '/netstandard1.5/win7-x64'
       runtime_cmd = []
     else:
-      runtime_cmd = ['mono']
+      nunit_args += ['--noresult', '--workers=1']
+      if self.platform == 'windows':
+        runtime_cmd = []
+      else:
+        runtime_cmd = ['mono']
 
     specs = []
     for assembly in tests_by_assembly.iterkeys():
-      assembly_file = 'src/csharp/%s/bin/%s/%s.exe' % (assembly, msbuild_config, assembly)
+      assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
+                                                 assembly_subdir,
+                                                 assembly,
+                                                 assembly_extension)
       if self.config.build_config != 'gcov' or self.platform != 'windows':
         # normally, run each test as a separate process
         for test in tests_by_assembly[assembly]:
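
The path logic above locates each test binary from the assembly name, the MSBuild configuration, and, for CoreCLR, a framework/runtime subdirectory. A hedged sketch of that composition, with the runtime identifiers taken from this hunk and the example inputs purely illustrative:

```python
# Sketch of the assembly path selection shown in the hunk above; the
# msbuild_config / platform / assembly values are illustrative inputs.
def assembly_path(assembly, msbuild_config, platform, compiler):
    subdir = 'bin/%s' % msbuild_config
    extension = '.exe'
    if compiler == 'coreclr':
        if platform == 'linux':
            subdir += '/netstandard1.5/debian.8-x64'
            extension = ''
        elif platform == 'mac':
            subdir += '/netstandard1.5/osx.10.11-x64'
            extension = ''
        else:  # windows
            subdir += '/netstandard1.5/win7-x64'
    return 'src/csharp/%s/%s/%s%s' % (assembly, subdir, assembly, extension)

# e.g. assembly_path('Grpc.Core.Tests', 'Debug', 'linux', 'coreclr')
#  -> 'src/csharp/Grpc.Core.Tests/bin/Debug/netstandard1.5/debian.8-x64/Grpc.Core.Tests'
```
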
@@ -570,12 +622,18 @@ class CSharpLanguage(object):
     return self._make_options;
 
   def build_steps(self):
-    if self.platform == 'windows':
-      return [[_windows_build_bat(self.args.compiler),
-               'src/csharp/Grpc.sln',
-               '/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
+    if self.args.compiler == 'coreclr':
+      if self.platform == 'windows':
+        return [['tools\\run_tests\\build_csharp_coreclr.bat']]
+      else:
+        return [['tools/run_tests/build_csharp_coreclr.sh']]
     else:
-      return [['tools/run_tests/build_csharp.sh']]
+      if self.platform == 'windows':
+        return [[_windows_build_bat(self.args.compiler),
+                 'src/csharp/Grpc.sln',
+                 '/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
+      else:
+        return [['tools/run_tests/build_csharp.sh']]
 
   def post_tests_steps(self):
     if self.platform == 'windows':
@@ -587,7 +645,8 @@ class CSharpLanguage(object):
     return 'Makefile'
 
   def dockerfile_dir(self):
-    return 'tools/dockerfile/test/csharp_jessie_%s' % _docker_arch_suffix(self.args.arch)
+    return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
+                                                   _docker_arch_suffix(self.args.arch))
 
   def __str__(self):
     return 'csharp'
@@ -601,14 +660,20 @@ class ObjCLanguage(object):
     _check_compiler(self.args.compiler, ['default'])
 
   def test_specs(self):
-    return [self.config.job_spec(['src/objective-c/tests/run_tests.sh'], None,
-                                  environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+    return [self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
+                                 timeout_seconds=None,
+                                 shortname='objc-tests',
+                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS),
+            self.config.job_spec(['src/objective-c/tests/build_example_test.sh'],
+                                 timeout_seconds=15*60,
+                                 shortname='objc-examples-build',
+                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
 
   def pre_build_steps(self):
     return []
 
   def make_targets(self):
-    return ['grpc_objective_c_plugin', 'interop_server']
+    return ['interop_server']
 
   def make_options(self):
     return []
@@ -639,7 +704,7 @@ class Sanity(object):
   def test_specs(self):
     import yaml
     with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
-      return [self.config.job_spec(cmd['script'].split(), None,
+      return [self.config.job_spec(cmd['script'].split(),
                                    timeout_seconds=None, environ={'TEST': 'true'},
                                    cpu_cost=cmd.get('cpu_cost', 1))
               for cmd in yaml.load(f)]
@@ -701,7 +766,7 @@ def _windows_arch_option(arch):
   elif arch == 'x64':
     return '/p:Platform=x64'
   else:
-    print 'Architecture %s not supported.' % arch
+    print('Architecture %s not supported.' % arch)
     sys.exit(1)
 
 
@@ -719,37 +784,39 @@ def _check_arch_option(arch):
     elif runtime_arch == '32bit' and arch == 'x86':
       return
     else:
-      print 'Architecture %s does not match current runtime architecture.' % arch
+      print('Architecture %s does not match current runtime architecture.' % arch)
       sys.exit(1)
   else:
     if args.arch != 'default':
-      print 'Architecture %s not supported on current platform.' % args.arch
+      print('Architecture %s not supported on current platform.' % args.arch)
       sys.exit(1)
 
 
 def _windows_build_bat(compiler):
   """Returns name of build.bat for selected compiler."""
-  if compiler == 'default' or compiler == 'vs2013':
+  # For CoreCLR, fall back to the default compiler for C core
+  if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
     return 'vsprojects\\build_vs2013.bat'
   elif compiler == 'vs2015':
     return 'vsprojects\\build_vs2015.bat'
   elif compiler == 'vs2010':
     return 'vsprojects\\build_vs2010.bat'
   else:
-    print 'Compiler %s not supported.' % compiler
+    print('Compiler %s not supported.' % compiler)
     sys.exit(1)
 
 
 def _windows_toolset_option(compiler):
   """Returns msbuild PlatformToolset for selected compiler."""
-  if compiler == 'default' or compiler == 'vs2013':
+  # For CoreCLR, fall back to the default compiler for C core
+  if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
     return '/p:PlatformToolset=v120'
   elif compiler == 'vs2015':
     return '/p:PlatformToolset=v140'
   elif compiler == 'vs2010':
     return '/p:PlatformToolset=v100'
   else:
-    print 'Compiler %s not supported.' % compiler
+    print('Compiler %s not supported.' % compiler)
     sys.exit(1)
 
 
@@ -760,7 +827,7 @@ def _docker_arch_suffix(arch):
   elif arch == 'x86':
     return 'x86'
   else:
-    print 'Architecture %s not supported with current settings.' % arch
+    print('Architecture %s not supported with current settings.' % arch)
     sys.exit(1)
 
 
@@ -837,8 +904,9 @@ argp.add_argument('--compiler',
                            'gcc4.4', 'gcc4.6', 'gcc4.9', 'gcc5.3',
                            'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
                            'vs2010', 'vs2013', 'vs2015',
-                           'python2.7', 'python3.4',
-                           'node0.12', 'node4', 'node5'],
+                           'python2.7', 'python3.4', 'python3.5', 'python3.6',
+                           'node0.12', 'node4', 'node5',
+                           'coreclr'],
                   default='default',
                   help='Selects compiler to use. Allowed values depend on the platform and language.')
 argp.add_argument('--build_only',
@@ -875,7 +943,7 @@ for spec in args.update_submodules:
     branch = spec[1]
   cwd = 'third_party/%s' % submodule
   def git(cmd, cwd=cwd):
-    print 'in %s: git %s' % (cwd, cmd)
+    print('in %s: git %s' % (cwd, cmd))
     subprocess.check_call('git %s' % cmd, cwd=cwd, shell=True)
   git('fetch')
   git('checkout %s' % branch)
@@ -886,8 +954,8 @@ if need_to_regenerate_projects:
   if jobset.platform_string() == 'linux':
     subprocess.check_call('tools/buildgen/generate_projects.sh', shell=True)
   else:
-    print 'WARNING: may need to regenerate projects, but since we are not on'
-    print '         Linux this step is being skipped. Compilation MAY fail.'
+    print('WARNING: may need to regenerate projects, but since we are not on')
+    print('         Linux this step is being skipped. Compilation MAY fail.')
 
 
 # grab config
@@ -914,18 +982,18 @@ for l in languages:
 language_make_options=[]
 if any(language.make_options() for language in languages):
   if not 'gcov' in args.config and len(languages) != 1:
-    print 'languages with custom make options cannot be built simultaneously with other languages'
+    print('languages with custom make options cannot be built simultaneously with other languages')
     sys.exit(1)
   else:
     language_make_options = next(iter(languages)).make_options()
 
 if args.use_docker:
   if not args.travis:
-    print 'Seen --use_docker flag, will run tests under docker.'
-    print
-    print 'IMPORTANT: The changes you are testing need to be locally committed'
-    print 'because only the committed changes in the current branch will be'
-    print 'copied to the docker environment.'
+    print('Seen --use_docker flag, will run tests under docker.')
+    print('')
+    print('IMPORTANT: The changes you are testing need to be locally committed')
+    print('because only the committed changes in the current branch will be')
+    print('copied to the docker environment.')
     time.sleep(5)
 
   dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
@@ -1009,7 +1077,7 @@ build_steps = list(set(
                    for l in languages
                    for cmdline in l.pre_build_steps()))
 if make_targets:
-  make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.iteritems())
+  make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
   build_steps.extend(set(make_commands))
 build_steps.extend(set(
                    jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
@@ -1024,44 +1092,28 @@ runs_per_test = args.runs_per_test
 forever = args.forever
 
 
-class TestCache(object):
-  """Cache for running tests."""
-
-  def __init__(self, use_cache_results):
-    self._last_successful_run = {}
-    self._use_cache_results = use_cache_results
-    self._last_save = time.time()
-
-  def should_run(self, cmdline, bin_hash):
-    if cmdline not in self._last_successful_run:
-      return True
-    if self._last_successful_run[cmdline] != bin_hash:
-      return True
-    if not self._use_cache_results:
-      return True
-    return False
-
-  def finished(self, cmdline, bin_hash):
-    self._last_successful_run[cmdline] = bin_hash
-    if time.time() - self._last_save > 1:
-      self.save()
-
-  def dump(self):
-    return [{'cmdline': k, 'hash': v}
-            for k, v in self._last_successful_run.iteritems()]
-
-  def parse(self, exdump):
-    self._last_successful_run = dict((o['cmdline'], o['hash']) for o in exdump)
+def _shut_down_legacy_server(legacy_server_port):
+  try:
+    version = int(urllib.request.urlopen(
+        'http://localhost:%d/version_number' % legacy_server_port,
+        timeout=10).read())
+  except:
+    pass
+  else:
+    urllib.request.urlopen(
+        'http://localhost:%d/quitquitquit' % legacy_server_port).read()
 
-  def save(self):
-    with open('.run_tests_cache', 'w') as f:
-      f.write(json.dumps(self.dump()))
-    self._last_save = time.time()
 
-  def maybe_load(self):
-    if os.path.exists('.run_tests_cache'):
-      with open('.run_tests_cache') as f:
-        self.parse(json.loads(f.read()))
 
 
 def _start_port_server(port_server_port):
@@ -1070,29 +1122,29 @@ def _start_port_server(port_server_port):
   # if not running ==> start a new one
   # otherwise, leave it up
   try:
-    version = int(urllib2.urlopen(
+    version = int(urllib.request.urlopen(
         'http://localhost:%d/version_number' % port_server_port,
-        timeout=1).read())
-    print 'detected port server running version %d' % version
+        timeout=10).read())
+    print('detected port server running version %d' % version)
     running = True
   except Exception as e:
-    print 'failed to detect port server: %s' % sys.exc_info()[0]
-    print e.strerror
+    print('failed to detect port server: %s' % sys.exc_info()[0])
+    print(e.strerror)
     running = False
   if running:
     current_version = int(subprocess.check_output(
         [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
          'dump_version']))
-    print 'my port server is version %d' % current_version
+    print('my port server is version %d' % current_version)
     running = (version >= current_version)
     if not running:
-      print 'port_server version mismatch: killing the old one'
-      urllib2.urlopen('http://localhost:%d/quitquitquit' % port_server_port).read()
+      print('port_server version mismatch: killing the old one')
+      urllib.request.urlopen('http://localhost:%d/quitquitquit' % port_server_port).read()
       time.sleep(1)
   if not running:
     fd, logfile = tempfile.mkstemp()
     os.close(fd)
-    print 'starting port_server, with log file %s' % logfile
+    print('starting port_server, with log file %s' % logfile)
     args = [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
             '-p', '%d' % port_server_port, '-l', logfile]
     env = dict(os.environ)
@@ -1118,34 +1170,34 @@ def _start_port_server(port_server_port):
     waits = 0
     while True:
       if waits > 10:
-        print 'killing port server due to excessive start up waits'
+        print('killing port server due to excessive start up waits')
         port_server.kill()
       if port_server.poll() is not None:
-        print 'port_server failed to start'
+        print('port_server failed to start')
         # try one final time: maybe another build managed to start one
         time.sleep(1)
         try:
-          urllib2.urlopen('http://localhost:%d/get' % port_server_port,
+          urllib.request.urlopen('http://localhost:%d/get' % port_server_port,
                           timeout=1).read()
-          print 'last ditch attempt to contact port server succeeded'
+          print('last ditch attempt to contact port server succeeded')
           break
         except:
           traceback.print_exc()
           port_log = open(logfile, 'r').read()
-          print port_log
+          print(port_log)
           sys.exit(1)
       try:
-        urllib2.urlopen('http://localhost:%d/get' % port_server_port,
+        urllib.request.urlopen('http://localhost:%d/get' % port_server_port,
                         timeout=1).read()
-        print 'port server is up and ready'
+        print('port server is up and ready')
         break
       except socket.timeout:
-        print 'waiting for port_server: timeout'
+        print('waiting for port_server: timeout')
         traceback.print_exc();
         time.sleep(1)
         waits += 1
-      except urllib2.URLError:
-        print 'waiting for port_server: urlerror'
+      except urllib.error.URLError:
+        print('waiting for port_server: urlerror')
         traceback.print_exc();
         time.sleep(1)
         waits += 1
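
The `_start_port_server` hunk above probes a possibly-running port server over HTTP, kills stale versions via `/quitquitquit`, and retries `/get` while a fresh one starts. A minimal standalone sketch of the same probe/quit pattern, using only the endpoints exercised in this hunk; the port constant is illustrative:

```python
# Minimal standalone sketch of the probe/quit pattern used above.
# Assumes a port server exposing /version_number, /quitquitquit and /get.
try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib2 import urlopen          # Python 2 fallback

PORT = 32766  # illustrative; run_tests.py uses its own constant


def probe_version(port, timeout=10):
    """Return the running port server's version, or None if unreachable."""
    try:
        return int(urlopen('http://localhost:%d/version_number' % port,
                           timeout=timeout).read())
    except Exception:
        return None


def shut_down(port):
    """Ask a running port server to exit; ignore errors if none is running."""
    try:
        urlopen('http://localhost:%d/quitquitquit' % port, timeout=10).read()
    except Exception:
        pass


if __name__ == '__main__':
    print('port server version: %r' % probe_version(PORT))
```
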
@@ -1183,7 +1235,7 @@ class BuildAndRunError(object):
 
 # returns a list of things that failed (or an empty list on success)
 def _build_and_run(
-    check_cancelled, newline_on_success, cache, xml_report=None, build_only=False):
+    check_cancelled, newline_on_success, xml_report=None, build_only=False):
   """Do one pass of building & running tests."""
   # build latest sequentially
   num_failures, resultset = jobset.run(
@@ -1200,7 +1252,7 @@ def _build_and_run(
   # start antagonists
   antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py'])
                  for _ in range(0, args.antagonists)]
-  port_server_port = 32767
+  port_server_port = 32766
   _start_port_server(port_server_port)
   resultset = None
   num_test_failures = 0
@@ -1232,7 +1284,6 @@ def _build_and_run(
         all_runs, check_cancelled, newline_on_success=newline_on_success,
         travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
         stop_on_failure=args.stop_on_failure,
-        cache=cache if not xml_report else None,
         add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
     if resultset:
       for k, v in sorted(resultset.items()):
@@ -1261,14 +1312,9 @@ def _build_and_run(
   if num_test_failures:
     out.append(BuildAndRunError.TEST)
 
-  if cache: cache.save()
-
   return out
 
 
-test_cache = TestCache(runs_per_test == 1)
-test_cache.maybe_load()
-
 if forever:
   success = True
   while True:
@@ -1278,7 +1324,6 @@ if forever:
     previous_success = success
     errors = _build_and_run(check_cancelled=have_files_changed,
                             newline_on_success=False,
-                            cache=test_cache,
                             build_only=args.build_only) == 0
     if not previous_success and not errors:
       jobset.message('SUCCESS',
@@ -1290,7 +1335,6 @@ if forever:
 else:
   errors = _build_and_run(check_cancelled=lambda: False,
                           newline_on_success=args.newline_on_success,
-                          cache=test_cache,
                           xml_report=args.xml_report,
                           build_only=args.build_only)
   if not errors:
diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh
index f2d7a1429ea802c14096d0e5ec6d5fbdbe602c8d..b602d695649ffa0643299bfdfdd0331e38fda1e3 100755
--- a/tools/run_tests/sanity/check_submodules.sh
+++ b/tools/run_tests/sanity/check_submodules.sh
@@ -45,7 +45,7 @@ cat << EOF | awk '{ print $1 }' | sort > $want_submodules
  05b155ff59114735ec8cd089f669c4c3d8f59029 third_party/gflags (v2.1.0-45-g05b155f)
  c99458533a9b4c743ed51537e25989ea55944908 third_party/googletest (release-1.7.0)
  f8ac463766281625ad710900479130c7fcb4d63b third_party/nanopb (nanopb-0.3.4-29-gf8ac463)
- d4d13a4349e4e59d67f311185ddcc1890d956d7a third_party/protobuf (v3.0.0-beta-3.2)
+ bdeb215cab2985195325fcd5e70c3fa751f46e0f third_party/protobuf (v3.0.0-beta-3.3)
  50893291621658f355bc5b4d450a8d06a563053d third_party/zlib (v1.2.8)
 EOF
 
diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json
index 9144b07ded886e6cc2800952858ca706e0c60909..c297025d6734d2a7f4c6af6ed6732a7650890333 100644
--- a/tools/run_tests/sources_and_headers.json
+++ b/tools/run_tests/sources_and_headers.json
@@ -79,6 +79,23 @@
     "third_party": false, 
     "type": "target"
   }, 
+  {
+    "deps": [
+      "gpr", 
+      "gpr_test_util", 
+      "grpc", 
+      "grpc_test_util", 
+      "test_tcp_server"
+    ], 
+    "headers": [], 
+    "language": "c", 
+    "name": "bad_server_response_test", 
+    "src": [
+      "test/core/end2end/bad_server_response_test.c"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
   {
     "deps": [
       "grpc", 
@@ -315,6 +332,22 @@
     "third_party": false, 
     "type": "target"
   }, 
+  {
+    "deps": [
+      "gpr", 
+      "gpr_test_util", 
+      "grpc", 
+      "grpc_test_util"
+    ], 
+    "headers": [], 
+    "language": "c", 
+    "name": "ev_epoll_linux_test", 
+    "src": [
+      "test/core/iomgr/ev_epoll_linux_test.c"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
   {
     "deps": [
       "gpr", 
@@ -789,9 +822,7 @@
   {
     "deps": [
       "gpr", 
-      "gpr_test_util", 
-      "grpc", 
-      "grpc_test_util"
+      "grpc"
     ], 
     "headers": [], 
     "language": "c", 
@@ -885,9 +916,7 @@
   {
     "deps": [
       "gpr", 
-      "gpr_test_util", 
-      "grpc", 
-      "grpc_test_util"
+      "grpc"
     ], 
     "headers": [], 
     "language": "c", 
@@ -917,9 +946,7 @@
   {
     "deps": [
       "gpr", 
-      "gpr_test_util", 
-      "grpc", 
-      "grpc_test_util"
+      "grpc"
     ], 
     "headers": [], 
     "language": "c", 
@@ -1695,22 +1722,6 @@
     "third_party": false, 
     "type": "target"
   }, 
-  {
-    "deps": [
-      "gpr", 
-      "gpr_test_util", 
-      "grpc", 
-      "grpc_test_util"
-    ], 
-    "headers": [], 
-    "language": "c", 
-    "name": "timers_test", 
-    "src": [
-      "test/core/profiling/timers_test.c"
-    ], 
-    "third_party": false, 
-    "type": "target"
-  }, 
   {
     "deps": [
       "gpr", 
@@ -4648,7 +4659,7 @@
     "language": "c++", 
     "name": "interop_server_main", 
     "src": [
-      "test/cpp/interop/server_main.cc"
+      "test/cpp/interop/interop_server.cc"
     ], 
     "third_party": false, 
     "type": "lib"
@@ -5411,6 +5422,7 @@
       "test/core/end2end/tests/max_concurrent_streams.c", 
       "test/core/end2end/tests/max_message_length.c", 
       "test/core/end2end/tests/negative_deadline.c", 
+      "test/core/end2end/tests/network_status_change.c", 
       "test/core/end2end/tests/no_op.c", 
       "test/core/end2end/tests/payload.c", 
       "test/core/end2end/tests/ping.c", 
@@ -5424,6 +5436,7 @@
       "test/core/end2end/tests/simple_delayed_request.c", 
       "test/core/end2end/tests/simple_metadata.c", 
       "test/core/end2end/tests/simple_request.c", 
+      "test/core/end2end/tests/streaming_error_response.c", 
       "test/core/end2end/tests/trailing_metadata.c"
     ], 
     "third_party": false, 
@@ -5469,6 +5482,7 @@
       "test/core/end2end/tests/max_concurrent_streams.c", 
       "test/core/end2end/tests/max_message_length.c", 
       "test/core/end2end/tests/negative_deadline.c", 
+      "test/core/end2end/tests/network_status_change.c", 
       "test/core/end2end/tests/no_op.c", 
       "test/core/end2end/tests/payload.c", 
       "test/core/end2end/tests/ping.c", 
@@ -5482,6 +5496,7 @@
       "test/core/end2end/tests/simple_delayed_request.c", 
       "test/core/end2end/tests/simple_metadata.c", 
       "test/core/end2end/tests/simple_request.c", 
+      "test/core/end2end/tests/streaming_error_response.c", 
       "test/core/end2end/tests/trailing_metadata.c"
     ], 
     "third_party": false, 
@@ -5703,19 +5718,6 @@
     "third_party": false, 
     "type": "filegroup"
   }, 
-  {
-    "deps": [
-      "grpc++_codegen_base"
-    ], 
-    "headers": [], 
-    "language": "c", 
-    "name": "grpc++_codegen_base_src", 
-    "src": [
-      "src/cpp/codegen/codegen_init.cc"
-    ], 
-    "third_party": false, 
-    "type": "filegroup"
-  }, 
   {
     "deps": [
       "gpr", 
@@ -5746,6 +5748,7 @@
       "src/core/lib/iomgr/endpoint.h", 
       "src/core/lib/iomgr/endpoint_pair.h", 
       "src/core/lib/iomgr/error.h", 
+      "src/core/lib/iomgr/ev_epoll_linux.h", 
       "src/core/lib/iomgr/ev_poll_and_epoll_posix.h", 
       "src/core/lib/iomgr/ev_poll_posix.h", 
       "src/core/lib/iomgr/ev_posix.h", 
@@ -5756,6 +5759,7 @@
       "src/core/lib/iomgr/iomgr_internal.h", 
       "src/core/lib/iomgr/iomgr_posix.h", 
       "src/core/lib/iomgr/load_file.h", 
+      "src/core/lib/iomgr/network_status_tracker.h", 
       "src/core/lib/iomgr/polling_entity.h", 
       "src/core/lib/iomgr/pollset.h", 
       "src/core/lib/iomgr/pollset_set.h", 
@@ -5797,7 +5801,6 @@
       "src/core/lib/surface/init.h", 
       "src/core/lib/surface/lame_client.h", 
       "src/core/lib/surface/server.h", 
-      "src/core/lib/surface/surface_trace.h", 
       "src/core/lib/transport/byte_stream.h", 
       "src/core/lib/transport/connectivity_state.h", 
       "src/core/lib/transport/metadata.h", 
@@ -5851,6 +5854,8 @@
       "src/core/lib/iomgr/endpoint_pair_windows.c", 
       "src/core/lib/iomgr/error.c", 
       "src/core/lib/iomgr/error.h", 
+      "src/core/lib/iomgr/ev_epoll_linux.c", 
+      "src/core/lib/iomgr/ev_epoll_linux.h", 
       "src/core/lib/iomgr/ev_poll_and_epoll_posix.c", 
       "src/core/lib/iomgr/ev_poll_and_epoll_posix.h", 
       "src/core/lib/iomgr/ev_poll_posix.c", 
@@ -5871,6 +5876,8 @@
       "src/core/lib/iomgr/iomgr_windows.c", 
       "src/core/lib/iomgr/load_file.c", 
       "src/core/lib/iomgr/load_file.h", 
+      "src/core/lib/iomgr/network_status_tracker.c", 
+      "src/core/lib/iomgr/network_status_tracker.h", 
       "src/core/lib/iomgr/polling_entity.c", 
       "src/core/lib/iomgr/polling_entity.h", 
       "src/core/lib/iomgr/pollset.h", 
@@ -5960,7 +5967,6 @@
       "src/core/lib/surface/metadata_array.c", 
       "src/core/lib/surface/server.c", 
       "src/core/lib/surface/server.h", 
-      "src/core/lib/surface/surface_trace.h", 
       "src/core/lib/surface/validate_metadata.c", 
       "src/core/lib/surface/version.c", 
       "src/core/lib/transport/byte_stream.c", 
@@ -6747,6 +6753,19 @@
     "third_party": false, 
     "type": "filegroup"
   }, 
+  {
+    "deps": [
+      "grpc++_codegen_base"
+    ], 
+    "headers": [], 
+    "language": "c++", 
+    "name": "grpc++_codegen_base_src", 
+    "src": [
+      "src/cpp/codegen/codegen_init.cc"
+    ], 
+    "third_party": false, 
+    "type": "filegroup"
+  }, 
   {
     "deps": [
       "grpc++_codegen_base", 
diff --git a/tools/run_tests/task_runner.py b/tools/run_tests/task_runner.py
index b42aa17cbbe890d6d1bf59b703e1c8a300b7d490..2e3fa443b960075f16139b485796957c23bd7568 100755
--- a/tools/run_tests/task_runner.py
+++ b/tools/run_tests/task_runner.py
@@ -30,6 +30,8 @@
 
 """Runs selected gRPC test/build tasks."""
 
+from __future__ import print_function
+
 import argparse
 import atexit
 import jobset
@@ -111,7 +113,7 @@ build_jobs = []
 for target in targets:
   build_jobs.append(target.build_jobspec())
 if not build_jobs:
-  print 'Nothing to build.'
+  print('Nothing to build.')
   sys.exit(1)
 
 jobset.message('START', 'Building targets.', do_newline=True)
diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json
index ad8651c4f178b21f664fe21f77981894c5b4fe97..6abfb8770f1bd81a61ea84f12c95609cd27da55e 100644
--- a/tools/run_tests/tests.json
+++ b/tools/run_tests/tests.json
@@ -85,6 +85,27 @@
       "windows"
     ]
   }, 
+  {
+    "args": [], 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "gtest": false, 
+    "language": "c", 
+    "name": "bad_server_response_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ]
+  }, 
   {
     "args": [], 
     "ci_platforms": [
@@ -377,6 +398,21 @@
       "windows"
     ]
   }, 
+  {
+    "args": [], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "gtest": false, 
+    "language": "c", 
+    "name": "ev_epoll_linux_test", 
+    "platforms": [
+      "linux"
+    ]
+  }, 
   {
     "args": [], 
     "ci_platforms": [
@@ -732,7 +768,7 @@
       "posix", 
       "windows"
     ], 
-    "cpu_cost": 3, 
+    "cpu_cost": 10, 
     "exclude_configs": [], 
     "flaky": false, 
     "gtest": false, 
@@ -753,7 +789,7 @@
       "posix", 
       "windows"
     ], 
-    "cpu_cost": 1, 
+    "cpu_cost": 10, 
     "exclude_configs": [], 
     "flaky": false, 
     "gtest": false, 
@@ -1798,27 +1834,6 @@
       "windows"
     ]
   }, 
-  {
-    "args": [], 
-    "ci_platforms": [
-      "linux", 
-      "mac", 
-      "posix", 
-      "windows"
-    ], 
-    "cpu_cost": 1.0, 
-    "exclude_configs": [], 
-    "flaky": false, 
-    "gtest": false, 
-    "language": "c", 
-    "name": "timers_test", 
-    "platforms": [
-      "linux", 
-      "mac", 
-      "posix", 
-      "windows"
-    ]
-  }, 
   {
     "args": [], 
     "ci_platforms": [
@@ -4921,6 +4936,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_census_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -5207,6 +5244,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_census_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -5757,6 +5816,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_compress_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -6043,6 +6124,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_compress_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -6569,6 +6672,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_fakesec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -6842,6 +6966,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_fakesec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -7285,7 +7430,7 @@
   }, 
   {
     "args": [
-      "no_op"
+      "network_status_change"
     ], 
     "ci_platforms": [
       "linux", 
@@ -7305,7 +7450,7 @@
   }, 
   {
     "args": [
-      "payload"
+      "no_op"
     ], 
     "ci_platforms": [
       "linux", 
@@ -7325,7 +7470,7 @@
   }, 
   {
     "args": [
-      "ping_pong_streaming"
+      "payload"
     ], 
     "ci_platforms": [
       "linux", 
@@ -7345,7 +7490,7 @@
   }, 
   {
     "args": [
-      "registered_call"
+      "ping_pong_streaming"
     ], 
     "ci_platforms": [
       "linux", 
@@ -7365,14 +7510,14 @@
   }, 
   {
     "args": [
-      "request_with_flags"
+      "registered_call"
     ], 
     "ci_platforms": [
       "linux", 
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 0.1, 
+    "cpu_cost": 1.0, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -7385,14 +7530,14 @@
   }, 
   {
     "args": [
-      "request_with_payload"
+      "request_with_flags"
     ], 
     "ci_platforms": [
       "linux", 
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 1.0, 
+    "cpu_cost": 0.1, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -7405,7 +7550,7 @@
   }, 
   {
     "args": [
-      "server_finishes_request"
+      "request_with_payload"
     ], 
     "ci_platforms": [
       "linux", 
@@ -7425,7 +7570,7 @@
   }, 
   {
     "args": [
-      "shutdown_finishes_calls"
+      "server_finishes_request"
     ], 
     "ci_platforms": [
       "linux", 
@@ -7445,7 +7590,7 @@
   }, 
   {
     "args": [
-      "shutdown_finishes_tags"
+      "shutdown_finishes_calls"
     ], 
     "ci_platforms": [
       "linux", 
@@ -7465,7 +7610,7 @@
   }, 
   {
     "args": [
-      "simple_metadata"
+      "shutdown_finishes_tags"
     ], 
     "ci_platforms": [
       "linux", 
@@ -7485,7 +7630,7 @@
   }, 
   {
     "args": [
-      "simple_request"
+      "simple_metadata"
     ], 
     "ci_platforms": [
       "linux", 
@@ -7505,7 +7650,7 @@
   }, 
   {
     "args": [
-      "trailing_metadata"
+      "simple_request"
     ], 
     "ci_platforms": [
       "linux", 
@@ -7525,10 +7670,9 @@
   }, 
   {
     "args": [
-      "bad_hostname"
+      "streaming_error_response"
     ], 
     "ci_platforms": [
-      "windows", 
       "linux", 
       "mac", 
       "posix"
@@ -7537,9 +7681,8 @@
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
-    "name": "h2_full_test", 
+    "name": "h2_fd_test", 
     "platforms": [
-      "windows", 
       "linux", 
       "mac", 
       "posix"
@@ -7547,10 +7690,9 @@
   }, 
   {
     "args": [
-      "binary_metadata"
+      "trailing_metadata"
     ], 
     "ci_platforms": [
-      "windows", 
       "linux", 
       "mac", 
       "posix"
@@ -7559,9 +7701,8 @@
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
-    "name": "h2_full_test", 
+    "name": "h2_fd_test", 
     "platforms": [
-      "windows", 
       "linux", 
       "mac", 
       "posix"
@@ -7569,7 +7710,7 @@
   }, 
   {
     "args": [
-      "call_creds"
+      "bad_hostname"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7591,7 +7732,7 @@
   }, 
   {
     "args": [
-      "cancel_after_accept"
+      "binary_metadata"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7599,7 +7740,7 @@
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 0.1, 
+    "cpu_cost": 1.0, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -7613,7 +7754,7 @@
   }, 
   {
     "args": [
-      "cancel_after_client_done"
+      "call_creds"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7635,7 +7776,7 @@
   }, 
   {
     "args": [
-      "cancel_after_invoke"
+      "cancel_after_accept"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7657,7 +7798,7 @@
   }, 
   {
     "args": [
-      "cancel_before_invoke"
+      "cancel_after_client_done"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7665,7 +7806,7 @@
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 0.1, 
+    "cpu_cost": 1.0, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -7679,7 +7820,7 @@
   }, 
   {
     "args": [
-      "cancel_in_a_vacuum"
+      "cancel_after_invoke"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7701,7 +7842,7 @@
   }, 
   {
     "args": [
-      "cancel_with_status"
+      "cancel_before_invoke"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7723,7 +7864,7 @@
   }, 
   {
     "args": [
-      "compressed_payload"
+      "cancel_in_a_vacuum"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7731,7 +7872,7 @@
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 1.0, 
+    "cpu_cost": 0.1, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -7745,7 +7886,7 @@
   }, 
   {
     "args": [
-      "connectivity"
+      "cancel_with_status"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7767,7 +7908,7 @@
   }, 
   {
     "args": [
-      "default_host"
+      "compressed_payload"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7789,7 +7930,7 @@
   }, 
   {
     "args": [
-      "disappearing_server"
+      "connectivity"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7797,7 +7938,7 @@
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 1.0, 
+    "cpu_cost": 0.1, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -7811,7 +7952,7 @@
   }, 
   {
     "args": [
-      "empty_batch"
+      "default_host"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7833,7 +7974,7 @@
   }, 
   {
     "args": [
-      "filter_causes_close"
+      "disappearing_server"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7855,7 +7996,7 @@
   }, 
   {
     "args": [
-      "graceful_server_shutdown"
+      "empty_batch"
     ], 
     "ci_platforms": [
       "windows", 
@@ -7863,7 +8004,7 @@
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 0.1, 
+    "cpu_cost": 1.0, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -7877,7 +8018,51 @@
   }, 
   {
     "args": [
-      "high_initial_seqno"
+      "filter_causes_close"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
+      "graceful_server_shutdown"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
+      "high_initial_seqno"
     ], 
     "ci_platforms": [
       "windows", 
@@ -8051,6 +8236,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -8337,6 +8544,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -8743,6 +8972,22 @@
       "linux"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+pipe_test", 
+    "platforms": [
+      "linux"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -8951,6 +9196,22 @@
       "linux"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+pipe_test", 
+    "platforms": [
+      "linux"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -9473,6 +9734,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+trace_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -9759,6 +10042,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+trace_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -10309,6 +10614,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_loadreporting_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -10595,6 +10922,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_loadreporting_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -11121,6 +11470,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_oauth2_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -11394,6 +11764,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_oauth2_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -11835,6 +12226,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_proxy_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -12066,6 +12478,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_proxy_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -12528,6 +12961,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -12759,6 +13213,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -13200,6 +13675,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair+trace_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -13431,6 +13927,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair+trace_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -13893,6 +14410,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_1byte_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -14124,6 +14662,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_1byte_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -14673,6 +15232,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_ssl_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -14959,6 +15540,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_ssl_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -15509,6 +16112,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_ssl_cert_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -15795,6 +16420,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_ssl_cert_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -16239,28 +16886,7 @@
   }, 
   {
     "args": [
-      "no_op"
-    ], 
-    "ci_platforms": [
-      "windows", 
-      "linux", 
-      "posix"
-    ], 
-    "cpu_cost": 1.0, 
-    "exclude_configs": [], 
-    "flaky": false, 
-    "language": "c", 
-    "name": "h2_ssl_proxy_test", 
-    "platforms": [
-      "windows", 
-      "linux", 
-      "mac", 
-      "posix"
-    ]
-  }, 
-  {
-    "args": [
-      "payload"
+      "network_status_change"
     ], 
     "ci_platforms": [
       "windows", 
@@ -16281,7 +16907,7 @@
   }, 
   {
     "args": [
-      "ping_pong_streaming"
+      "no_op"
     ], 
     "ci_platforms": [
       "windows", 
@@ -16302,7 +16928,7 @@
   }, 
   {
     "args": [
-      "registered_call"
+      "payload"
     ], 
     "ci_platforms": [
       "windows", 
@@ -16323,7 +16949,7 @@
   }, 
   {
     "args": [
-      "request_with_payload"
+      "ping_pong_streaming"
     ], 
     "ci_platforms": [
       "windows", 
@@ -16344,7 +16970,7 @@
   }, 
   {
     "args": [
-      "server_finishes_request"
+      "registered_call"
     ], 
     "ci_platforms": [
       "windows", 
@@ -16365,7 +16991,7 @@
   }, 
   {
     "args": [
-      "shutdown_finishes_calls"
+      "request_with_payload"
     ], 
     "ci_platforms": [
       "windows", 
@@ -16386,7 +17012,49 @@
   }, 
   {
     "args": [
-      "shutdown_finishes_tags"
+      "server_finishes_request"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_ssl_proxy_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
+      "shutdown_finishes_calls"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_ssl_proxy_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
+      "shutdown_finishes_tags"
     ], 
     "ci_platforms": [
       "windows", 
@@ -16468,6 +17136,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_ssl_proxy_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -16949,6 +17638,26 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_uds_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -17209,6 +17918,26 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_uds_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -17735,6 +18464,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_census_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -18021,6 +18772,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_census_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -18549,6 +19322,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_compress_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -18835,6 +19630,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_compress_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -19259,7 +20076,7 @@
   }, 
   {
     "args": [
-      "no_op"
+      "network_status_change"
     ], 
     "ci_platforms": [
       "linux", 
@@ -19279,7 +20096,7 @@
   }, 
   {
     "args": [
-      "payload"
+      "no_op"
     ], 
     "ci_platforms": [
       "linux", 
@@ -19299,7 +20116,7 @@
   }, 
   {
     "args": [
-      "ping_pong_streaming"
+      "payload"
     ], 
     "ci_platforms": [
       "linux", 
@@ -19319,7 +20136,7 @@
   }, 
   {
     "args": [
-      "registered_call"
+      "ping_pong_streaming"
     ], 
     "ci_platforms": [
       "linux", 
@@ -19339,14 +20156,14 @@
   }, 
   {
     "args": [
-      "request_with_flags"
+      "registered_call"
     ], 
     "ci_platforms": [
       "linux", 
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 0.1, 
+    "cpu_cost": 1.0, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -19359,14 +20176,14 @@
   }, 
   {
     "args": [
-      "request_with_payload"
+      "request_with_flags"
     ], 
     "ci_platforms": [
       "linux", 
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 1.0, 
+    "cpu_cost": 0.1, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -19379,7 +20196,7 @@
   }, 
   {
     "args": [
-      "server_finishes_request"
+      "request_with_payload"
     ], 
     "ci_platforms": [
       "linux", 
@@ -19399,7 +20216,7 @@
   }, 
   {
     "args": [
-      "shutdown_finishes_calls"
+      "server_finishes_request"
     ], 
     "ci_platforms": [
       "linux", 
@@ -19419,7 +20236,7 @@
   }, 
   {
     "args": [
-      "shutdown_finishes_tags"
+      "shutdown_finishes_calls"
     ], 
     "ci_platforms": [
       "linux", 
@@ -19439,7 +20256,7 @@
   }, 
   {
     "args": [
-      "simple_metadata"
+      "shutdown_finishes_tags"
     ], 
     "ci_platforms": [
       "linux", 
@@ -19459,7 +20276,7 @@
   }, 
   {
     "args": [
-      "simple_request"
+      "simple_metadata"
     ], 
     "ci_platforms": [
       "linux", 
@@ -19479,7 +20296,7 @@
   }, 
   {
     "args": [
-      "trailing_metadata"
+      "simple_request"
     ], 
     "ci_platforms": [
       "linux", 
@@ -19499,10 +20316,9 @@
   }, 
   {
     "args": [
-      "bad_hostname"
+      "streaming_error_response"
     ], 
     "ci_platforms": [
-      "windows", 
       "linux", 
       "mac", 
       "posix"
@@ -19511,9 +20327,8 @@
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
-    "name": "h2_full_nosec_test", 
+    "name": "h2_fd_nosec_test", 
     "platforms": [
-      "windows", 
       "linux", 
       "mac", 
       "posix"
@@ -19521,10 +20336,9 @@
   }, 
   {
     "args": [
-      "binary_metadata"
+      "trailing_metadata"
     ], 
     "ci_platforms": [
-      "windows", 
       "linux", 
       "mac", 
       "posix"
@@ -19533,9 +20347,8 @@
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
-    "name": "h2_full_nosec_test", 
+    "name": "h2_fd_nosec_test", 
     "platforms": [
-      "windows", 
       "linux", 
       "mac", 
       "posix"
@@ -19543,7 +20356,7 @@
   }, 
   {
     "args": [
-      "cancel_after_accept"
+      "bad_hostname"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19551,7 +20364,7 @@
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 0.1, 
+    "cpu_cost": 1.0, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -19565,7 +20378,7 @@
   }, 
   {
     "args": [
-      "cancel_after_client_done"
+      "binary_metadata"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19587,7 +20400,7 @@
   }, 
   {
     "args": [
-      "cancel_after_invoke"
+      "cancel_after_accept"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19609,7 +20422,7 @@
   }, 
   {
     "args": [
-      "cancel_before_invoke"
+      "cancel_after_client_done"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19617,7 +20430,7 @@
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 0.1, 
+    "cpu_cost": 1.0, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -19631,7 +20444,7 @@
   }, 
   {
     "args": [
-      "cancel_in_a_vacuum"
+      "cancel_after_invoke"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19653,7 +20466,7 @@
   }, 
   {
     "args": [
-      "cancel_with_status"
+      "cancel_before_invoke"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19675,7 +20488,7 @@
   }, 
   {
     "args": [
-      "compressed_payload"
+      "cancel_in_a_vacuum"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19683,7 +20496,7 @@
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 1.0, 
+    "cpu_cost": 0.1, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -19697,7 +20510,7 @@
   }, 
   {
     "args": [
-      "connectivity"
+      "cancel_with_status"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19719,7 +20532,7 @@
   }, 
   {
     "args": [
-      "default_host"
+      "compressed_payload"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19741,7 +20554,7 @@
   }, 
   {
     "args": [
-      "disappearing_server"
+      "connectivity"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19749,7 +20562,7 @@
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 1.0, 
+    "cpu_cost": 0.1, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -19763,7 +20576,7 @@
   }, 
   {
     "args": [
-      "empty_batch"
+      "default_host"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19785,7 +20598,7 @@
   }, 
   {
     "args": [
-      "filter_causes_close"
+      "disappearing_server"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19807,7 +20620,7 @@
   }, 
   {
     "args": [
-      "graceful_server_shutdown"
+      "empty_batch"
     ], 
     "ci_platforms": [
       "windows", 
@@ -19815,7 +20628,7 @@
       "mac", 
       "posix"
     ], 
-    "cpu_cost": 0.1, 
+    "cpu_cost": 1.0, 
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
@@ -19829,7 +20642,51 @@
   }, 
   {
     "args": [
-      "high_initial_seqno"
+      "filter_causes_close"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
+      "graceful_server_shutdown"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
+      "high_initial_seqno"
     ], 
     "ci_platforms": [
       "windows", 
@@ -20003,6 +20860,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -20289,6 +21168,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -20679,6 +21580,22 @@
       "linux"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+pipe_nosec_test", 
+    "platforms": [
+      "linux"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -20887,6 +21804,22 @@
       "linux"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+pipe_nosec_test", 
+    "platforms": [
+      "linux"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -21387,6 +22320,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+trace_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -21673,6 +22628,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+trace_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -22201,6 +23178,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_loadreporting_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -22487,6 +23486,28 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_loadreporting_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -22908,6 +23929,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_proxy_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -23139,6 +24181,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_proxy_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -23580,6 +24643,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -23811,6 +24895,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -24231,6 +25336,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair+trace_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -24462,6 +25588,27 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair+trace_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -24943,6 +26090,29 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [
+      "msan"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_1byte_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -25196,6 +26366,29 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [
+      "msan"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_1byte_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -25659,6 +26852,26 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "network_status_change"
+    ], 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_uds_nosec_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "no_op"
@@ -25919,6 +27132,26 @@
       "posix"
     ]
   }, 
+  {
+    "args": [
+      "streaming_error_response"
+    ], 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_uds_nosec_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
   {
     "args": [
       "trailing_metadata"
@@ -54209,6 +55442,25 @@
     ], 
     "uses_polling": false
   }, 
+  {
+    "args": [
+      "test/core/end2end/fuzzers/client_fuzzer_corpus/2c452818a10ddef09b90c89a53db14b9b56b21f3"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [
+      "tsan"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "client_fuzzer_one_entry", 
+    "platforms": [
+      "linux"
+    ], 
+    "uses_polling": false
+  }, 
   {
     "args": [
       "test/core/end2end/fuzzers/client_fuzzer_corpus/2c6e69067c68c145dc5d3a60b86d8081fdf95d0d"
@@ -55007,6 +56259,25 @@
     ], 
     "uses_polling": false
   }, 
+  {
+    "args": [
+      "test/core/end2end/fuzzers/client_fuzzer_corpus/42ead79c94eccdf8a8c3d8036be73e14fa260dd5"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [
+      "tsan"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "client_fuzzer_one_entry", 
+    "platforms": [
+      "linux"
+    ], 
+    "uses_polling": false
+  }, 
   {
     "args": [
       "test/core/end2end/fuzzers/client_fuzzer_corpus/43202ad9b1a689d919ab9ae91c2d0223394867bf"
@@ -55349,6 +56620,25 @@
     ], 
     "uses_polling": false
   }, 
+  {
+    "args": [
+      "test/core/end2end/fuzzers/client_fuzzer_corpus/4e05d6cf1c3f0c04f6ee92d09a53ee0fe35c085a"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [
+      "tsan"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "client_fuzzer_one_entry", 
+    "platforms": [
+      "linux"
+    ], 
+    "uses_polling": false
+  }, 
   {
     "args": [
       "test/core/end2end/fuzzers/client_fuzzer_corpus/4e21c4b5c454df51c102f09ea1ba78c42133ee16"
@@ -57268,6 +58558,25 @@
     ], 
     "uses_polling": false
   }, 
+  {
+    "args": [
+      "test/core/end2end/fuzzers/client_fuzzer_corpus/8f980dd25f1c77e3536131c2c620aa32e8c13180"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [
+      "tsan"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "client_fuzzer_one_entry", 
+    "platforms": [
+      "linux"
+    ], 
+    "uses_polling": false
+  }, 
   {
     "args": [
       "test/core/end2end/fuzzers/client_fuzzer_corpus/90a9c3390752b94ca19a58cb2fe6267bc818f718"
@@ -58484,6 +59793,25 @@
     ], 
     "uses_polling": false
   }, 
+  {
+    "args": [
+      "test/core/end2end/fuzzers/client_fuzzer_corpus/aef36c49d7dec0dcf8cdc224d9e9221fa2cb1db0"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [
+      "tsan"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "client_fuzzer_one_entry", 
+    "platforms": [
+      "linux"
+    ], 
+    "uses_polling": false
+  }, 
   {
     "args": [
       "test/core/end2end/fuzzers/client_fuzzer_corpus/af8b24ffaecdfaf96c0cd7c76f31dc9e1b4d0935"
@@ -59529,6 +60857,25 @@
     ], 
     "uses_polling": false
   }, 
+  {
+    "args": [
+      "test/core/end2end/fuzzers/client_fuzzer_corpus/crash-14ed70cd9ea7987cdd0c8f6e39398ee7c60ee2ff"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [
+      "tsan"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "client_fuzzer_one_entry", 
+    "platforms": [
+      "linux"
+    ], 
+    "uses_polling": false
+  }, 
   {
     "args": [
       "test/core/end2end/fuzzers/client_fuzzer_corpus/crash-3bd02c98286bfa7be8e13c5500ddb587bba74fbb"
@@ -60194,6 +61541,25 @@
     ], 
     "uses_polling": false
   }, 
+  {
+    "args": [
+      "test/core/end2end/fuzzers/client_fuzzer_corpus/dcb06a6e34cbed15515e5b3581ca666f704777bd"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [
+      "tsan"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "client_fuzzer_one_entry", 
+    "platforms": [
+      "linux"
+    ], 
+    "uses_polling": false
+  }, 
   {
     "args": [
       "test/core/end2end/fuzzers/client_fuzzer_corpus/dccd1fd6d3394f5f68c87950ed7356a2e9ef0f6f"
@@ -60688,6 +62054,25 @@
     ], 
     "uses_polling": false
   }, 
+  {
+    "args": [
+      "test/core/end2end/fuzzers/client_fuzzer_corpus/ea46b684f1e67a27c231f2d536c41da631189b9c"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [
+      "tsan"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "client_fuzzer_one_entry", 
+    "platforms": [
+      "linux"
+    ], 
+    "uses_polling": false
+  }, 
   {
     "args": [
       "test/core/end2end/fuzzers/client_fuzzer_corpus/eb969b9ab1b0d6b5d197795223ba7a091ebd8460"
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 66b74a32db00a2b9fd2325e1fa0731e2dbc437e6..0000000000000000000000000000000000000000
--- a/tox.ini
+++ /dev/null
@@ -1,26 +0,0 @@
-# GRPC Python tox (test environment) settings
-[tox]
-skipsdist = true
-envlist = py27,py34
-
-[testenv]
-setenv =
-    PYGRPC_ROOT = {toxinidir}/src/python/grpcio/
-commands =
-    {envpython} setup.py build_py
-    {envpython} setup.py test
-    {envbindir}/coverage combine
-# TODO(atash): we currently ignore cygrpc.pyx due to an insufficiency in Cython's coverage plug-in. Discussion is ongoing.
-    {envbindir}/coverage html --include='{env:PYGRPC_ROOT}/grpc/*' --omit='{env:PYGRPC_ROOT}/grpc/framework/alpha/*','{env:PYGRPC_ROOT}/grpc/early_adopter/*','{env:PYGRPC_ROOT}/grpc/framework/base/*','{env:PYGRPC_ROOT}/grpc/framework/face/*','{env:PYGRPC_ROOT}/grpc/_adapter/fore.py','{env:PYGRPC_ROOT}/grpc/_adapter/rear.py','{env:PYGRPC_ROOT}/grpc/_cython/cygrpc.pyx'
-    {envbindir}/coverage report --include='{env:PYGRPC_ROOT}/grpc/*' --omit='{env:PYGRPC_ROOT}/grpc/framework/alpha/*','{env:PYGRPC_ROOT}/grpc/early_adopter/*','{env:PYGRPC_ROOT}/grpc/framework/base/*','{env:PYGRPC_ROOT}/grpc/framework/face/*','{env:PYGRPC_ROOT}/grpc/_adapter/fore.py','{env:PYGRPC_ROOT}/grpc/_adapter/rear.py','{env:PYGRPC_ROOT}/grpc/_cython/cygrpc.pyx'
-deps =
-    -rrequirements.txt
-passenv = *
-
-[testenv:interop_client]
-commands =
-    {envpython} setup.py run_interop --client --args='{posargs}'
-
-[testenv:interop_server]
-commands =
-    {envpython} setup.py run_interop --server --args='{posargs}'
diff --git a/vsprojects/buildtests_c.sln b/vsprojects/buildtests_c.sln
index a847add773018650ab745454beeeda7b17c3bca7..7232440ab7627fbc7c9f9804a94a3589a9b58af2 100644
--- a/vsprojects/buildtests_c.sln
+++ b/vsprojects/buildtests_c.sln
@@ -56,6 +56,18 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bad_client_test", "vcxproj\
 		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
 	EndProjectSection
 EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bad_server_response_test", "vcxproj\test\bad_server_response_test\bad_server_response_test.vcxproj", "{2B73DA77-EF66-362C-24AD-317E3B8B28C1}"
+	ProjectSection(myProperties) = preProject
+        	lib = "False"
+	EndProjectSection
+	ProjectSection(ProjectDependencies) = postProject
+		{E3110C46-A148-FF65-08FD-3324829BE7FE} = {E3110C46-A148-FF65-08FD-3324829BE7FE}
+		{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} = {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}
+		{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
+		{EAB0A629-17A9-44DB-B5FF-E91A721FE037} = {EAB0A629-17A9-44DB-B5FF-E91A721FE037}
+		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
+	EndProjectSection
+EndProject
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "badreq_bad_client_test", "vcxproj\test\badreq_bad_client_test\badreq_bad_client_test.vcxproj", "{8A811C28-E04E-A444-E4C1-7588DF5B90AE}"
 	ProjectSection(myProperties) = preProject
         	lib = "False"
@@ -522,9 +534,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_create_jwt", "vcxproj\
         	lib = "False"
 	EndProjectSection
 	ProjectSection(ProjectDependencies) = postProject
-		{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} = {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}
 		{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
-		{EAB0A629-17A9-44DB-B5FF-E91A721FE037} = {EAB0A629-17A9-44DB-B5FF-E91A721FE037}
 		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
 	EndProjectSection
 EndProject
@@ -539,7 +549,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_credentials_test", "vc
 		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
 	EndProjectSection
 EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_fetch_oauth2", "vcxproj\.\grpc_fetch_oauth2\grpc_fetch_oauth2.vcxproj", "{43722E98-54EC-5058-3DAC-327F45964971}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_fetch_oauth2", "vcxproj\test\grpc_fetch_oauth2\grpc_fetch_oauth2.vcxproj", "{43722E98-54EC-5058-3DAC-327F45964971}"
 	ProjectSection(myProperties) = preProject
         	lib = "False"
 	EndProjectSection
@@ -577,9 +587,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_print_google_default_c
         	lib = "False"
 	EndProjectSection
 	ProjectSection(ProjectDependencies) = postProject
-		{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} = {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}
 		{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
-		{EAB0A629-17A9-44DB-B5FF-E91A721FE037} = {EAB0A629-17A9-44DB-B5FF-E91A721FE037}
 		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
 	EndProjectSection
 EndProject
@@ -628,9 +636,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_verify_jwt", "vcxproj\
         	lib = "False"
 	EndProjectSection
 	ProjectSection(ProjectDependencies) = postProject
-		{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} = {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}
 		{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
-		{EAB0A629-17A9-44DB-B5FF-E91A721FE037} = {EAB0A629-17A9-44DB-B5FF-E91A721FE037}
 		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
 	EndProjectSection
 EndProject
@@ -1364,17 +1370,6 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "timer_list_test", "vcxproj\
 		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
 	EndProjectSection
 EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "timers_test", "vcxproj\test\timers_test\timers_test.vcxproj", "{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}"
-	ProjectSection(myProperties) = preProject
-        	lib = "False"
-	EndProjectSection
-	ProjectSection(ProjectDependencies) = postProject
-		{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} = {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}
-		{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
-		{EAB0A629-17A9-44DB-B5FF-E91A721FE037} = {EAB0A629-17A9-44DB-B5FF-E91A721FE037}
-		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
-	EndProjectSection
-EndProject
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "transport_connectivity_state_test", "vcxproj\test\transport_connectivity_state_test\transport_connectivity_state_test.vcxproj", "{659121F6-1639-AC6B-053E-9D17A8B94D56}"
 	ProjectSection(myProperties) = preProject
         	lib = "False"
@@ -1524,6 +1519,22 @@ Global
 		{BA67B418-B699-E41A-9CC4-0279C49481A5}.Release-DLL|Win32.Build.0 = Release|Win32
 		{BA67B418-B699-E41A-9CC4-0279C49481A5}.Release-DLL|x64.ActiveCfg = Release|x64
 		{BA67B418-B699-E41A-9CC4-0279C49481A5}.Release-DLL|x64.Build.0 = Release|x64
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Debug|Win32.ActiveCfg = Debug|Win32
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Debug|x64.ActiveCfg = Debug|x64
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Release|Win32.ActiveCfg = Release|Win32
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Release|x64.ActiveCfg = Release|x64
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Debug|Win32.Build.0 = Debug|Win32
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Debug|x64.Build.0 = Debug|x64
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Release|Win32.Build.0 = Release|Win32
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Release|x64.Build.0 = Release|x64
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Debug-DLL|Win32.ActiveCfg = Debug|Win32
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Debug-DLL|Win32.Build.0 = Debug|Win32
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Debug-DLL|x64.ActiveCfg = Debug|x64
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Debug-DLL|x64.Build.0 = Debug|x64
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Release-DLL|Win32.ActiveCfg = Release|Win32
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Release-DLL|Win32.Build.0 = Release|Win32
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Release-DLL|x64.ActiveCfg = Release|x64
+		{2B73DA77-EF66-362C-24AD-317E3B8B28C1}.Release-DLL|x64.Build.0 = Release|x64
 		{8A811C28-E04E-A444-E4C1-7588DF5B90AE}.Debug|Win32.ActiveCfg = Debug|Win32
 		{8A811C28-E04E-A444-E4C1-7588DF5B90AE}.Debug|x64.ActiveCfg = Debug|x64
 		{8A811C28-E04E-A444-E4C1-7588DF5B90AE}.Release|Win32.ActiveCfg = Release|Win32
@@ -3476,22 +3487,6 @@ Global
 		{C43EA45B-1E72-C58D-8CE3-A879D1B1E2DB}.Release-DLL|Win32.Build.0 = Release|Win32
 		{C43EA45B-1E72-C58D-8CE3-A879D1B1E2DB}.Release-DLL|x64.ActiveCfg = Release|x64
 		{C43EA45B-1E72-C58D-8CE3-A879D1B1E2DB}.Release-DLL|x64.Build.0 = Release|x64
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Debug|Win32.ActiveCfg = Debug|Win32
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Debug|x64.ActiveCfg = Debug|x64
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Release|Win32.ActiveCfg = Release|Win32
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Release|x64.ActiveCfg = Release|x64
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Debug|Win32.Build.0 = Debug|Win32
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Debug|x64.Build.0 = Debug|x64
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Release|Win32.Build.0 = Release|Win32
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Release|x64.Build.0 = Release|x64
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Debug-DLL|Win32.ActiveCfg = Debug|Win32
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Debug-DLL|Win32.Build.0 = Debug|Win32
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Debug-DLL|x64.ActiveCfg = Debug|x64
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Debug-DLL|x64.Build.0 = Debug|x64
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Release-DLL|Win32.ActiveCfg = Release|Win32
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Release-DLL|Win32.Build.0 = Release|Win32
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Release-DLL|x64.ActiveCfg = Release|x64
-		{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}.Release-DLL|x64.Build.0 = Release|x64
 		{659121F6-1639-AC6B-053E-9D17A8B94D56}.Debug|Win32.ActiveCfg = Debug|Win32
 		{659121F6-1639-AC6B-053E-9D17A8B94D56}.Debug|x64.ActiveCfg = Debug|x64
 		{659121F6-1639-AC6B-053E-9D17A8B94D56}.Release|Win32.ActiveCfg = Release|Win32
diff --git a/vsprojects/grpc.sln b/vsprojects/grpc.sln
index a43daaf294cf553da0431034e892349de9659e33..84720914b0b3462f7eed70a404922dfb3eaa857c 100644
--- a/vsprojects/grpc.sln
+++ b/vsprojects/grpc.sln
@@ -74,9 +74,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_create_jwt", "vcxproj\
         	lib = "False"
 	EndProjectSection
 	ProjectSection(ProjectDependencies) = postProject
-		{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} = {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}
 		{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
-		{EAB0A629-17A9-44DB-B5FF-E91A721FE037} = {EAB0A629-17A9-44DB-B5FF-E91A721FE037}
 		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
 	EndProjectSection
 EndProject
@@ -89,25 +87,12 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_dll", "vcxproj\.\grpc_
 		{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
 	EndProjectSection
 EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_fetch_oauth2", "vcxproj\.\grpc_fetch_oauth2\grpc_fetch_oauth2.vcxproj", "{43722E98-54EC-5058-3DAC-327F45964971}"
-	ProjectSection(myProperties) = preProject
-        	lib = "False"
-	EndProjectSection
-	ProjectSection(ProjectDependencies) = postProject
-		{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} = {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}
-		{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
-		{EAB0A629-17A9-44DB-B5FF-E91A721FE037} = {EAB0A629-17A9-44DB-B5FF-E91A721FE037}
-		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
-	EndProjectSection
-EndProject
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_print_google_default_creds_token", "vcxproj\.\grpc_print_google_default_creds_token\grpc_print_google_default_creds_token.vcxproj", "{C002965C-8457-CCE5-B1BA-E748FF9A11B6}"
 	ProjectSection(myProperties) = preProject
         	lib = "False"
 	EndProjectSection
 	ProjectSection(ProjectDependencies) = postProject
-		{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} = {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}
 		{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
-		{EAB0A629-17A9-44DB-B5FF-E91A721FE037} = {EAB0A629-17A9-44DB-B5FF-E91A721FE037}
 		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
 	EndProjectSection
 EndProject
@@ -145,9 +130,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "grpc_verify_jwt", "vcxproj\
         	lib = "False"
 	EndProjectSection
 	ProjectSection(ProjectDependencies) = postProject
-		{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} = {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}
 		{29D16885-7228-4C31-81ED-5F9187C7F2A9} = {29D16885-7228-4C31-81ED-5F9187C7F2A9}
-		{EAB0A629-17A9-44DB-B5FF-E91A721FE037} = {EAB0A629-17A9-44DB-B5FF-E91A721FE037}
 		{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} = {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}
 	EndProjectSection
 EndProject
@@ -367,22 +350,6 @@ Global
 		{A2F6CBBA-A553-41B3-A7DE-F26DECCC27F0}.Release-DLL|Win32.Build.0 = Release|Win32
 		{A2F6CBBA-A553-41B3-A7DE-F26DECCC27F0}.Release-DLL|x64.ActiveCfg = Release|x64
 		{A2F6CBBA-A553-41B3-A7DE-F26DECCC27F0}.Release-DLL|x64.Build.0 = Release|x64
-		{43722E98-54EC-5058-3DAC-327F45964971}.Debug|Win32.ActiveCfg = Debug|Win32
-		{43722E98-54EC-5058-3DAC-327F45964971}.Debug|x64.ActiveCfg = Debug|x64
-		{43722E98-54EC-5058-3DAC-327F45964971}.Release|Win32.ActiveCfg = Release|Win32
-		{43722E98-54EC-5058-3DAC-327F45964971}.Release|x64.ActiveCfg = Release|x64
-		{43722E98-54EC-5058-3DAC-327F45964971}.Debug|Win32.Build.0 = Debug|Win32
-		{43722E98-54EC-5058-3DAC-327F45964971}.Debug|x64.Build.0 = Debug|x64
-		{43722E98-54EC-5058-3DAC-327F45964971}.Release|Win32.Build.0 = Release|Win32
-		{43722E98-54EC-5058-3DAC-327F45964971}.Release|x64.Build.0 = Release|x64
-		{43722E98-54EC-5058-3DAC-327F45964971}.Debug-DLL|Win32.ActiveCfg = Debug|Win32
-		{43722E98-54EC-5058-3DAC-327F45964971}.Debug-DLL|Win32.Build.0 = Debug|Win32
-		{43722E98-54EC-5058-3DAC-327F45964971}.Debug-DLL|x64.ActiveCfg = Debug|x64
-		{43722E98-54EC-5058-3DAC-327F45964971}.Debug-DLL|x64.Build.0 = Debug|x64
-		{43722E98-54EC-5058-3DAC-327F45964971}.Release-DLL|Win32.ActiveCfg = Release|Win32
-		{43722E98-54EC-5058-3DAC-327F45964971}.Release-DLL|Win32.Build.0 = Release|Win32
-		{43722E98-54EC-5058-3DAC-327F45964971}.Release-DLL|x64.ActiveCfg = Release|x64
-		{43722E98-54EC-5058-3DAC-327F45964971}.Release-DLL|x64.Build.0 = Release|x64
 		{C002965C-8457-CCE5-B1BA-E748FF9A11B6}.Debug|Win32.ActiveCfg = Debug|Win32
 		{C002965C-8457-CCE5-B1BA-E748FF9A11B6}.Debug|x64.ActiveCfg = Debug|x64
 		{C002965C-8457-CCE5-B1BA-E748FF9A11B6}.Release|Win32.ActiveCfg = Release|Win32
diff --git a/vsprojects/vcxproj/grpc/grpc.vcxproj b/vsprojects/vcxproj/grpc/grpc.vcxproj
index 26639e39447337dfb7b649737fcf414d74ab0c0c..64e532bb77f990edfb8fdd310dc4f29a9c2e5d8c 100644
--- a/vsprojects/vcxproj/grpc/grpc.vcxproj
+++ b/vsprojects/vcxproj/grpc/grpc.vcxproj
@@ -317,6 +317,7 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\error.h" />
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.h" />
@@ -327,6 +328,7 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_internal.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h" />
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set.h" />
@@ -368,7 +370,6 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\init.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\server.h" />
-    <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\surface_trace.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata.h" />
@@ -494,6 +495,8 @@
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\error.c">
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.c">
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.c">
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.c">
@@ -514,6 +517,8 @@
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.c">
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.c">
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.c">
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.c">
diff --git a/vsprojects/vcxproj/grpc/grpc.vcxproj.filters b/vsprojects/vcxproj/grpc/grpc.vcxproj.filters
index 351d836278a978eeea4198557af7009289734f7c..c0070ff7657d7475587e134a1a01a9a0b35c3cd2 100644
--- a/vsprojects/vcxproj/grpc/grpc.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc/grpc.vcxproj.filters
@@ -58,6 +58,9 @@
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\error.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.c">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
@@ -88,6 +91,9 @@
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.c">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
@@ -704,6 +710,9 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\error.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.h">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
@@ -734,6 +743,9 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.h">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
@@ -857,9 +869,6 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\server.h">
       <Filter>src\core\lib\surface</Filter>
     </ClInclude>
-    <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\surface_trace.h">
-      <Filter>src\core\lib\surface</Filter>
-    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.h">
       <Filter>src\core\lib\transport</Filter>
     </ClInclude>
diff --git a/vsprojects/vcxproj/grpc_create_jwt/grpc_create_jwt.vcxproj b/vsprojects/vcxproj/grpc_create_jwt/grpc_create_jwt.vcxproj
index ec4ec4a461fd5d0b5b5cdf69d1281ffb219c2a07..4e8c088e2d76acb6651a3cef82e20b19d92bfa7a 100644
--- a/vsprojects/vcxproj/grpc_create_jwt/grpc_create_jwt.vcxproj
+++ b/vsprojects/vcxproj/grpc_create_jwt/grpc_create_jwt.vcxproj
@@ -151,15 +151,9 @@
     </ClCompile>
   </ItemGroup>
   <ItemGroup>
-    <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc_test_util\grpc_test_util.vcxproj">
-      <Project>{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}</Project>
-    </ProjectReference>
     <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc\grpc.vcxproj">
       <Project>{29D16885-7228-4C31-81ED-5F9187C7F2A9}</Project>
     </ProjectReference>
-    <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\gpr_test_util\gpr_test_util.vcxproj">
-      <Project>{EAB0A629-17A9-44DB-B5FF-E91A721FE037}</Project>
-    </ProjectReference>
     <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\gpr\gpr.vcxproj">
       <Project>{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}</Project>
     </ProjectReference>
diff --git a/vsprojects/vcxproj/grpc_print_google_default_creds_token/grpc_print_google_default_creds_token.vcxproj b/vsprojects/vcxproj/grpc_print_google_default_creds_token/grpc_print_google_default_creds_token.vcxproj
index 87a4a6e9e4269c33019ccaa12f7737dabc131c9f..ed0b98c612227977362c1de404d1187439b661e4 100644
--- a/vsprojects/vcxproj/grpc_print_google_default_creds_token/grpc_print_google_default_creds_token.vcxproj
+++ b/vsprojects/vcxproj/grpc_print_google_default_creds_token/grpc_print_google_default_creds_token.vcxproj
@@ -151,15 +151,9 @@
     </ClCompile>
   </ItemGroup>
   <ItemGroup>
-    <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc_test_util\grpc_test_util.vcxproj">
-      <Project>{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}</Project>
-    </ProjectReference>
     <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc\grpc.vcxproj">
       <Project>{29D16885-7228-4C31-81ED-5F9187C7F2A9}</Project>
     </ProjectReference>
-    <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\gpr_test_util\gpr_test_util.vcxproj">
-      <Project>{EAB0A629-17A9-44DB-B5FF-E91A721FE037}</Project>
-    </ProjectReference>
     <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\gpr\gpr.vcxproj">
       <Project>{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}</Project>
     </ProjectReference>
diff --git a/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj b/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj
index 17e6876830e2466e08dd2eada49d23fc459fbb8e..eef63b7fda341808e8d991dd9b89aaa8c0c8aae9 100644
--- a/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj
+++ b/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj
@@ -207,6 +207,7 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\error.h" />
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.h" />
@@ -217,6 +218,7 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_internal.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h" />
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set.h" />
@@ -258,7 +260,6 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\init.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\server.h" />
-    <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\surface_trace.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata.h" />
@@ -338,6 +339,8 @@
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\error.c">
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.c">
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.c">
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.c">
@@ -358,6 +361,8 @@
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.c">
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.c">
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.c">
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.c">
diff --git a/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj.filters b/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj.filters
index bb30a294e802131ffec69629ea2b5829b1e347c9..83cee3f9a3c9248d5cb4aaa5b1c63086c7106d72 100644
--- a/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj.filters
@@ -106,6 +106,9 @@
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\error.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.c">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
@@ -136,6 +139,9 @@
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.c">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
@@ -485,6 +491,9 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\error.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.h">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
@@ -515,6 +524,9 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.h">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
@@ -638,9 +650,6 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\server.h">
       <Filter>src\core\lib\surface</Filter>
     </ClInclude>
-    <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\surface_trace.h">
-      <Filter>src\core\lib\surface</Filter>
-    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.h">
       <Filter>src\core\lib\transport</Filter>
     </ClInclude>
diff --git a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj
index 4575c9fe517b8f5498ba0ac3a1cbb7be501f8ff4..cfef5d4cfa003741166f9ea3ef791a4f277ff6d0 100644
--- a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj
+++ b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj
@@ -306,6 +306,7 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\endpoint_pair.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\error.h" />
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_posix.h" />
@@ -316,6 +317,7 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_internal.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h" />
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set.h" />
@@ -357,7 +359,6 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\init.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\lame_client.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\server.h" />
-    <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\surface_trace.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\connectivity_state.h" />
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\metadata.h" />
@@ -461,6 +462,8 @@
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\error.c">
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.c">
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.c">
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_posix.c">
@@ -481,6 +484,8 @@
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.c">
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.c">
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.c">
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\pollset_set_windows.c">
diff --git a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters
index 2e0ff1896e8ecccf499ff1cdcc099c65d049a594..974c947c2d8d1479a75b499c0fa71c7e1256aa76 100644
--- a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters
@@ -61,6 +61,9 @@
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\error.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.c">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
@@ -91,6 +94,9 @@
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.c">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.c">
       <Filter>src\core\lib\iomgr</Filter>
     </ClCompile>
@@ -611,6 +617,9 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\error.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_epoll_linux.h">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\ev_poll_and_epoll_posix.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
@@ -641,6 +650,9 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\network_status_tracker.h">
+      <Filter>src\core\lib\iomgr</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\polling_entity.h">
       <Filter>src\core\lib\iomgr</Filter>
     </ClInclude>
@@ -764,9 +776,6 @@
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\server.h">
       <Filter>src\core\lib\surface</Filter>
     </ClInclude>
-    <ClInclude Include="$(SolutionDir)\..\src\core\lib\surface\surface_trace.h">
-      <Filter>src\core\lib\surface</Filter>
-    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\src\core\lib\transport\byte_stream.h">
       <Filter>src\core\lib\transport</Filter>
     </ClInclude>
diff --git a/vsprojects/vcxproj/grpc_verify_jwt/grpc_verify_jwt.vcxproj b/vsprojects/vcxproj/grpc_verify_jwt/grpc_verify_jwt.vcxproj
index 27b166582a74c117b6b80b9d16d2f02a98fd6aee..e75143bee6dc47cb1986b400c9dc48759629e4dd 100644
--- a/vsprojects/vcxproj/grpc_verify_jwt/grpc_verify_jwt.vcxproj
+++ b/vsprojects/vcxproj/grpc_verify_jwt/grpc_verify_jwt.vcxproj
@@ -151,15 +151,9 @@
     </ClCompile>
   </ItemGroup>
   <ItemGroup>
-    <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc_test_util\grpc_test_util.vcxproj">
-      <Project>{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}</Project>
-    </ProjectReference>
     <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc\grpc.vcxproj">
       <Project>{29D16885-7228-4C31-81ED-5F9187C7F2A9}</Project>
     </ProjectReference>
-    <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\gpr_test_util\gpr_test_util.vcxproj">
-      <Project>{EAB0A629-17A9-44DB-B5FF-E91A721FE037}</Project>
-    </ProjectReference>
     <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\gpr\gpr.vcxproj">
       <Project>{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}</Project>
     </ProjectReference>
diff --git a/vsprojects/vcxproj/interop_server_main/interop_server_main.vcxproj b/vsprojects/vcxproj/interop_server_main/interop_server_main.vcxproj
index 075750afc6d2cb3f76c4f6e78916bf257bd0eb03..18971d6a3416c0e6d5f50c0eb9b249f2afce8f00 100644
--- a/vsprojects/vcxproj/interop_server_main/interop_server_main.vcxproj
+++ b/vsprojects/vcxproj/interop_server_main/interop_server_main.vcxproj
@@ -171,7 +171,7 @@
     </ClCompile>
     <ClInclude Include="$(SolutionDir)\..\src\proto\grpc\testing\test.grpc.pb.h">
     </ClInclude>
-    <ClCompile Include="$(SolutionDir)\..\test\cpp\interop\server_main.cc">
+    <ClCompile Include="$(SolutionDir)\..\test\cpp\interop\interop_server.cc">
     </ClCompile>
   </ItemGroup>
   <ItemGroup>
diff --git a/vsprojects/vcxproj/interop_server_main/interop_server_main.vcxproj.filters b/vsprojects/vcxproj/interop_server_main/interop_server_main.vcxproj.filters
index 51a6b9e73c32cfc0b3d3dfd4e160613a662e9c29..4ee8135c04520f01ac19edd97a2d78d1f344849c 100644
--- a/vsprojects/vcxproj/interop_server_main/interop_server_main.vcxproj.filters
+++ b/vsprojects/vcxproj/interop_server_main/interop_server_main.vcxproj.filters
@@ -10,7 +10,7 @@
     <ClCompile Include="$(SolutionDir)\..\src\proto\grpc\testing\test.proto">
       <Filter>src\proto\grpc\testing</Filter>
     </ClCompile>
-    <ClCompile Include="$(SolutionDir)\..\test\cpp\interop\server_main.cc">
+    <ClCompile Include="$(SolutionDir)\..\test\cpp\interop\interop_server.cc">
       <Filter>test\cpp\interop</Filter>
     </ClCompile>
   </ItemGroup>
diff --git a/vsprojects/vcxproj/grpc_fetch_oauth2/grpc_fetch_oauth2.vcxproj b/vsprojects/vcxproj/test/bad_server_response_test/bad_server_response_test.vcxproj
similarity index 64%
rename from vsprojects/vcxproj/grpc_fetch_oauth2/grpc_fetch_oauth2.vcxproj
rename to vsprojects/vcxproj/test/bad_server_response_test/bad_server_response_test.vcxproj
index 393f8e59024f6b5823d28a0f0577d8f1c20926fb..4676f3f6b6d27bbac85f002bd08415adcad0ccd4 100644
--- a/vsprojects/vcxproj/grpc_fetch_oauth2/grpc_fetch_oauth2.vcxproj
+++ b/vsprojects/vcxproj/test/bad_server_response_test/bad_server_response_test.vcxproj
@@ -1,5 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <Import Project="$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.1.0.204.1\build\native\grpc.dependencies.openssl.props" Condition="Exists('$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.1.0.204.1\build\native\1.0.204.1.props')" />
   <ItemGroup Label="ProjectConfigurations">
     <ProjectConfiguration Include="Debug|Win32">
       <Configuration>Debug</Configuration>
@@ -19,7 +20,7 @@
     </ProjectConfiguration>
   </ItemGroup>
   <PropertyGroup Label="Globals">
-    <ProjectGuid>{43722E98-54EC-5058-3DAC-327F45964971}</ProjectGuid>
+    <ProjectGuid>{2B73DA77-EF66-362C-24AD-317E3B8B28C1}</ProjectGuid>
     <IgnoreWarnIntDirInTempDetected>true</IgnoreWarnIntDirInTempDetected>
     <IntDir>$(SolutionDir)IntDir\$(MSBuildProjectName)\</IntDir>
   </PropertyGroup>
@@ -37,12 +38,12 @@
     <PlatformToolset>v140</PlatformToolset>
   </PropertyGroup>
   <PropertyGroup Condition="'$(Configuration)'=='Debug'" Label="Configuration">
-    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <ConfigurationType>Application</ConfigurationType>
     <UseDebugLibraries>true</UseDebugLibraries>
     <CharacterSet>Unicode</CharacterSet>
   </PropertyGroup>
   <PropertyGroup Condition="'$(Configuration)'=='Release'" Label="Configuration">
-    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <ConfigurationType>Application</ConfigurationType>
     <UseDebugLibraries>false</UseDebugLibraries>
     <WholeProgramOptimization>true</WholeProgramOptimization>
     <CharacterSet>Unicode</CharacterSet>
@@ -53,14 +54,24 @@
   <ImportGroup Label="PropertySheets">
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
     <Import Project="$(SolutionDir)\..\vsprojects\global.props" />
+    <Import Project="$(SolutionDir)\..\vsprojects\openssl.props" />
     <Import Project="$(SolutionDir)\..\vsprojects\winsock.props" />
+    <Import Project="$(SolutionDir)\..\vsprojects\zlib.props" />
   </ImportGroup>
   <PropertyGroup Label="UserMacros" />
   <PropertyGroup Condition="'$(Configuration)'=='Debug'">
-    <TargetName>grpc_fetch_oauth2</TargetName>
+    <TargetName>bad_server_response_test</TargetName>
+    <Linkage-grpc_dependencies_zlib>static</Linkage-grpc_dependencies_zlib>
+    <Configuration-grpc_dependencies_zlib>Debug</Configuration-grpc_dependencies_zlib>
+    <Linkage-grpc_dependencies_openssl>static</Linkage-grpc_dependencies_openssl>
+    <Configuration-grpc_dependencies_openssl>Debug</Configuration-grpc_dependencies_openssl>
   </PropertyGroup>
   <PropertyGroup Condition="'$(Configuration)'=='Release'">
-    <TargetName>grpc_fetch_oauth2</TargetName>
+    <TargetName>bad_server_response_test</TargetName>
+    <Linkage-grpc_dependencies_zlib>static</Linkage-grpc_dependencies_zlib>
+    <Configuration-grpc_dependencies_zlib>Release</Configuration-grpc_dependencies_zlib>
+    <Linkage-grpc_dependencies_openssl>static</Linkage-grpc_dependencies_openssl>
+    <Configuration-grpc_dependencies_openssl>Release</Configuration-grpc_dependencies_openssl>
   </PropertyGroup>
     <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
     <ClCompile>
@@ -147,10 +158,13 @@
   </ItemDefinitionGroup>
 
   <ItemGroup>
-    <ClCompile Include="$(SolutionDir)\..\test\core\security\fetch_oauth2.c">
+    <ClCompile Include="$(SolutionDir)\..\test\core\end2end\bad_server_response_test.c">
     </ClCompile>
   </ItemGroup>
   <ItemGroup>
+    <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\test_tcp_server\test_tcp_server.vcxproj">
+      <Project>{E3110C46-A148-FF65-08FD-3324829BE7FE}</Project>
+    </ProjectReference>
     <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc_test_util\grpc_test_util.vcxproj">
       <Project>{17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}</Project>
     </ProjectReference>
@@ -164,13 +178,25 @@
       <Project>{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}</Project>
     </ProjectReference>
   </ItemGroup>
+  <ItemGroup>
+    <None Include="packages.config" />
+  </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
+  <Import Project="$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.zlib.redist.1.2.8.10\build\native\grpc.dependencies.zlib.redist.targets" Condition="Exists('$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.zlib.redist.1.2.8.10\build\native\grpc.dependencies\grpc.dependencies.zlib.targets')" />
+  <Import Project="$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.zlib.1.2.8.10\build\native\grpc.dependencies.zlib.targets" Condition="Exists('$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.zlib.1.2.8.10\build\native\grpc.dependencies\grpc.dependencies.zlib.targets')" />
+  <Import Project="$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.redist.1.0.204.1\build\native\grpc.dependencies.openssl.redist.targets" Condition="Exists('$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.redist.1.0.204.1\build\native\grpc.dependencies\grpc.dependencies.openssl.targets')" />
+  <Import Project="$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.1.0.204.1\build\native\grpc.dependencies.openssl.targets" Condition="Exists('$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.1.0.204.1\build\native\grpc.dependencies\grpc.dependencies.openssl.targets')" />
   </ImportGroup>
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
+    <Error Condition="!Exists('$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.zlib.redist.1.2.8.10\build\native\grpc.dependencies.zlib.redist.targets')" Text="$([System.String]::Format('$(ErrorText)', '$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.zlib.redist.1.2.8.10\build\native\grpc.dependencies.zlib.redist.targets'))" />
+    <Error Condition="!Exists('$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.zlib.1.2.8.10\build\native\grpc.dependencies.zlib.targets')" Text="$([System.String]::Format('$(ErrorText)', '$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.zlib.1.2.8.10\build\native\grpc.dependencies.zlib.targets'))" />
+    <Error Condition="!Exists('$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.redist.1.0.204.1\build\native\grpc.dependencies.openssl.redist.targets')" Text="$([System.String]::Format('$(ErrorText)', '$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.redist.1.0.204.1\build\native\grpc.dependencies.openssl.redist.targets'))" />
+    <Error Condition="!Exists('$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.1.0.204.1\build\native\grpc.dependencies.openssl.props')" Text="$([System.String]::Format('$(ErrorText)', '$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.1.0.204.1\build\native\grpc.dependencies.openssl.props'))" />
+    <Error Condition="!Exists('$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.1.0.204.1\build\native\grpc.dependencies.openssl.targets')" Text="$([System.String]::Format('$(ErrorText)', '$(SolutionDir)\..\vsprojects\packages\grpc.dependencies.openssl.1.0.204.1\build\native\grpc.dependencies.openssl.targets'))" />
   </Target>
 </Project>
 
diff --git a/vsprojects/vcxproj/test/timers_test/timers_test.vcxproj.filters b/vsprojects/vcxproj/test/bad_server_response_test/bad_server_response_test.vcxproj.filters
similarity index 50%
rename from vsprojects/vcxproj/test/timers_test/timers_test.vcxproj.filters
rename to vsprojects/vcxproj/test/bad_server_response_test/bad_server_response_test.vcxproj.filters
index d4901c444c59a388124ff81931f356c19b79b319..13b11ec947220f71304a36720289cab961d923d8 100644
--- a/vsprojects/vcxproj/test/timers_test/timers_test.vcxproj.filters
+++ b/vsprojects/vcxproj/test/bad_server_response_test/bad_server_response_test.vcxproj.filters
@@ -1,20 +1,20 @@
 <?xml version="1.0" encoding="utf-8"?>
 <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <ItemGroup>
-    <ClCompile Include="$(SolutionDir)\..\test\core\profiling\timers_test.c">
-      <Filter>test\core\profiling</Filter>
+    <ClCompile Include="$(SolutionDir)\..\test\core\end2end\bad_server_response_test.c">
+      <Filter>test\core\end2end</Filter>
     </ClCompile>
   </ItemGroup>
 
   <ItemGroup>
     <Filter Include="test">
-      <UniqueIdentifier>{babbbe28-0bb3-2bc8-dee6-43fe0bef8f98}</UniqueIdentifier>
+      <UniqueIdentifier>{d29396a6-e5cf-3f1f-a33d-d1e9f2fa1b38}</UniqueIdentifier>
     </Filter>
     <Filter Include="test\core">
-      <UniqueIdentifier>{f321526c-3617-e2b9-8f09-2e15e92bc30a}</UniqueIdentifier>
+      <UniqueIdentifier>{332f26c8-dd3f-091d-9e10-5b704377e991}</UniqueIdentifier>
     </Filter>
-    <Filter Include="test\core\profiling">
-      <UniqueIdentifier>{62fa1b4c-6baf-0b0c-52a9-a5694834dbbb}</UniqueIdentifier>
+    <Filter Include="test\core\end2end">
+      <UniqueIdentifier>{158709cc-74ed-274f-fe50-b8e64cc9830e}</UniqueIdentifier>
     </Filter>
   </ItemGroup>
 </Project>
diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj
index 22cd102d116e14e7aec8c6ac5cc470e4edd59b0f..28ced2dc7ef7b6eafcfa7fb39ce85c8ec83e8386 100644
--- a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj
+++ b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj
@@ -199,6 +199,8 @@
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\negative_deadline.c">
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\network_status_change.c">
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\no_op.c">
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\payload.c">
@@ -225,6 +227,8 @@
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\simple_request.c">
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\streaming_error_response.c">
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\trailing_metadata.c">
     </ClCompile>
   </ItemGroup>
diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters
index 1bb208bba8e77f1d45dae6f09915b61f7010e370..7c725355d9109127123e07e947678927bcda4111 100644
--- a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters
+++ b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters
@@ -73,6 +73,9 @@
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\negative_deadline.c">
       <Filter>test\core\end2end\tests</Filter>
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\network_status_change.c">
+      <Filter>test\core\end2end\tests</Filter>
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\no_op.c">
       <Filter>test\core\end2end\tests</Filter>
     </ClCompile>
@@ -112,6 +115,9 @@
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\simple_request.c">
       <Filter>test\core\end2end\tests</Filter>
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\streaming_error_response.c">
+      <Filter>test\core\end2end\tests</Filter>
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\trailing_metadata.c">
       <Filter>test\core\end2end\tests</Filter>
     </ClCompile>
diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj
index bfd437e871726ecfe207f9c0d37bac65713b1b35..bc064cd6acd52a3c2a60db53dacd147c18ab46b9 100644
--- a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj
+++ b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj
@@ -201,6 +201,8 @@
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\negative_deadline.c">
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\network_status_change.c">
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\no_op.c">
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\payload.c">
@@ -227,6 +229,8 @@
     </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\simple_request.c">
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\streaming_error_response.c">
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\trailing_metadata.c">
     </ClCompile>
   </ItemGroup>
diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters
index 61c065f77ca721d09654378dee0fe8dc4ba87a58..d959c804854e6b88869dc9d243f926ed7152e602 100644
--- a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters
+++ b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters
@@ -76,6 +76,9 @@
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\negative_deadline.c">
       <Filter>test\core\end2end\tests</Filter>
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\network_status_change.c">
+      <Filter>test\core\end2end\tests</Filter>
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\no_op.c">
       <Filter>test\core\end2end\tests</Filter>
     </ClCompile>
@@ -115,6 +118,9 @@
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\simple_request.c">
       <Filter>test\core\end2end\tests</Filter>
     </ClCompile>
+    <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\streaming_error_response.c">
+      <Filter>test\core\end2end\tests</Filter>
+    </ClCompile>
     <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\trailing_metadata.c">
       <Filter>test\core\end2end\tests</Filter>
     </ClCompile>
diff --git a/vsprojects/vcxproj/test/timers_test/timers_test.vcxproj b/vsprojects/vcxproj/test/grpc_fetch_oauth2/grpc_fetch_oauth2.vcxproj
similarity index 98%
rename from vsprojects/vcxproj/test/timers_test/timers_test.vcxproj
rename to vsprojects/vcxproj/test/grpc_fetch_oauth2/grpc_fetch_oauth2.vcxproj
index fa1ba6adc62eccfc4048a7af929268350f70d9c4..52bb2697662d91d7f36fdf65311602cc5530dc58 100644
--- a/vsprojects/vcxproj/test/timers_test/timers_test.vcxproj
+++ b/vsprojects/vcxproj/test/grpc_fetch_oauth2/grpc_fetch_oauth2.vcxproj
@@ -20,7 +20,7 @@
     </ProjectConfiguration>
   </ItemGroup>
   <PropertyGroup Label="Globals">
-    <ProjectGuid>{FFE98236-3F4D-2CBA-29FB-D0A7467D2FA5}</ProjectGuid>
+    <ProjectGuid>{43722E98-54EC-5058-3DAC-327F45964971}</ProjectGuid>
     <IgnoreWarnIntDirInTempDetected>true</IgnoreWarnIntDirInTempDetected>
     <IntDir>$(SolutionDir)IntDir\$(MSBuildProjectName)\</IntDir>
   </PropertyGroup>
@@ -60,14 +60,14 @@
   </ImportGroup>
   <PropertyGroup Label="UserMacros" />
   <PropertyGroup Condition="'$(Configuration)'=='Debug'">
-    <TargetName>timers_test</TargetName>
+    <TargetName>grpc_fetch_oauth2</TargetName>
     <Linkage-grpc_dependencies_zlib>static</Linkage-grpc_dependencies_zlib>
     <Configuration-grpc_dependencies_zlib>Debug</Configuration-grpc_dependencies_zlib>
     <Linkage-grpc_dependencies_openssl>static</Linkage-grpc_dependencies_openssl>
     <Configuration-grpc_dependencies_openssl>Debug</Configuration-grpc_dependencies_openssl>
   </PropertyGroup>
   <PropertyGroup Condition="'$(Configuration)'=='Release'">
-    <TargetName>timers_test</TargetName>
+    <TargetName>grpc_fetch_oauth2</TargetName>
     <Linkage-grpc_dependencies_zlib>static</Linkage-grpc_dependencies_zlib>
     <Configuration-grpc_dependencies_zlib>Release</Configuration-grpc_dependencies_zlib>
     <Linkage-grpc_dependencies_openssl>static</Linkage-grpc_dependencies_openssl>
@@ -158,7 +158,7 @@
   </ItemDefinitionGroup>
 
   <ItemGroup>
-    <ClCompile Include="$(SolutionDir)\..\test\core\profiling\timers_test.c">
+    <ClCompile Include="$(SolutionDir)\..\test\core\security\fetch_oauth2.c">
     </ClCompile>
   </ItemGroup>
   <ItemGroup>
diff --git a/vsprojects/vcxproj/grpc_fetch_oauth2/grpc_fetch_oauth2.vcxproj.filters b/vsprojects/vcxproj/test/grpc_fetch_oauth2/grpc_fetch_oauth2.vcxproj.filters
similarity index 100%
rename from vsprojects/vcxproj/grpc_fetch_oauth2/grpc_fetch_oauth2.vcxproj.filters
rename to vsprojects/vcxproj/test/grpc_fetch_oauth2/grpc_fetch_oauth2.vcxproj.filters