diff --git a/src/compiler/objective_c_generator.cc b/src/compiler/objective_c_generator.cc
index a3157db0fb457175680b50022ee7f64dae9b0895..ff092053ad09386f695387a3cdd232b81e96da7d 100644
--- a/src/compiler/objective_c_generator.cc
+++ b/src/compiler/objective_c_generator.cc
@@ -203,6 +203,7 @@ void PrintMethodImplementations(Printer *printer,
     printer.Print(
         "- (instancetype)initWithHost:(NSString *)host"
         " NS_DESIGNATED_INITIALIZER;\n");
+    printer.Print("+ (instancetype)serviceWithHost:(NSString *)host;\n");
     printer.Print("@end\n");
   }
   return output;
@@ -239,6 +240,9 @@ void PrintMethodImplementations(Printer *printer,
     printer.Print("                 packageName:(NSString *)packageName\n");
     printer.Print("                 serviceName:(NSString *)serviceName {\n");
     printer.Print("  return [self initWithHost:host];\n");
+    printer.Print("}\n\n");
+    printer.Print("+ (instancetype)serviceWithHost:(NSString *)host {\n");
+    printer.Print("  return [[self alloc] initWithHost:host];\n");
     printer.Print("}\n\n\n");
 
     for (int i = 0; i < service->method_count(); i++) {
diff --git a/src/core/iomgr/exec_ctx.c b/src/core/iomgr/exec_ctx.c
index a830a27b0ba01027fa7f4fb7ad0a8e0459210553..f2914d376efc16fe70265a5acbd38399226bf609 100644
--- a/src/core/iomgr/exec_ctx.c
+++ b/src/core/iomgr/exec_ctx.c
@@ -35,16 +35,19 @@
 
 #include <grpc/support/log.h>
 
-void grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
+int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
+  int did_something = 0;
   while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
     grpc_closure *c = exec_ctx->closure_list.head;
     exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
     while (c != NULL) {
       grpc_closure *next = c->next;
+      did_something = 1;
       c->cb(exec_ctx, c->cb_arg, c->success);
       c = next;
     }
   }
+  return did_something;
 }
 
 void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
diff --git a/src/core/iomgr/exec_ctx.h b/src/core/iomgr/exec_ctx.h
index f99aa038c59e95828cdbf779c133c890c9387ba0..aa0610cbeaf0b02f3c24699dd3ccbf6f873e0c27 100644
--- a/src/core/iomgr/exec_ctx.h
+++ b/src/core/iomgr/exec_ctx.h
@@ -61,8 +61,9 @@ struct grpc_exec_ctx {
   { GRPC_CLOSURE_LIST_INIT }
 
 /** Flush any work that has been enqueued onto this grpc_exec_ctx.
- *  Caller must guarantee that no interfering locks are held. */
-void grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
+ *  Caller must guarantee that no interfering locks are held.
+ *  Returns 1 if work was performed, 0 otherwise. */
+int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
 /** Finish any pending work for a grpc_exec_ctx. Must be called before
  *  the instance is destroyed, or work may be lost. */
 void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
diff --git a/src/core/iomgr/iocp_windows.c b/src/core/iomgr/iocp_windows.c
index af846f680f75c9d66ee561f6dd75b8fac7a8048f..cf33d74366fa3285630d37fef72e8d1e54df413d 100644
--- a/src/core/iomgr/iocp_windows.c
+++ b/src/core/iomgr/iocp_windows.c
@@ -68,7 +68,7 @@ static DWORD deadline_to_millis_timeout(gpr_timespec deadline,
   }
   timeout = gpr_time_sub(deadline, now);
   return gpr_time_to_millis(gpr_time_add(
-    timeout, gpr_time_from_nanos(GPR_NS_PER_SEC - 1, GPR_TIMESPAN)));
+    timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
 }
 
 void grpc_iocp_work(grpc_exec_ctx *exec_ctx, gpr_timespec deadline) {
@@ -119,9 +119,7 @@ void grpc_iocp_work(grpc_exec_ctx *exec_ctx, gpr_timespec deadline) {
     info->has_pending_iocp = 1;
   }
   gpr_mu_unlock(&socket->state_mu);
-  if (closure) {
-    closure->cb(exec_ctx, closure->cb_arg, 1);
-  }
+  grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
 }
 
 void grpc_iocp_init(void) {
@@ -139,6 +137,14 @@ void grpc_iocp_kick(void) {
   GPR_ASSERT(success);
 }
 
+void grpc_iocp_flush(void) {
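+  /* Process any pending IOCP completions and the closures they schedule,
+     looping until the exec_ctx has no further work to run. */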
+  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+  do {
+    grpc_iocp_work(&exec_ctx, gpr_inf_past(GPR_CLOCK_MONOTONIC));
+  } while (grpc_exec_ctx_flush(&exec_ctx));
+}
+
 void grpc_iocp_shutdown(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   while (gpr_atm_acq_load(&g_custom_events)) {
diff --git a/src/core/iomgr/iocp_windows.h b/src/core/iomgr/iocp_windows.h
index 7e330e7ce2f57283b812be1fc8ccf58ca4de42ac..75f3ba84770fca354814d332b437a83f2ae5e511 100644
--- a/src/core/iomgr/iocp_windows.h
+++ b/src/core/iomgr/iocp_windows.h
@@ -41,6 +41,7 @@
 void grpc_iocp_work(grpc_exec_ctx *exec_ctx, gpr_timespec deadline);
 void grpc_iocp_init(void);
 void grpc_iocp_kick(void);
+void grpc_iocp_flush(void);
 void grpc_iocp_shutdown(void);
 void grpc_iocp_add_socket(grpc_winsocket *);
 
diff --git a/src/core/iomgr/iomgr.c b/src/core/iomgr/iomgr.c
index 24108c9b62a2879ff021c17b2f925d28e90b8c9c..e61fc32925fd9f20cebda6e35a2c7873fca85ebb 100644
--- a/src/core/iomgr/iomgr.c
+++ b/src/core/iomgr/iomgr.c
@@ -84,6 +84,8 @@ void grpc_iomgr_shutdown(void) {
   gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
 
+  grpc_iomgr_platform_flush();
+
   gpr_mu_lock(&g_mu);
   g_shutdown = 1;
   while (g_root_object.next != &g_root_object) {
diff --git a/src/core/iomgr/iomgr_internal.h b/src/core/iomgr/iomgr_internal.h
index 1a0724b431c0b925c44978e43ef9f22e244a743b..e372c18e8a070cbba634bcea67ea7d80110b461c 100644
--- a/src/core/iomgr/iomgr_internal.h
+++ b/src/core/iomgr/iomgr_internal.h
@@ -50,6 +50,9 @@ void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name);
 void grpc_iomgr_unregister_object(grpc_iomgr_object *obj);
 
 void grpc_iomgr_platform_init(void);
+/** flush any globally queued work from iomgr */
+void grpc_iomgr_platform_flush(void);
+/** tear down all platform specific global iomgr structures */
 void grpc_iomgr_platform_shutdown(void);
 
 #endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_INTERNAL_H */
diff --git a/src/core/iomgr/iomgr_posix.c b/src/core/iomgr/iomgr_posix.c
index db93d0a75618305c32563ec4487260b02ad8789a..f6474b7e6d4b9af0730aedd6ba6efa0f8a2fda65 100644
--- a/src/core/iomgr/iomgr_posix.c
+++ b/src/core/iomgr/iomgr_posix.c
@@ -45,6 +45,9 @@ void grpc_iomgr_platform_init(void) {
   grpc_register_tracer("tcp", &grpc_tcp_trace);
 }
 
+void grpc_iomgr_platform_flush(void) {
+}
+
 void grpc_iomgr_platform_shutdown(void) {
   grpc_fd_global_shutdown();
 }
diff --git a/src/core/iomgr/iomgr_windows.c b/src/core/iomgr/iomgr_windows.c
index b49cb87e97b00bbb50ffc92eda3fe698b6db1f5f..93bdc5ec16e9e3b02011f96f6e2f9744d643d492 100644
--- a/src/core/iomgr/iomgr_windows.c
+++ b/src/core/iomgr/iomgr_windows.c
@@ -63,6 +63,10 @@ void grpc_iomgr_platform_init(void) {
   grpc_iocp_init();
 }
 
+void grpc_iomgr_platform_flush(void) {
+  grpc_iocp_flush();
+}
+
 void grpc_iomgr_platform_shutdown(void) {
   grpc_iocp_shutdown();
   winsock_shutdown();
diff --git a/src/core/iomgr/pollset_multipoller_with_epoll.c b/src/core/iomgr/pollset_multipoller_with_epoll.c
index bdcc3a19f8a63a12f29ae50e31494ab2e1db8330..d26e60f665007f075544e44111f27b8f8d2f006b 100644
--- a/src/core/iomgr/pollset_multipoller_with_epoll.c
+++ b/src/core/iomgr/pollset_multipoller_with_epoll.c
@@ -180,6 +180,8 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
   pfds[1].events = POLLIN;
   pfds[1].revents = 0;
 
+  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
+     even going into the blocking annotation if possible */
   GRPC_SCHEDULING_START_BLOCKING_REGION;
   poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
   GRPC_SCHEDULING_END_BLOCKING_REGION;
@@ -209,15 +211,15 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
             /* TODO(klempner): We might want to consider making err and pri
              * separate events */
             int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
-            int read = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
-            int write = ep_ev[i].events & EPOLLOUT;
+            int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
+            int write_ev = ep_ev[i].events & EPOLLOUT;
             if (fd == NULL) {
               grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
             } else {
-              if (read || cancel) {
+              if (read_ev || cancel) {
                 grpc_fd_become_readable(exec_ctx, fd);
               }
-              if (write || cancel) {
+              if (write_ev || cancel) {
                 grpc_fd_become_writable(exec_ctx, fd);
               }
             }
diff --git a/src/core/iomgr/pollset_multipoller_with_poll_posix.c b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
index 240e9daf8eee6cc337db18e535abab345260d2e9..1356ebe7a0659a40b3204a225b97b36e8c1d4d44 100644
--- a/src/core/iomgr/pollset_multipoller_with_poll_posix.c
+++ b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
@@ -151,6 +151,8 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
                                                POLLOUT, &watchers[i]);
   }
 
+  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
+     even going into the blocking annotation if possible */
   GRPC_SCHEDULING_START_BLOCKING_REGION;
   r = grpc_poll_function(pfds, pfd_count, timeout);
   GRPC_SCHEDULING_END_BLOCKING_REGION;
diff --git a/src/core/iomgr/pollset_posix.c b/src/core/iomgr/pollset_posix.c
index 7adb0e626ea6e437c2f98557218cfff112029024..f4e4ce60457aa6040b6ab126036210e10b107efd 100644
--- a/src/core/iomgr/pollset_posix.c
+++ b/src/core/iomgr/pollset_posix.c
@@ -481,6 +481,8 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
     gpr_mu_unlock(&pollset->mu);
   }
 
+  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
+     even going into the blocking annotation if possible */
   /* poll fd count (argument 2) is shortened by one if we have no events
      to poll on - such that it only includes the kicker */
   GRPC_SCHEDULING_START_BLOCKING_REGION;
diff --git a/src/core/iomgr/pollset_windows.c b/src/core/iomgr/pollset_windows.c
index 4b82a4849e152c0f11846dc49a6fcc13f0237e59..1b4935a8416f31a6f7f890acff3f498eb3c81b23 100644
--- a/src/core/iomgr/pollset_windows.c
+++ b/src/core/iomgr/pollset_windows.c
@@ -43,12 +43,12 @@
 #include "src/core/iomgr/pollset.h"
 #include "src/core/iomgr/pollset_windows.h"
 
-static gpr_mu g_polling_mu;
+gpr_mu grpc_polling_mu;
 static grpc_pollset_worker *g_active_poller;
 static grpc_pollset_worker g_global_root_worker;
 
 void grpc_pollset_global_init() {
-  gpr_mu_init(&g_polling_mu);
+  gpr_mu_init(&grpc_polling_mu);
   g_active_poller = NULL;
   g_global_root_worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
     g_global_root_worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev =
@@ -56,7 +56,7 @@ void grpc_pollset_global_init() {
 }
 
 void grpc_pollset_global_shutdown() {
-  gpr_mu_destroy(&g_polling_mu);
+  gpr_mu_destroy(&grpc_polling_mu);
 }
 
 static void remove_worker(grpc_pollset_worker *worker, 
@@ -108,7 +108,6 @@ static void push_front_worker(grpc_pollset_worker *root,
 
 void grpc_pollset_init(grpc_pollset *pollset) {
   memset(pollset, 0, sizeof(*pollset));
-  gpr_mu_init(&pollset->mu);
   pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next = 
     pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev = 
     &pollset->root_worker;
@@ -116,7 +115,7 @@ void grpc_pollset_init(grpc_pollset *pollset) {
 
 void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                            grpc_closure *closure) {
-  gpr_mu_lock(&pollset->mu);
+  gpr_mu_lock(&grpc_polling_mu);
   pollset->shutting_down = 1;
   grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
   if (!pollset->is_iocp_worker) {
@@ -124,11 +123,10 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   } else {
     pollset->on_shutdown = closure;
   }
-  gpr_mu_unlock(&pollset->mu);
+  gpr_mu_unlock(&grpc_polling_mu);
 }
 
 void grpc_pollset_destroy(grpc_pollset *pollset) {
-  gpr_mu_destroy(&pollset->mu);
 }
 
 void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@@ -137,30 +135,35 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   int added_worker = 0;
   worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next = 
     worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev = 
+    worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
+    worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev =
     NULL;
+  worker->kicked = 0;
+  worker->pollset = pollset;
   gpr_cv_init(&worker->cv);
   if (grpc_alarm_check(exec_ctx, now, &deadline)) {
     goto done;
   }
   if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
-    gpr_mu_lock(&g_polling_mu);
     if (g_active_poller == NULL) {
       grpc_pollset_worker *next_worker;
       /* become poller */
       pollset->is_iocp_worker = 1;
       g_active_poller = worker;
-      gpr_mu_unlock(&g_polling_mu);
-      gpr_mu_unlock(&pollset->mu);
+      gpr_mu_unlock(&grpc_polling_mu);
       grpc_iocp_work(exec_ctx, deadline);
-      gpr_mu_lock(&pollset->mu);
-      gpr_mu_lock(&g_polling_mu);
+      grpc_exec_ctx_flush(exec_ctx);
+      gpr_mu_lock(&grpc_polling_mu);
       pollset->is_iocp_worker = 0;
       g_active_poller = NULL;
-      next_worker = pop_front_worker(&g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
+      /* try to get a worker from this pollset's worker list */
+      next_worker = pop_front_worker(&pollset->root_worker,
+                                     GRPC_POLLSET_WORKER_LINK_POLLSET);
+      if (next_worker == NULL) {
+        /* if there was none, try the global worker list */
+        next_worker = pop_front_worker(&g_global_root_worker,
+                                       GRPC_POLLSET_WORKER_LINK_GLOBAL);
+      }
       if (next_worker != NULL) {
+        next_worker->kicked = 1;
         gpr_cv_signal(&next_worker->cv);
       }
-      gpr_mu_unlock(&g_polling_mu);
 
       if (pollset->shutting_down && pollset->on_shutdown != NULL) {
         grpc_exec_ctx_enqueue(exec_ctx, pollset->on_shutdown, 1);
@@ -169,24 +172,27 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
       goto done;
     }
     push_front_worker(&g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL, worker);
-    gpr_mu_unlock(&g_polling_mu);
     push_front_worker(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET, worker);
     added_worker = 1;
-    gpr_cv_wait(&worker->cv, &pollset->mu, deadline);
+    while (!worker->kicked) {
+      if (gpr_cv_wait(&worker->cv, &grpc_polling_mu, deadline)) {
+        break;
+      }
+    }
   } else {
     pollset->kicked_without_pollers = 0;
   }
 done:
   if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
-    gpr_mu_unlock(&pollset->mu);
+    gpr_mu_unlock(&grpc_polling_mu);
     grpc_exec_ctx_flush(exec_ctx);
-    gpr_mu_lock(&pollset->mu);
+    gpr_mu_lock(&grpc_polling_mu);
   }
-  gpr_cv_destroy(&worker->cv);
   if (added_worker) {
     remove_worker(worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
     remove_worker(worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
   }
+  gpr_cv_destroy(&worker->cv);
 }
 
 void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
@@ -195,6 +201,7 @@ void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
       for (specific_worker = p->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next;
            specific_worker != &p->root_worker;
            specific_worker = specific_worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next) {
+        specific_worker->kicked = 1;
         gpr_cv_signal(&specific_worker->cv);
       }
       p->kicked_without_pollers = 1;
@@ -203,12 +210,11 @@ void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
       }
     } else {
       if (p->is_iocp_worker) {
-        gpr_mu_lock(&g_polling_mu);
         if (g_active_poller == specific_worker) {
           grpc_iocp_kick();
         }
-        gpr_mu_unlock(&g_polling_mu);
       } else {
+        specific_worker->kicked = 1;
         gpr_cv_signal(&specific_worker->cv);
       }
     }
diff --git a/src/core/iomgr/pollset_windows.h b/src/core/iomgr/pollset_windows.h
index 55f87aca729d0172c85e90cac5142106e479213b..65ba80619b77437fa8f263d2fb5bf9966c22449d 100644
--- a/src/core/iomgr/pollset_windows.h
+++ b/src/core/iomgr/pollset_windows.h
@@ -54,20 +54,26 @@ typedef struct grpc_pollset_worker_link {
   struct grpc_pollset_worker *prev;
 } grpc_pollset_worker_link;
 
+struct grpc_pollset;
+typedef struct grpc_pollset grpc_pollset;
+
 typedef struct grpc_pollset_worker {
   gpr_cv cv;
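+  /* nonzero once this worker has been kicked and no longer needs to wait */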
+  int kicked;
+  struct grpc_pollset *pollset;
   grpc_pollset_worker_link links[GRPC_POLLSET_WORKER_LINK_TYPES];
 } grpc_pollset_worker;
 
-typedef struct grpc_pollset {
-  gpr_mu mu;
+struct grpc_pollset {
   int shutting_down;
   int kicked_without_pollers;
   int is_iocp_worker;
   grpc_pollset_worker root_worker;
   grpc_closure *on_shutdown;
-} grpc_pollset;
+};
+
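+/* All pollsets on Windows share this single global mutex; GRPC_POLLSET_MU
+   resolves to it for every pollset. */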
+extern gpr_mu grpc_polling_mu;
 
-#define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
+#define GRPC_POLLSET_MU(pollset) (&grpc_polling_mu)
 
 #endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */
diff --git a/src/core/iomgr/tcp_server_posix.c b/src/core/iomgr/tcp_server_posix.c
index a582f4a7c315f4e072dea564a429efeed983be7d..13bd67576f55d9adf90d36c64c69efcaed7f3c41 100644
--- a/src/core/iomgr/tcp_server_posix.c
+++ b/src/core/iomgr/tcp_server_posix.c
@@ -478,8 +478,8 @@ done:
   return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
 }
 
-int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned index) {
-  return (index < s->nports) ? s->ports[index].fd : -1;
+int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned port_index) {
+  return (port_index < s->nports) ? s->ports[port_index].fd : -1;
 }
 
 void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
diff --git a/src/core/iomgr/udp_server.c b/src/core/iomgr/udp_server.c
index 9baaf1edc78cb63df2d042078c61c5c385f41134..a8d611c3f2d08739e780aa12d30e29bc59c55fb3 100644
--- a/src/core/iomgr/udp_server.c
+++ b/src/core/iomgr/udp_server.c
@@ -278,7 +278,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
 
   /* Tell the registered callback that data is available to read. */
   GPR_ASSERT(sp->read_cb);
-  sp->read_cb(sp->fd, sp->server->grpc_server);
+  sp->read_cb(sp->emfd, sp->server->grpc_server);
 
   /* Re-arm the notification event so we get another chance to read. */
   grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
@@ -399,8 +399,8 @@ done:
   return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
 }
 
-int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index) {
-  return (index < s->nports) ? s->ports[index].fd : -1;
+int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) {
+  return (port_index < s->nports) ? s->ports[port_index].fd : -1;
 }
 
 void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
diff --git a/src/core/iomgr/udp_server.h b/src/core/iomgr/udp_server.h
index 8e3abae864ba2cfe16251bee3e968946b354db19..dbbe09710932d615875ee77f53dca2b357222291 100644
--- a/src/core/iomgr/udp_server.h
+++ b/src/core/iomgr/udp_server.h
@@ -43,7 +43,7 @@ typedef struct grpc_server grpc_server;
 typedef struct grpc_udp_server grpc_udp_server;
 
 /* Called when data is available to read from the socket. */
-typedef void (*grpc_udp_server_read_cb)(int fd, grpc_server *server);
+typedef void (*grpc_udp_server_read_cb)(grpc_fd *emfd, grpc_server *server);
 
 /* Create a server, initially not bound to any ports */
 grpc_udp_server *grpc_udp_server_create(void);
diff --git a/src/core/support/histogram.c b/src/core/support/histogram.c
index 78dbf98684ace19e79f92e0fddc1ff6b390517ca..8a1a9d92330d2210d2195dc4adea0c0c8f30b5c7 100644
--- a/src/core/support/histogram.c
+++ b/src/core/support/histogram.c
@@ -212,7 +212,7 @@ double gpr_histogram_percentile(gpr_histogram *h, double percentile) {
 }
 
 double gpr_histogram_mean(gpr_histogram *h) {
-  GPR_ASSERT(h->count);
+  GPR_ASSERT(h->count != 0);
   return h->sum / h->count;
 }
 
diff --git a/src/core/support/time_posix.c b/src/core/support/time_posix.c
index eedfd0a060762c37ec2394d0464e9d615341156a..78f2c2bb77a6fb2c40155cbb90190d610fa86827 100644
--- a/src/core/support/time_posix.c
+++ b/src/core/support/time_posix.c
@@ -52,11 +52,11 @@ static struct timespec timespec_from_gpr(gpr_timespec gts) {
 
 #if _POSIX_TIMERS > 0
 static gpr_timespec gpr_from_timespec(struct timespec ts,
-                                      gpr_clock_type clock) {
+                                      gpr_clock_type clock_type) {
   gpr_timespec rv;
   rv.tv_sec = ts.tv_sec;
   rv.tv_nsec = (int)ts.tv_nsec;
-  rv.clock_type = clock;
+  rv.clock_type = clock_type;
   return rv;
 }
 
@@ -65,16 +65,16 @@ static clockid_t clockid_for_gpr_clock[] = {CLOCK_MONOTONIC, CLOCK_REALTIME};
 
 void gpr_time_init(void) {}
 
-gpr_timespec gpr_now(gpr_clock_type clock) {
+gpr_timespec gpr_now(gpr_clock_type clock_type) {
   struct timespec now;
-  GPR_ASSERT(clock != GPR_TIMESPAN);
-  if (clock == GPR_CLOCK_PRECISE) {
+  GPR_ASSERT(clock_type != GPR_TIMESPAN);
+  if (clock_type == GPR_CLOCK_PRECISE) {
     gpr_timespec ret;
     gpr_precise_clock_now(&ret);
     return ret;
   } else {
-    clock_gettime(clockid_for_gpr_clock[clock], &now);
-    return gpr_from_timespec(now, clock);
+    clock_gettime(clockid_for_gpr_clock[clock_type], &now);
+    return gpr_from_timespec(now, clock_type);
   }
 }
 #else
diff --git a/src/core/transport/chttp2/hpack_table.c b/src/core/transport/chttp2/hpack_table.c
index d5cb752789f1862cc2ac908dfe9cee651472c772..c442c2c34139c50fd4ebfe6ae3b029a0c8fdca82 100644
--- a/src/core/transport/chttp2/hpack_table.c
+++ b/src/core/transport/chttp2/hpack_table.c
@@ -193,15 +193,15 @@ void grpc_chttp2_hptbl_destroy(grpc_chttp2_hptbl *tbl) {
 }
 
 grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
-                                      gpr_uint32 index) {
+                                      gpr_uint32 tbl_index) {
   /* Static table comes first, just return an entry from it */
-  if (index <= GRPC_CHTTP2_LAST_STATIC_ENTRY) {
-    return tbl->static_ents[index - 1];
+  if (tbl_index <= GRPC_CHTTP2_LAST_STATIC_ENTRY) {
+    return tbl->static_ents[tbl_index - 1];
   }
   /* Otherwise, find the value in the list of valid entries */
-  index -= (GRPC_CHTTP2_LAST_STATIC_ENTRY + 1);
-  if (index < tbl->num_ents) {
-    gpr_uint32 offset = (tbl->num_ents - 1u - index + tbl->first_ent) %
+  tbl_index -= (GRPC_CHTTP2_LAST_STATIC_ENTRY + 1);
+  if (tbl_index < tbl->num_ents) {
+    gpr_uint32 offset = (tbl->num_ents - 1u - tbl_index + tbl->first_ent) %
                         GRPC_CHTTP2_MAX_TABLE_COUNT;
     return tbl->ents[offset];
   }
diff --git a/src/core/transport/chttp2/stream_encoder.c b/src/core/transport/chttp2/stream_encoder.c
index b1f1db05d26cfe2781581d52a2a425e656aed9c7..ec97af3d5dd3b472153c9d65cdc655fc44ad92fe 100644
--- a/src/core/transport/chttp2/stream_encoder.c
+++ b/src/core/transport/chttp2/stream_encoder.c
@@ -274,10 +274,11 @@ static grpc_mdelem *add_elem(grpc_chttp2_hpack_compressor *c,
   return elem_to_unref;
 }
 
-static void emit_indexed(grpc_chttp2_hpack_compressor *c, gpr_uint32 index,
+static void emit_indexed(grpc_chttp2_hpack_compressor *c, gpr_uint32 elem_index,
                          framer_state *st) {
-  gpr_uint32 len = GRPC_CHTTP2_VARINT_LENGTH(index, 1);
-  GRPC_CHTTP2_WRITE_VARINT(index, 1, 0x80, add_tiny_header_data(st, len), len);
+  gpr_uint32 len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1);
+  GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len),
+                           len);
 }
 
 static gpr_slice get_wire_value(grpc_mdelem *elem, gpr_uint8 *huffman_prefix) {
@@ -363,9 +364,10 @@ static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
   add_header_data(st, gpr_slice_ref(value_slice));
 }
 
-static gpr_uint32 dynidx(grpc_chttp2_hpack_compressor *c, gpr_uint32 index) {
+static gpr_uint32 dynidx(grpc_chttp2_hpack_compressor *c,
+                         gpr_uint32 elem_index) {
   return 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY + c->tail_remote_index +
-         c->table_elems - index;
+         c->table_elems - elem_index;
 }
 
 /* encode an mdelem; returns metadata element to unref */
diff --git a/src/core/transport/stream_op.c b/src/core/transport/stream_op.c
index 038586d48e220d3f0573699cddb2ec71df0d25dd..1cb2bd7c598558e2b029d35ac92c2b8d131a13f1 100644
--- a/src/core/transport/stream_op.c
+++ b/src/core/transport/stream_op.c
@@ -274,14 +274,14 @@ void grpc_metadata_batch_link_tail(grpc_metadata_batch *batch,
 }
 
 void grpc_metadata_batch_merge(grpc_metadata_batch *target,
-                               grpc_metadata_batch *add) {
+                               grpc_metadata_batch *to_add) {
   grpc_linked_mdelem *l;
   grpc_linked_mdelem *next;
-  for (l = add->list.head; l; l = next) {
+  for (l = to_add->list.head; l; l = next) {
     next = l->next;
     link_tail(&target->list, l);
   }
-  for (l = add->garbage.head; l; l = next) {
+  for (l = to_add->garbage.head; l; l = next) {
     next = l->next;
     link_tail(&target->garbage, l);
   }
diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc
index 1693cf740bd4dca8645832333d1f79f484ba9dbd..8299ebeb8a233499ec6bf9d8bc007e9b120fe44b 100644
--- a/src/cpp/client/secure_credentials.cc
+++ b/src/cpp/client/secure_credentials.cc
@@ -154,10 +154,10 @@ void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) {
 void MetadataCredentialsPluginWrapper::GetMetadata(
     void* wrapper, const char* service_url,
     grpc_credentials_plugin_metadata_cb cb, void* user_data) {
-  GPR_ASSERT(wrapper != nullptr);
+  GPR_ASSERT(wrapper);
   MetadataCredentialsPluginWrapper* w =
       reinterpret_cast<MetadataCredentialsPluginWrapper*>(wrapper);
-  if (w->plugin_ == nullptr) {
+  if (!w->plugin_) {
     cb(user_data, NULL, 0, GRPC_STATUS_OK, NULL);
     return;
   }
@@ -177,11 +177,12 @@ void MetadataCredentialsPluginWrapper::InvokePlugin(
   Status status = plugin_->GetMetadata(service_url, &metadata);
   std::vector<grpc_metadata> md;
   for (auto it = metadata.begin(); it != metadata.end(); ++it) {
-    md.push_back({it->first.c_str(),
-                  it->second.data(),
-                  it->second.size(),
-                  0,
-                  {{nullptr, nullptr, nullptr, nullptr}}});
+    grpc_metadata md_entry;
+    md_entry.key = it->first.c_str();
+    md_entry.value = it->second.data();
+    md_entry.value_length = it->second.size();
+    md_entry.flags = 0;
+    md.push_back(md_entry);
   }
   cb(user_data, md.empty() ? nullptr : &md[0], md.size(),
      static_cast<grpc_status_code>(status.error_code()),
diff --git a/src/cpp/server/secure_server_credentials.cc b/src/cpp/server/secure_server_credentials.cc
index 90afebfd2e22b4a9d6b6e00cb7f79a05665a8dba..7c828cb1257c56151fbe0d86efe8dcf2b44fa0ce 100644
--- a/src/cpp/server/secure_server_credentials.cc
+++ b/src/cpp/server/secure_server_credentials.cc
@@ -52,7 +52,7 @@ void AuthMetadataProcessorAyncWrapper::Process(
     void* wrapper, grpc_auth_context* context, const grpc_metadata* md,
     size_t num_md, grpc_process_auth_metadata_done_cb cb, void* user_data) {
   auto* w = reinterpret_cast<AuthMetadataProcessorAyncWrapper*>(wrapper);
-  if (w->processor_ == nullptr) {
+  if (!w->processor_) {
     // Early exit.
     cb(user_data, nullptr, 0, nullptr, 0, GRPC_STATUS_OK, nullptr);
     return;
@@ -86,20 +86,22 @@ void AuthMetadataProcessorAyncWrapper::InvokeProcessor(
   std::vector<grpc_metadata> consumed_md;
   for (auto it = consumed_metadata.begin(); it != consumed_metadata.end();
        ++it) {
-    consumed_md.push_back({it->first.c_str(),
-                           it->second.data(),
-                           it->second.size(),
-                           0,
-                           {{nullptr, nullptr, nullptr, nullptr}}});
+    grpc_metadata md_entry;
+    md_entry.key = it->first.c_str();
+    md_entry.value = it->second.data();
+    md_entry.value_length = it->second.size();
+    md_entry.flags = 0;
+    consumed_md.push_back(md_entry);
   }
   std::vector<grpc_metadata> response_md;
   for (auto it = response_metadata.begin(); it != response_metadata.end();
        ++it) {
-    response_md.push_back({it->first.c_str(),
-                           it->second.data(),
-                           it->second.size(),
-                           0,
-                           {{nullptr, nullptr, nullptr, nullptr}}});
+    grpc_metadata md_entry;
+    md_entry.key = it->first.c_str();
+    md_entry.value = it->second.data();
+    md_entry.value_length = it->second.size();
+    md_entry.flags = 0;
+    response_md.push_back(md_entry);
   }
   auto consumed_md_data = consumed_md.empty() ? nullptr : &consumed_md[0];
   auto response_md_data = response_md.empty() ? nullptr : &response_md[0];
diff --git a/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs b/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs
index a2ee183272443069e1eb76213bad4364b4a6430b..c6843f10af25744153b8da6095eade1c4d260d92 100644
--- a/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs
+++ b/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs
@@ -45,17 +45,21 @@ namespace Grpc.Core.Internal.Tests
         [Test]
         public void CreateAndDestroy()
         {
+            GrpcEnvironment.AddRef();
             var cq = CompletionQueueSafeHandle.Create();
             cq.Dispose();
+            GrpcEnvironment.Release();
         }
 
         [Test]
         public void CreateAndShutdown()
         {
+            GrpcEnvironment.AddRef();
             var cq = CompletionQueueSafeHandle.Create();
             cq.Shutdown();
             var ev = cq.Next();
             cq.Dispose();
+            GrpcEnvironment.Release();
             Assert.AreEqual(GRPCCompletionType.Shutdown, ev.type);
             Assert.AreNotEqual(IntPtr.Zero, ev.success);
             Assert.AreEqual(IntPtr.Zero, ev.tag);
diff --git a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
index 616093d4aebf8503bcf39cf3a4a92d2d57da228f..504d798b8936f7e3d54f75dcde1525a99d81077d 100644
--- a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
+++ b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
@@ -126,8 +126,6 @@ namespace Grpc.IntegrationTesting
                     new ChannelOption(ChannelOptions.SslTargetNameOverride, options.ServerHostOverride)
                 };
             }
-            Console.WriteLine(options.ServerHost);
-            Console.WriteLine(options.ServerPort);
             var channel = new Channel(options.ServerHost, options.ServerPort, credentials, channelOptions);
             TestService.TestServiceClient client = new TestService.TestServiceClient(channel);
             await RunTestCaseAsync(client, options);
diff --git a/src/objective-c/tests/InteropTests.m b/src/objective-c/tests/InteropTests.m
index 1b63fe2059c985388799194518eda9c23c996a74..af58e2bd0489bdba3dd5979d748c0a30adf2ffce 100644
--- a/src/objective-c/tests/InteropTests.m
+++ b/src/objective-c/tests/InteropTests.m
@@ -89,7 +89,7 @@ static NSString * const kRemoteSSLHost = @"grpc-test.sandbox.google.com";
 }
 
 - (void)setUp {
-  _service = [[RMTTestService alloc] initWithHost:self.class.host];
+  _service = [RMTTestService serviceWithHost:self.class.host];
 }
 
 - (void)testEmptyUnaryRPC {
@@ -274,17 +274,17 @@ static NSString * const kRemoteSSLHost = @"grpc-test.sandbox.google.com";
 
 - (void)testCancelAfterFirstResponseRPC {
   __weak XCTestExpectation *expectation = [self expectationWithDescription:@"CancelAfterFirstResponse"];
-  
+
   // A buffered pipe to which we write a single value but never close
   GRXBufferedPipe *requestsBuffer = [[GRXBufferedPipe alloc] init];
-  
+
   __block BOOL receivedResponse = NO;
-  
+
   id request = [RMTStreamingOutputCallRequest messageWithPayloadSize:@21782
                                                requestedResponseSize:@31415];
-  
+
   [requestsBuffer writeValue:request];
-  
+
   __block ProtoRPC *call =
       [_service RPCToFullDuplexCallWithRequestsWriter:requestsBuffer
                                          eventHandler:^(BOOL done,
diff --git a/test/core/iomgr/udp_server_test.c b/test/core/iomgr/udp_server_test.c
index 6d3dfeeb57c7ae953cb5200b02283f4f9347a645..fc0026da4df9a5b2ebf43650ef686de6460fe02e 100644
--- a/test/core/iomgr/udp_server_test.c
+++ b/test/core/iomgr/udp_server_test.c
@@ -49,12 +49,12 @@ static grpc_pollset g_pollset;
 static int g_number_of_reads = 0;
 static int g_number_of_bytes_read = 0;
 
-static void on_read(int fd, grpc_server *server) {
+static void on_read(grpc_fd *emfd, grpc_server *server) {
   char read_buffer[512];
   ssize_t byte_count;
 
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
-  byte_count = recv(fd, read_buffer, sizeof(read_buffer), 0);
+  byte_count = recv(emfd->fd, read_buffer, sizeof(read_buffer), 0);
 
   g_number_of_reads++;
   g_number_of_bytes_read += (int)byte_count;
diff --git a/test/cpp/end2end/streaming_throughput_test.cc b/test/cpp/end2end/streaming_throughput_test.cc
index d64d96fe939328ee373603f5104330a8c24a61b8..344bf507ce108e9f22bd5ef95a51732ee53ef78f 100644
--- a/test/cpp/end2end/streaming_throughput_test.cc
+++ b/test/cpp/end2end/streaming_throughput_test.cc
@@ -31,9 +31,9 @@
  *
  */
 
-#include <atomic>
 #include <mutex>
 #include <thread>
+#include <time.h>
 
 #include <grpc++/channel.h>
 #include <grpc++/client_context.h>
@@ -44,6 +44,7 @@
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc/grpc.h>
+#include <grpc/support/atm.h>
 #include <grpc/support/thd.h>
 #include <grpc/support/time.h>
 #include <gtest/gtest.h>
@@ -99,12 +100,17 @@ namespace testing {
 
 class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service {
  public:
-  static void BidiStream_Sender(ServerReaderWriter<EchoResponse, EchoRequest>* stream, std::atomic<bool>* should_exit) {
+  static void BidiStream_Sender(ServerReaderWriter<EchoResponse, EchoRequest>* stream, gpr_atm* should_exit) {
     EchoResponse response;
     response.set_message(kLargeString);
-    while (!should_exit->load()) {
-      // TODO(vpai): Decide if the below requires blocking annotation
-      std::this_thread::sleep_for(std::chrono::milliseconds(1));
+    while (gpr_atm_acq_load(should_exit) == static_cast<gpr_atm>(0)) {
+      struct timespec tv = {0, 1000000}; // 1 ms
+      struct timespec rem;
+      // TODO(vpai): Mark this blocking
+      while (nanosleep(&tv, &rem) != 0) {
+        tv = rem;
+      }
+
       stream->Write(response);
     }
   }
@@ -114,14 +120,20 @@ class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service {
                     ServerReaderWriter<EchoResponse, EchoRequest>* stream)
       GRPC_OVERRIDE {
     EchoRequest request;
-    std::atomic<bool> should_exit(false);
+    gpr_atm should_exit;
+    gpr_atm_rel_store(&should_exit, static_cast<gpr_atm>(0));
+
     std::thread sender(std::bind(&TestServiceImpl::BidiStream_Sender, stream, &should_exit));
 
     while (stream->Read(&request)) {
-      // TODO(vpai): Decide if the below requires blocking annotation
-      std::this_thread::sleep_for(std::chrono::milliseconds(3));
+      struct timespec tv = {0, 3000000}; // 3 ms
+      struct timespec rem;
+      // TODO(vpai): Mark this blocking
+      while (nanosleep(&tv, &rem) != 0) {
+        tv = rem;
+      }
     }
-    should_exit.store(true);
+    gpr_atm_rel_store(&should_exit, static_cast<gpr_atm>(1));
     sender.join();
     return Status::OK;
   }
@@ -145,7 +157,7 @@ class End2endTest : public ::testing::Test {
   void ResetStub() {
     std::shared_ptr<Channel> channel = CreateChannel(
         server_address_.str(), InsecureCredentials());
-    stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel));
+    stub_ = grpc::cpp::test::util::TestService::NewStub(channel);
   }
 
   std::unique_ptr<grpc::cpp::test::util::TestService::Stub> stub_;
diff --git a/tools/jenkins/build_docker_and_run_interop_tests.sh b/tools/jenkins/build_docker_and_run_interop_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d2d56d947bf7f0bdc30b27eedb8744c2ef5b834b
--- /dev/null
+++ b/tools/jenkins/build_docker_and_run_interop_tests.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# This script is invoked by run_interop_tests.py to accommodate the
+# "interop tests under docker" scenario. You should never need to call this
+# script on your own.
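+#
+# The following variables are expected to be set in the environment:
+#   docker_suffix, TTY_FLAG, SERVERS_DOCKER_EXTRA_ARGS, SERVER_PORT_TUPLES,
+#   RUN_TESTS_COMMAND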
+
+set -ex
+
+cd `dirname $0`/../..
+git_root=`pwd`
+cd -
+
+mkdir -p /tmp/ccache
+
+# Use image name based on Dockerfile checksum
+DOCKER_IMAGE_NAME=grpc_jenkins_slave${docker_suffix}_`sha1sum tools/jenkins/grpc_jenkins_slave/Dockerfile | cut -f1 -d\ `
+
+# Make sure docker image has been built. Should be instantaneous if so.
+docker build -t $DOCKER_IMAGE_NAME tools/jenkins/grpc_jenkins_slave$docker_suffix
+
+# Create a local branch so the child Docker script won't complain
+git branch -f jenkins-docker
+
+# Make sure the CID files are gone.
+rm -f prepare.cid server.cid client.cid
+
+# Prepare image for interop tests
+docker run \
+  -e CCACHE_DIR=/tmp/ccache \
+  -i $TTY_FLAG \
+  -v "$git_root:/var/local/jenkins/grpc" \
+  -v /tmp/ccache:/tmp/ccache \
+  --cidfile=prepare.cid \
+  $DOCKER_IMAGE_NAME \
+  bash -l /var/local/jenkins/grpc/tools/jenkins/docker_prepare_interop_tests.sh || DOCKER_FAILED="true"
+
+PREPARE_CID=`cat prepare.cid`
+
+# Create image from the container, we will spawn one docker for clients
+# and one for servers.
+INTEROP_IMAGE=interop_`uuidgen`
+docker commit $PREPARE_CID $INTEROP_IMAGE
+# remove container, possibly killing it first
+docker rm -f $PREPARE_CID || true
+echo "Successfully built image $INTEROP_IMAGE"
+
+# run interop servers under docker in the background
+docker run \
+  -d -i \
+  $SERVERS_DOCKER_EXTRA_ARGS \
+  --cidfile=server.cid \
+  $INTEROP_IMAGE bash -l /var/local/git/grpc/tools/jenkins/docker_run_interop_servers.sh
+
+SERVER_CID=`cat server.cid`
+
+SERVER_PORTS=""
+for tuple in $SERVER_PORT_TUPLES
+do
+  # lookup under which port docker exposes given internal port
+  exposed_port=`docker port $SERVER_CID ${tuple#*:} | awk -F ":" '{print $NF}'`
+
+  # override the port for corresponding cloud_to_cloud server
+  SERVER_PORTS+=" --override_server ${tuple%:*}=localhost:$exposed_port"
+  echo "${tuple%:*} server is exposed under port $exposed_port"
+done
+
+# run interop clients
+docker run \
+  -e "RUN_TESTS_COMMAND=$RUN_TESTS_COMMAND $SERVER_PORTS" \
+  -w /var/local/git/grpc \
+  -i $TTY_FLAG \
+  --net=host \
+  --cidfile=client.cid \
+  $INTEROP_IMAGE bash -l /var/local/git/grpc/tools/jenkins/docker_run_interop_tests.sh || DOCKER_FAILED="true"
+
+CLIENT_CID=`cat client.cid`
+
+echo "killing and removing server container $SERVER_CID"
+docker rm -f $SERVER_CID || true
+
+docker cp $CLIENT_CID:/var/local/git/grpc/report.xml $git_root
+docker rm -f $CLIENT_CID || true
+docker rmi -f $DOCKER_IMAGE_NAME || true
diff --git a/tools/jenkins/docker_prepare_interop_tests.sh b/tools/jenkins/docker_prepare_interop_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a1fe0b72d2fc4b149d8c6ca5cd8b740504e757b8
--- /dev/null
+++ b/tools/jenkins/docker_prepare_interop_tests.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# This script is invoked by build_docker_and_run_interop_tests.sh inside
+# a docker container. You should never need to call this script on your own.
+set -e
+
+mkdir -p /var/local/git
+git clone --recursive /var/local/jenkins/grpc /var/local/git/grpc
+
+cd /var/local/git/grpc
+nvm use 0.12
+rvm use ruby-2.1
+
+# TODO(jtattermusch): use cleaner way to install root certs
+mkdir -p /usr/local/share/grpc
+cp etc/roots.pem /usr/local/share/grpc/
+
+# build C++ interop client & server
+make interop_client interop_server
+
+# build C# interop client & server
+make install_grpc_csharp_ext
+(cd src/csharp && mono /var/local/NuGet.exe restore Grpc.sln)
+(cd src/csharp && xbuild Grpc.sln)
+
+# build Node interop client & server
+npm install -g node-gyp
+make install_c -C /var/local/git/grpc
+(cd src/node && npm install && node-gyp rebuild)
+
+# build Ruby interop client and server
+(cd src/ruby && gem update bundler && bundle && rake compile:grpc)
+
+# TODO(jtattermusch): add python
+
+# build PHP interop client
+# TODO(jtattermusch): prerequisites for PHP should be installed earlier than this point.
+# Install composer
+curl -sS https://getcomposer.org/installer | php
+mv composer.phar /usr/local/bin/composer
+# Download the patched PHP protobuf so that PHP gRPC clients can be generated
+# from proto3 schemas.
+git clone https://github.com/stanley-cheung/Protobuf-PHP.git /var/local/git/protobuf-php
+(cd src/php/ext/grpc && phpize && ./configure && make)
+rvm all do gem install ronn rake
+(cd third_party/protobuf && make install)
+(cd /var/local/git/protobuf-php \
+  && rvm all do rake pear:package version=1.0 \
+  && pear install Protobuf-1.0.tgz)
+(cd src/php && composer install)
+(cd src/php && protoc-gen-php -i tests/interop/ -o tests/interop/ tests/interop/test.proto)
diff --git a/tools/run_tests/run_interops_test.sh b/tools/jenkins/docker_run_interop_servers.sh
similarity index 53%
rename from tools/run_tests/run_interops_test.sh
rename to tools/jenkins/docker_run_interop_servers.sh
index 9be253af46088a7cb41f127808b3e76bd35a050a..9f29a65aaa4dec5fcb8801c0e6ae73a552315030 100755
--- a/tools/run_tests/run_interops_test.sh
+++ b/tools/jenkins/docker_run_interop_servers.sh
@@ -1,5 +1,4 @@
-#!/bin/sh
-
+#!/bin/bash
 # Copyright 2015, Google Inc.
 # All rights reserved.
 #
@@ -28,27 +27,24 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# This script is invoked by build_docker_and_run_interop_tests.sh inside
+# a docker container. You should never need to call this script on your own.
+set -e
 
-language=$1
-test_case=$2
+cd /var/local/git/grpc
+nvm use 0.12
+rvm use ruby-2.1
 
-set -e
-if [ "$language" = "c++" ]
-then
-  sudo docker run grpc/cxx /var/local/git/grpc/bins/opt/interop_client --enable_ssl --use_prod_roots --server_host_override=grpc-test.sandbox.google.com --server_host=grpc-test.sandbox.google.com --server_port=443 --test_case=$test_case
-elif [ "$language" = "node" ]
-then
-  sudo docker run grpc/node /usr/bin/nodejs /var/local/git/grpc/src/node/interop/interop_client.js --use_tls=true --use_test_ca=true --server_port=443 --server_host=grpc-test.sandbox.google.com --server_host_override=grpc-test.sandbox.google.com --test_case=$test_case
-elif [ "$language" = "ruby" ]
-then
-  cmd_prefix="SSL_CERT_FILE=/cacerts/roots.pem ruby /var/local/git/grpc/src/ruby/bin/interop/interop_client.rb --use_tls --server_port=443 --server_host=grpc-test.sandbox.google.com --server_host_override=grpc-test.sandbox.google.com "
-  cmd="$cmd_prefix --test_case=$test_case"
-  sudo docker run grpc/ruby bin/bash -l -c '$cmd'
-elif [ "$language" = "php" ]
-then
-  sudo docker run -e SSL_CERT_FILE=/cacerts/roots.pem grpc/php /var/local/git/grpc/src/php/bin/interop_client.sh --server_port=443 --server_host=grpc-test.sandbox.google.com --server_host_override=grpc-test.sandbox.google.com --test_case=$test_case
-else
-  echo "interop testss not added for $language"
-  exit 1
-fi
+# If a server port env variable is set, run the corresponding interop server on that port in the background.
+# TODO(jtattermusch): ideally, run_interop_tests.py would generate the commands to run servers.
+
+[ -z "${SERVER_PORT_cxx}" ] || bins/opt/interop_server --enable_ssl --port=${SERVER_PORT_cxx} &
+
+[ -z "${SERVER_PORT_node}" ] || node src/node/interop/interop_server.js --use_tls=true --port=${SERVER_PORT_node} &
+
+[ -z "${SERVER_PORT_ruby}" ] || ruby src/ruby/bin/interop/interop_server.rb --use_tls --port=${SERVER_PORT_ruby} &
+
+[ -z "${SERVER_PORT_csharp}" ] || (cd src/csharp/Grpc.IntegrationTesting.Server/bin/Debug && mono Grpc.IntegrationTesting.Server.exe --use_tls --port=${SERVER_PORT_csharp}) &
 
+sleep infinity
diff --git a/tools/jenkins/docker_run_interop_tests.sh b/tools/jenkins/docker_run_interop_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..29970afb258e4b7897c117cf16b81c30a1bb4be0
--- /dev/null
+++ b/tools/jenkins/docker_run_interop_tests.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# This script is invoked by build_docker_and_run_interop_tests.sh inside
+# a docker container. You should never need to call this script on your own.
+set -e
+
+nvm use 0.12
+rvm use ruby-2.1
+
+# run the cloud-to-prod interop tests
+$RUN_TESTS_COMMAND
diff --git a/tools/jenkins/docker_run_tests.sh b/tools/jenkins/docker_run_tests.sh
index 781bff26b93bd913696d902e9cd012d9d28470d3..3595a95f5ced43f3a1789310db858f16de669754 100755
--- a/tools/jenkins/docker_run_tests.sh
+++ b/tools/jenkins/docker_run_tests.sh
@@ -28,8 +28,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #
-# This script is invoked by build_docker_and_run_tests.py inside a docker
+# This script is invoked by build_docker_and_run_tests.sh inside a docker
 # container. You should never need to call this script on your own.
+
 set -e
 
 export CONFIG=$config
diff --git a/tools/jenkins/run_distribution.sh b/tools/jenkins/run_distribution.sh
index 49b7d306d16ba5ff4bbcc40e4a1b0d0448656e42..64c60f15027d6289a8a11a1f626a9931b5eded1e 100755
--- a/tools/jenkins/run_distribution.sh
+++ b/tools/jenkins/run_distribution.sh
@@ -54,7 +54,7 @@ if [ "$platform" == "linux" ]; then
     docker build -t $DOCKER_IMAGE_NAME tools/jenkins/grpc_linuxbrew
 
     # run per-language homebrew installation script
-    docker run $DOCKER_IMAGE_NAME bash -l \
+    docker run --rm=true $DOCKER_IMAGE_NAME bash -l \
       -c "nvm use 0.12; \
           npm set unsafe-perm true; \
           rvm use ruby-2.1; \
diff --git a/tools/jenkins/run_jenkins.sh b/tools/jenkins/run_jenkins.sh
index c4a01a7d6636298daacb8f2be54c33f05c77d6b6..f79e739f6aecb92648165c79f134918bb2ff7cf1 100755
--- a/tools/jenkins/run_jenkins.sh
+++ b/tools/jenkins/run_jenkins.sh
@@ -56,9 +56,6 @@ then
 
   ./tools/run_tests/run_tests.py --use_docker -t -l $language -c $config -x report.xml $@ || true
 
-elif [ "$platform" == "interop" ]
-then
-  python tools/run_tests/run_interops.py --language=$language $@
 elif [ "$platform" == "windows" ]
 then
   echo "building $language on Windows"
@@ -83,6 +80,12 @@ then
   echo "building $language on FreeBSD"
 
   MAKE=gmake ./tools/run_tests/run_tests.py -t -l $language -c $config -x report.xml $@ || true
+
+elif [ "$platform" == "interop" ]
+then
+  echo "building interop tests for language $language"
+
+  ./tools/run_tests/run_interop_tests.py --use_docker -t -l $language --cloud_to_prod --server all || true
 else
   echo "Unknown platform $platform"
   exit 1
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
new file mode 100755
index 0000000000000000000000000000000000000000..a2fb1243cc49e3a247234ba94e8bd36ae8231597
--- /dev/null
+++ b/tools/run_tests/run_interop_tests.py
@@ -0,0 +1,330 @@
+#!/usr/bin/env python
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run interop (cross-language) tests in parallel."""
+
+import argparse
+import itertools
+import xml.etree.cElementTree as ET
+import jobset
+import os
+import subprocess
+import sys
+import time
+
+
+_CLOUD_TO_PROD_BASE_ARGS = [
+    '--server_host_override=grpc-test.sandbox.google.com',
+    '--server_host=grpc-test.sandbox.google.com',
+    '--server_port=443']
+
+_CLOUD_TO_CLOUD_BASE_ARGS = [
+    '--server_host_override=foo.test.google.fr']
+
+# TODO(jtattermusch): wrapped languages use this variable for the location
+# of roots.pem. We might want to use GRPC_DEFAULT_SSL_ROOTS_FILE_PATH
+# supported by C core SslCredentials instead.
+_SSL_CERT_ENV = { 'SSL_CERT_FILE':'/usr/local/share/grpc/roots.pem' }
+
+# TODO(jtattermusch): unify usage of --enable_ssl, --use_tls and --use_tls=true
+
+
+class CXXLanguage:
+
+  def __init__(self):
+    self.client_cmdline_base = ['bins/opt/interop_client']
+    self.client_cwd = None
+
+  def cloud_to_prod_args(self):
+    return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
+            ['--enable_ssl','--use_prod_roots'])
+
+  def cloud_to_cloud_args(self):
+    return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
+            ['--enable_ssl'])
+
+  def cloud_to_prod_env(self):
+    return None
+
+  def __str__(self):
+    return 'c++'
+
+
+class CSharpLanguage:
+
+  def __init__(self):
+    self.client_cmdline_base = ['mono', 'Grpc.IntegrationTesting.Client.exe']
+    self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug'
+
+  def cloud_to_prod_args(self):
+    return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
+            ['--use_tls'])
+
+  def cloud_to_cloud_args(self):
+    return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
+            ['--use_tls', '--use_test_ca'])
+
+  def cloud_to_prod_env(self):
+    return _SSL_CERT_ENV
+
+  def __str__(self):
+    return 'csharp'
+
+
+class NodeLanguage:
+
+  def __init__(self):
+    self.client_cmdline_base = ['node', 'src/node/interop/interop_client.js']
+    self.client_cwd = None
+
+  def cloud_to_prod_args(self):
+    return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
+            ['--use_tls=true'])
+
+  def cloud_to_cloud_args(self):
+    return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
+            ['--use_tls=true', '--use_test_ca=true'])
+
+  def cloud_to_prod_env(self):
+    return _SSL_CERT_ENV
+
+  def __str__(self):
+    return 'node'
+
+
+class PHPLanguage:
+
+  def __init__(self):
+    self.client_cmdline_base = ['src/php/bin/interop_client.sh']
+    self.client_cwd = None
+
+  def cloud_to_prod_args(self):
+    return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
+            ['--use_tls'])
+
+  def cloud_to_cloud_args(self):
+    return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
+            ['--use_tls', '--use_test_ca'])
+
+  def cloud_to_prod_env(self):
+    return _SSL_CERT_ENV
+
+  def __str__(self):
+    return 'php'
+
+
+class RubyLanguage:
+
+  def __init__(self):
+    self.client_cmdline_base = ['ruby', 'src/ruby/bin/interop/interop_client.rb']
+    self.client_cwd = None
+
+  def cloud_to_prod_args(self):
+    return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
+            ['--use_tls'])
+
+  def cloud_to_cloud_args(self):
+    return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
+            ['--use_tls', '--use_test_ca'])
+
+  def cloud_to_prod_env(self):
+    return _SSL_CERT_ENV
+
+  def __str__(self):
+    return 'ruby'
+
+
+# TODO(jtattermusch): add php and python once we get them working
+_LANGUAGES = {
+    'c++' : CXXLanguage(),
+    'csharp' : CSharpLanguage(),
+    'node' : NodeLanguage(),
+    'php' :  PHPLanguage(),
+    'ruby' : RubyLanguage(),
+}
+
+# languages supported as cloud_to_cloud servers
+# TODO(jtattermusch): enable other languages as servers as well
+_SERVERS = { 'c++' : 8010, 'node' : 8040, 'csharp': 8070 }
+
+# TODO(jtattermusch): add empty_stream once C++ starts supporting it.
+# TODO(jtattermusch): add support for auth tests.
+_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
+               'client_streaming', 'server_streaming',
+               'cancel_after_begin', 'cancel_after_first_response',
+               'timeout_on_sleeping_server']
+
+
+def cloud_to_prod_jobspec(language, test_case):
+  """Creates jobspec for cloud-to-prod interop test"""
+  cmdline = language.cloud_to_prod_args() + ['--test_case=%s' % test_case]
+  test_job = jobset.JobSpec(
+          cmdline=cmdline,
+          cwd=language.client_cwd,
+          shortname="cloud_to_prod:%s:%s" % (language, test_case),
+          environ=language.cloud_to_prod_env(),
+          timeout_seconds=60)
+  return test_job
+
+
+def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
+                           server_port):
+  """Creates jobspec for cloud-to-cloud interop test"""
+  cmdline = language.cloud_to_cloud_args() + ['--test_case=%s' % test_case,
+     '--server_host=%s' % server_host,
+     '--server_port=%s' % server_port ]
+  test_job = jobset.JobSpec(
+          cmdline=cmdline,
+          cwd=language.client_cwd,
+          shortname="cloud_to_cloud:%s:%s_server:%s" % (language, server_name,
+                                                 test_case),
+          timeout_seconds=60)
+  return test_job
+
+argp = argparse.ArgumentParser(description='Run interop tests.')
+argp.add_argument('-l', '--language',
+                  choices=['all'] + sorted(_LANGUAGES),
+                  nargs='+',
+                  default=['all'],
+                  help='Clients to run.')
+argp.add_argument('-j', '--jobs', default=24, type=int)
+argp.add_argument('--cloud_to_prod',
+                  default=False,
+                  action='store_const',
+                  const=True,
+                  help='Run cloud_to_prod tests.')
+argp.add_argument('-s', '--server',
+                  choices=['all'] + sorted(_SERVERS),
+                  action='append',
+                  help='Run cloud_to_cloud servers in a separate docker ' +
+                       'image. Servers can only be started automatically if ' +
+                       'the --use_docker option is enabled.',
+                  default=[])
+argp.add_argument('--override_server',
+                  action='append',
+                  type=lambda kv: kv.split("="),
+                  help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
+                  default=[])
+argp.add_argument('-t', '--travis',
+                  default=False,
+                  action='store_const',
+                  const=True)
+argp.add_argument('--use_docker',
+                  default=False,
+                  action='store_const',
+                  const=True,
+                  help='Run all the interop tests under docker. That provides ' +
+                  'additional isolation and prevents the need to install ' +
+                  'language specific prerequisites. Only available on Linux.')
+args = argp.parse_args()
+
+servers = set(s for s in itertools.chain.from_iterable(_SERVERS.iterkeys()
+                                                       if x == 'all' else [x]
+                                                       for x in args.server))
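+# e.g. '--server all' expands to every entry in _SERVERS, while
+# '--server c++ --server node' selects just those two servers.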
+
+if args.use_docker:
+  if not args.travis:
+    print 'Seen --use_docker flag, will run interop tests under docker.'
+    print
+    print 'IMPORTANT: The changes you are testing need to be locally committed'
+    print 'because only the committed changes in the current branch will be'
+    print 'copied to the docker environment.'
+    time.sleep(5)
+
+  child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
+  run_tests_cmd = ('tools/run_tests/run_interop_tests.py %s' %
+                   " ".join(child_argv[1:]))
+
+  # cmdline args to pass to the container running servers.
+  servers_extra_docker_args = ''
+  server_port_tuples = ''
+  for server in servers:
+    port = _SERVERS[server]
+    servers_extra_docker_args += ' -p %s' % port
+    servers_extra_docker_args += ' -e SERVER_PORT_%s=%s' % (server.replace("+", "x"), port)
+    server_port_tuples += ' %s:%s' % (server, port)
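+  # e.g. with servers == {'c++', 'node'} this produces (in some set order)
+  #   servers_extra_docker_args:
+  #     ' -p 8010 -e SERVER_PORT_cxx=8010 -p 8040 -e SERVER_PORT_node=8040'
+  #   server_port_tuples: ' c++:8010 node:8040'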
+
+  env = os.environ.copy()
+  env['RUN_TESTS_COMMAND'] = run_tests_cmd
+  env['SERVERS_DOCKER_EXTRA_ARGS'] = servers_extra_docker_args
+  env['SERVER_PORT_TUPLES'] = server_port_tuples
+  if not args.travis:
+    env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.
+
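+  # build_docker_and_run_interop_tests.sh is expected to pick up
+  # RUN_TESTS_COMMAND, SERVERS_DOCKER_EXTRA_ARGS, SERVER_PORT_TUPLES and
+  # TTY_FLAG from the environment passed to it below.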
+  subprocess.check_call(['tools/jenkins/build_docker_and_run_interop_tests.sh'],
+                        shell=True,
+                        env=env)
+  sys.exit(0)
+
+languages = set(_LANGUAGES[l]
+                for l in itertools.chain.from_iterable(
+                      _LANGUAGES.iterkeys() if x == 'all' else [x]
+                      for x in args.language))
+
+jobs = []
+if args.cloud_to_prod:
+  for language in languages:
+    for test_case in _TEST_CASES:
+      test_job = cloud_to_prod_jobspec(language, test_case)
+      jobs.append(test_job)
+
+# default servers to "localhost" and the default port
+server_addresses = dict((s, ("localhost", _SERVERS[s])) for s in servers)
+
+for server in args.override_server:
+  server_name = server[0]
+  (server_host, server_port) = server[1].split(":")
+  server_addresses[server_name] = (server_host, server_port)
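+# e.g. '--override_server csharp=localhost:50000' results in
+# server_addresses['csharp'] == ('localhost', '50000')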
+
+for server_name, server_address in server_addresses.iteritems():
+  (server_host, server_port) = server_address
+  for language in languages:
+    for test_case in _TEST_CASES:
+      test_job = cloud_to_cloud_jobspec(language,
+                                        test_case,
+                                        server_name,
+                                        server_host,
+                                        server_port)
+      jobs.append(test_job)
+
+if not jobs:
+  print "No jobs to run."
+  sys.exit(1)
+
+root = ET.Element('testsuites')
+testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests')
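+# jobset.run fills this testsuite element with per-job results via the
+# xml_report argument below; the tree is then written out as report.xml.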
+
+if jobset.run(jobs, newline_on_success=True, maxjobs=args.jobs, xml_report=testsuite):
+  jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+else:
+  jobset.message('FAILED', 'Some tests failed', do_newline=True)
+
+tree = ET.ElementTree(root)
+tree.write('report.xml', encoding='UTF-8')
\ No newline at end of file
diff --git a/tools/run_tests/run_interops.py b/tools/run_tests/run_interops.py
deleted file mode 100755
index 17083975d8c14110d858b42b85d38109a0c867f9..0000000000000000000000000000000000000000
--- a/tools/run_tests/run_interops.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import argparse
-import xml.etree.cElementTree as ET
-import jobset
-
-argp = argparse.ArgumentParser(description='Run interop tests.')
-argp.add_argument('-l', '--language',
-                  default='c++')
-args = argp.parse_args()
-
-# build job
-build_job = jobset.JobSpec(cmdline=['tools/run_tests/run_interops_build.sh', '%s' % args.language],
-                           shortname='build',
-                           timeout_seconds=30*60)
-
-# test jobs, each test is a separate job to run in parallel
-_TESTS = ['large_unary', 'empty_unary', 'ping_pong', 'client_streaming', 'server_streaming']
-jobs = []
-jobNumber = 0
-for test in _TESTS:
-  test_job = jobset.JobSpec(
-        cmdline=['tools/run_tests/run_interops_test.sh', '%s' % args.language, '%s' % test], 
-        shortname=test,
-        timeout_seconds=15*60)
-  jobs.append(test_job)
-  jobNumber+=1
-
-root = ET.Element('testsuites')
-testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests')
-
-# always do the build of docker first, and then all the tests can run in parallel
-jobset.run([build_job], maxjobs=1, xml_report=testsuite)
-jobset.run(jobs, maxjobs=jobNumber, xml_report=testsuite)
-
-tree = ET.ElementTree(root)
-tree.write('report.xml', encoding='UTF-8')
-
-
diff --git a/tools/run_tests/run_interops_build.sh b/tools/run_tests/run_interops_build.sh
deleted file mode 100755
index ff1a26cf899afb037758455d514274ee18747e45..0000000000000000000000000000000000000000
--- a/tools/run_tests/run_interops_build.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/sh
-
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-language=$1
-
-set -e
-
-#clean up any old docker files and start mirroring repository if not started already
-sudo docker rmi -f grpc/cxx || true
-sudo docker rmi -f grpc/base || true
-sudo docker rmi -f 0.0.0.0:5000/grpc/base || true
-sudo docker run -d -e GCS_BUCKET=docker-interop-images  -e STORAGE_PATH=/admin/docker_images -p 5000:5000 google/docker-registry || true
-
-#prepare building by pulling down base images and necessary files
-sudo docker pull 0.0.0.0:5000/grpc/base
-sudo docker tag -f 0.0.0.0:5000/grpc/base grpc/base
-
-if [ "$language" = "c++" ]
-then
-  gsutil cp -R gs://docker-interop-images/admin/service_account tools/dockerfile/grpc_cxx
-  gsutil cp -R gs://docker-interop-images/admin/cacerts tools/dockerfile/grpc_cxx
-  sudo docker build --no-cache -t grpc/cxx tools/dockerfile/grpc_cxx
-elif [ "$language" = "node" ]
-then
-  sudo docker pull 0.0.0.0:5000/grpc/node_base
-  sudo docker tag -f 0.0.0.0:5000/grpc/node_base grpc/node_base
-  gsutil cp -R gs://docker-interop-images/admin/service_account tools/dockerfile/grpc_node
-  gsutil cp -R gs://docker-interop-images/admin/cacerts tools/dockerfile/grpc_node
-  sudo docker build --no-cache -t grpc/node tools/dockerfile/grpc_node
-elif [ "$language" = "ruby" ]
-then
-  sudo docker pull 0.0.0.0:5000/grpc/ruby_base
-  sudo docker tag -f 0.0.0.0:5000/grpc/ruby_base grpc/ruby_base
-  gsutil cp -R gs://docker-interop-images/admin/service_account tools/dockerfile/grpc_ruby
-  gsutil cp -R gs://docker-interop-images/admin/cacerts tools/dockerfile/grpc_ruby
-  sudo docker build --no-cache -t grpc/ruby tools/dockerfile/grpc_ruby
-elif [ "$language" = "php" ]
-then
-  sudo docker pull 0.0.0.0:5000/grpc/php_base
-  sudo docker tag -f 0.0.0.0:5000/grpc/php_base grpc/php_base
-  gsutil cp -R gs://docker-interop-images/admin/service_account tools/dockerfile/grpc_php
-  gsutil cp -R gs://docker-interop-images/admin/cacerts tools/dockerfile/grpc_php
-  sudo docker build --no-cache -t grpc/php tools/dockerfile/grpc_php
-else
-  echo "interop testss not added for $language"
-  exit 1
-fi
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 73b2b3640b30f359c1ecbf04ee5a9fcf6d840e33..38c06764b9b5c81155754ec8ba7cb93bfdbebb73 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -557,7 +557,7 @@ argp.add_argument('--use_docker',
                   action='store_const',
                   const=True,
                   help="Run all the tests under docker. That provides " +
-                  "additional isolation and prevents the need to installs " +
+                  "additional isolation and prevents the need to install " +
                   "language specific prerequisites. Only available on Linux.")
 argp.add_argument('--allow_flakes',
                   default=False,