diff --git a/src/core/ext/client_config/client_channel.c b/src/core/ext/client_config/client_channel.c
index fbe5a33f1513e2e92de9a5039abdec74a6684f58..317892923901cf6379f6b01788beb36b6569e011 100644
--- a/src/core/ext/client_config/client_channel.c
+++ b/src/core/ext/client_config/client_channel.c
@@ -444,16 +444,16 @@ typedef struct client_channel_call_data {
   // stack and each has its own mutex.  If/when we have time, find a way
   // to avoid this without breaking the grpc_deadline_state abstraction.
   grpc_deadline_state deadline_state;
-  gpr_timespec deadline;
 
+  grpc_mdstr *path;  // Request path.
+  gpr_timespec call_start_time;
+  gpr_timespec deadline;
   enum {
     WAIT_FOR_READY_UNSET,
     WAIT_FOR_READY_FALSE,
     WAIT_FOR_READY_TRUE
   } wait_for_ready_from_service_config;
-
-  // Request path.
-  grpc_mdstr *path;
+  grpc_closure read_service_config;
 
   grpc_error *cancel_error;
 
@@ -657,6 +657,20 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
     int r;
     GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
     gpr_mu_unlock(&chand->mu);
+    // If the application explicitly set wait_for_ready, use that.
+    // Otherwise, if the service config specified a value for this
+    // method, use that.
+    gpr_mu_lock(&calld->mu);
+    if ((initial_metadata_flags &
+         GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET) == 0 &&
+        calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET) {
+      if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) {
+        initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+      } else {
+        initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+      }
+    }
+    gpr_mu_unlock(&calld->mu);
     // TODO(dgq): make this deadline configurable somehow.
     const grpc_lb_policy_pick_args inputs = {
         calld->pollent, initial_metadata, initial_metadata_flags,
@@ -769,24 +783,12 @@ retry:
       calld->connected_subchannel == NULL &&
       op->send_initial_metadata != NULL) {
     calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
-    // If the application explicitly set wait_for_ready, use that.
-    // Otherwise, if the service config specified a value for this
-    // method, use that.
-    uint32_t initial_metadata_flags = op->send_initial_metadata_flags;
-    if ((initial_metadata_flags &
-         GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET) == 0 &&
-        calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET) {
-      if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) {
-        initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
-      } else {
-        initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
-      }
-    }
     grpc_closure_init(&calld->next_step, subchannel_ready, calld);
     GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
     if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
-                        initial_metadata_flags, &calld->connected_subchannel,
-                        &calld->next_step, GRPC_ERROR_NONE)) {
+                        op->send_initial_metadata_flags,
+                        &calld->connected_subchannel, &calld->next_step,
+                        GRPC_ERROR_NONE)) {
       calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
       GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
     }
@@ -814,36 +816,69 @@ retry:
   GPR_TIMER_END("cc_start_transport_stream_op", 0);
 }
 
-/* Constructor for call_data */
-static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
-                                     grpc_call_element *elem,
-                                     grpc_call_element_args *args) {
+// Gets data from the service config.  Invoked when the resolver returns
+// its initial result.
+static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
+                                grpc_error *error) {
+  grpc_call_element *elem = arg;
   channel_data *chand = elem->channel_data;
   call_data *calld = elem->call_data;
+  // If this is an error, there's no point in looking at the service config.
+  if (error != GRPC_ERROR_NONE) return;
+  // Get the method config table from channel data.
   gpr_mu_lock(&chand->mu);
-  grpc_method_config_table *method_config_table =
-      chand->method_config_table == NULL
-          ? NULL
-          : grpc_method_config_table_ref(chand->method_config_table);
-  gpr_mu_unlock(&chand->mu);
-  grpc_method_config *method_config =
-      method_config_table == NULL ? NULL
-                                  : grpc_method_config_table_get_method_config(
-                                        method_config_table, args->path);
-  grpc_deadline_state_init(exec_ctx, elem, args, method_config);
-  calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
-  if (method_config != NULL) {
-    bool *wait_for_ready = grpc_method_config_get_wait_for_ready(method_config);
-    if (wait_for_ready != NULL) {
-      calld->wait_for_ready_from_service_config =
-          *wait_for_ready ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE;
-    }
+  grpc_method_config_table *method_config_table = NULL;
+  if (chand->method_config_table != NULL) {
+    method_config_table =
+        grpc_method_config_table_ref(chand->method_config_table);
   }
+  gpr_mu_unlock(&chand->mu);
+  // If the method config table was present, use it.
   if (method_config_table != NULL) {
+    grpc_method_config *method_config =
+        grpc_method_config_table_get_method_config(method_config_table,
+                                                   calld->path);
+    if (method_config != NULL) {
+      gpr_timespec *per_method_timeout =
+          grpc_method_config_get_timeout(method_config);
+      bool *wait_for_ready =
+          grpc_method_config_get_wait_for_ready(method_config);
+      if (per_method_timeout != NULL || wait_for_ready != NULL) {
+        gpr_mu_lock(&calld->mu);
+        if (per_method_timeout != NULL) {
+          gpr_timespec per_method_deadline =
+              gpr_time_add(calld->call_start_time, *per_method_timeout);
+          if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+            calld->deadline = per_method_deadline;
+            // Reset deadline timer.
+            grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+          }
+        }
+        if (wait_for_ready != NULL) {
+          calld->wait_for_ready_from_service_config =
+              *wait_for_ready ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE;
+        }
+        gpr_mu_unlock(&calld->mu);
+      }
+    }
     grpc_method_config_table_unref(method_config_table);
   }
-  calld->deadline = args->deadline;
+}
+
+/* Constructor for call_data */
+static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
+                                     grpc_call_element *elem,
+                                     grpc_call_element_args *args) {
+  channel_data *chand = elem->channel_data;
+  call_data *calld = elem->call_data;
+  // Initialize data members.
+  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
   calld->path = GRPC_MDSTR_REF(args->path);
+  // TODO(roth): Is there a better value to use here for the actual start
+  // time of the call (i.e., something initialized at the surface layer)?
+  calld->call_start_time = gpr_now(GPR_CLOCK_MONOTONIC);
+  calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
+  calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
   calld->cancel_error = GRPC_ERROR_NONE;
   gpr_atm_rel_store(&calld->subchannel_call, 0);
   gpr_mu_init(&calld->mu);
@@ -854,6 +889,53 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
   calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
   calld->owning_call = args->call_stack;
   calld->pollent = NULL;
+  // If the resolver has already returned results, then we can access
+  // the service config parameters immediately.  Otherwise, we need to
+  // defer that work until the resolver returns an initial result.
+  // TODO(roth): This code is almost but not quite identical to the code
+  // in read_service_config() above.  It would be nice to find a way to
+  // combine them, to avoid maintaining the same logic in two places.
+  gpr_mu_lock(&chand->mu);
+  if (chand->lb_policy != NULL) {
+    // We already have a resolver result, so check for service config.
+    if (chand->method_config_table != NULL) {
+      grpc_method_config_table *method_config_table =
+          grpc_method_config_table_ref(chand->method_config_table);
+      gpr_mu_unlock(&chand->mu);
+      grpc_method_config *method_config =
+          grpc_method_config_table_get_method_config(method_config_table,
+                                                     args->path);
+      if (method_config != NULL) {
+        gpr_timespec *per_method_timeout =
+            grpc_method_config_get_timeout(method_config);
+        if (per_method_timeout != NULL) {
+          gpr_timespec per_method_deadline =
+              gpr_time_add(calld->call_start_time, *per_method_timeout);
+          calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
+        }
+        bool *wait_for_ready =
+            grpc_method_config_get_wait_for_ready(method_config);
+        if (wait_for_ready != NULL) {
+          calld->wait_for_ready_from_service_config =
+              *wait_for_ready ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE;
+        }
+      }
+      grpc_method_config_table_unref(method_config_table);
+    } else {
+      gpr_mu_unlock(&chand->mu);
+    }
+  } else {
+    // We don't yet have a resolver result, so register a callback to
+    // get the service config data once the resolver returns.
+    grpc_closure_init(&calld->read_service_config, read_service_config, elem);
+    grpc_closure_list_append(&chand->waiting_for_config_closures,
+                             &calld->read_service_config, GRPC_ERROR_NONE);
+    gpr_mu_unlock(&chand->mu);
+  }
+  // Start the deadline timer with the current deadline value.  If we
+  // do not yet have service config data, then the timer may be reset
+  // later.
+  grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
   return GRPC_ERROR_NONE;
 }
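
An aside for reviewers: the net effect of the deadline handling added above is
that the call's effective deadline is the earlier of the application-supplied
deadline and the service-config per-method timeout applied to the call start
time. A minimal sketch of that computation, using the gpr time helpers already
referenced in this patch (the helper name "effective_deadline" is illustrative
only, not part of the patch):

/* Sketch only: both timestamps are assumed to be on GPR_CLOCK_MONOTONIC,
   and the per-method timeout must be a GPR_TIMESPAN. */
static gpr_timespec effective_deadline(gpr_timespec call_start_time,
                                       gpr_timespec application_deadline,
                                       gpr_timespec per_method_timeout) {
  gpr_timespec per_method_deadline =
      gpr_time_add(call_start_time, per_method_timeout);
  return gpr_time_min(application_deadline, per_method_deadline);
}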
 
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index 5216338833dcdb79463347afffb03b0cb21e36b2..d2ea5250f6c96ff2fd0cf212880299406435f994 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -64,30 +64,49 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
 }
 
 // Starts the deadline timer.
-static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
-                                  gpr_timespec deadline) {
+static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
+                                         grpc_call_element* elem,
+                                         gpr_timespec deadline) {
   grpc_deadline_state* deadline_state = elem->call_data;
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
-  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
+  // Note: We do not start the timer if there is already a timer
+  // pending.  This should be okay, because this function is only called
+  // from two functions exported by this module: grpc_deadline_state_start(),
+  // which starts the initial timer, and grpc_deadline_state_reset(), which
+  // cancels any pre-existing timer before starting a new one.  In
+  // particular, we want to ensure that if grpc_deadline_state_start()
+  // winds up trying to start the timer after grpc_deadline_state_reset()
+  // has already done so, we ignore the value from the former.
+  if (!deadline_state->timer_pending &&
+      gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
     // Take a reference to the call stack, to be owned by the timer.
     GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
-    gpr_mu_lock(&deadline_state->timer_mu);
     deadline_state->timer_pending = true;
     grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback,
                     elem, gpr_now(GPR_CLOCK_MONOTONIC));
-    gpr_mu_unlock(&deadline_state->timer_mu);
   }
 }
+static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
+                                  grpc_call_element* elem,
+                                  gpr_timespec deadline) {
+  grpc_deadline_state* deadline_state = elem->call_data;
+  gpr_mu_lock(&deadline_state->timer_mu);
+  start_timer_if_needed_locked(exec_ctx, elem, deadline);
+  gpr_mu_unlock(&deadline_state->timer_mu);
+}
 
 // Cancels the deadline timer.
-static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
-                                   grpc_deadline_state* deadline_state) {
-  gpr_mu_lock(&deadline_state->timer_mu);
+static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
+                                          grpc_deadline_state* deadline_state) {
   if (deadline_state->timer_pending) {
     grpc_timer_cancel(exec_ctx, &deadline_state->timer);
     deadline_state->timer_pending = false;
   }
+}
+static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
+                                   grpc_deadline_state* deadline_state) {
+  gpr_mu_lock(&deadline_state->timer_mu);
+  cancel_timer_if_needed_locked(exec_ctx, deadline_state);
   gpr_mu_unlock(&deadline_state->timer_mu);
 }
 
@@ -108,6 +127,21 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
   op->on_complete = &deadline_state->on_complete;
 }
 
+void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+                              grpc_call_stack* call_stack) {
+  grpc_deadline_state* deadline_state = elem->call_data;
+  memset(deadline_state, 0, sizeof(*deadline_state));
+  deadline_state->call_stack = call_stack;
+  gpr_mu_init(&deadline_state->timer_mu);
+}
+
+void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
+                                 grpc_call_element* elem) {
+  grpc_deadline_state* deadline_state = elem->call_data;
+  cancel_timer_if_needed(exec_ctx, deadline_state);
+  gpr_mu_destroy(&deadline_state->timer_mu);
+}
+
 // Callback and associated state for starting the timer after call stack
 // initialization has been completed.
 struct start_timer_after_init_state {
@@ -122,24 +156,11 @@ static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
   gpr_free(state);
 }
 
-void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-                              grpc_call_element_args* args,
-                              grpc_method_config* method_config) {
-  grpc_deadline_state* deadline_state = elem->call_data;
-  memset(deadline_state, 0, sizeof(*deadline_state));
-  deadline_state->call_stack = args->call_stack;
-  gpr_mu_init(&deadline_state->timer_mu);
+void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+                               gpr_timespec deadline) {
   // Deadline will always be infinite on servers, so the timer will only be
   // set on clients with a finite deadline.
-  gpr_timespec deadline =
-      gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
-  if (method_config != NULL) {
-    gpr_timespec* per_method_deadline =
-        grpc_method_config_get_timeout(method_config);
-    if (per_method_deadline != NULL) {
-      deadline = gpr_time_min(deadline, *per_method_deadline);
-    }
-  }
+  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
   if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
     // When the deadline passes, we indicate the failure by sending down
     // an op with cancel_error set.  However, we can't send down any ops
@@ -156,11 +177,13 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
   }
 }
 
-void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
-                                 grpc_call_element* elem) {
+void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+                               gpr_timespec new_deadline) {
   grpc_deadline_state* deadline_state = elem->call_data;
-  cancel_timer_if_needed(exec_ctx, deadline_state);
-  gpr_mu_destroy(&deadline_state->timer_mu);
+  gpr_mu_lock(&deadline_state->timer_mu);
+  cancel_timer_if_needed_locked(exec_ctx, deadline_state);
+  start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
+  gpr_mu_unlock(&deadline_state->timer_mu);
 }
 
 void grpc_deadline_state_client_start_transport_stream_op(
@@ -217,7 +240,8 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                   grpc_call_element_args* args) {
   // Note: size of call data is different between client and server.
   memset(elem->call_data, 0, elem->filter->sizeof_call_data);
-  grpc_deadline_state_init(exec_ctx, elem, args, NULL /* method_config */);
+  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
+  grpc_deadline_state_start(exec_ctx, elem, args->deadline);
   return GRPC_ERROR_NONE;
 }
 
diff --git a/src/core/lib/channel/deadline_filter.h b/src/core/lib/channel/deadline_filter.h
index ce6e8ea974dda7ff0d746709c821d7f8b9b403f1..9fd38027215354d3d2db9c9bb011011a58ec66e6 100644
--- a/src/core/lib/channel/deadline_filter.h
+++ b/src/core/lib/channel/deadline_filter.h
@@ -56,19 +56,37 @@ typedef struct grpc_deadline_state {
   grpc_closure* next_on_complete;
 } grpc_deadline_state;
 
-// To be used in a filter's init_call_elem(), destroy_call_elem(), and
-// start_transport_stream_op() methods to enforce call deadlines.
 //
-// REQUIRES: The first field in elem->call_data is a grpc_deadline_state.
+// NOTE: All of these functions require that the first field in
+// elem->call_data is a grpc_deadline_state.
 //
-// For grpc_deadline_state_client_start_transport_stream_op(), it is the
-// caller's responsibility to chain to the next filter if necessary
-// after the function returns.
+
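+// Initializes/destroys the deadline state.  To be called from the filter's
+// init_call_elem() and destroy_call_elem() methods, respectively.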
 void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-                              grpc_call_element_args* args,
-                              grpc_method_config* method_config);
+                              grpc_call_stack* call_stack);
 void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem);
+
+// Starts the timer with the specified deadline.
+// Should be called from the filter's init_call_elem() method.
+void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+                               gpr_timespec deadline);
+
+// Cancels the existing timer and starts a new one with new_deadline.
+//
+// Note: It is generally safe to call this with an earlier deadline
+// value than the current one, but not the reverse.  No checks are done
+// to ensure that the timer callback is not invoked while the timer is
+// being reset, which means that attempting to increase the deadline may
+// result in the timer callback being invoked twice.
+void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+                               gpr_timespec new_deadline);
+
+// To be called from the client-side filter's start_transport_stream_op()
+// method.  Ensures that the deadline timer is cancelled when the call
+// is completed.
+//
+// Note: It is the caller's responsibility to chain to the next filter if
+// necessary after this function returns.
 void grpc_deadline_state_client_start_transport_stream_op(
     grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
     grpc_transport_stream_op* op);
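
For reviewers unfamiliar with the split API, a minimal usage sketch under the
requirement stated above (a grpc_deadline_state as the first field of
call_data); the filter and struct names are hypothetical, and the element
signature mirrors the callers updated in this patch:

typedef struct {
  grpc_deadline_state deadline_state;  /* must be the first field */
  /* ... other per-call state ... */
} my_call_data;

static grpc_error *my_init_call_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_call_element *elem,
                                     grpc_call_element_args *args) {
  /* Set up the state, then start the timer; the timer may later be
     shortened via grpc_deadline_state_reset() if better information
     (e.g., a service-config timeout) arrives. */
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  grpc_deadline_state_start(exec_ctx, elem, args->deadline);
  return GRPC_ERROR_NONE;
}

/* ...and grpc_deadline_state_destroy() is called from the filter's
   destroy_call_elem() method. */
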
diff --git a/test/core/end2end/fake_resolver.c b/test/core/end2end/fake_resolver.c
index cb5b9e52b7bc139ab19511baf5f7210a7776c5f2..32dc9e2711dadd911849053c9c255290b091c68b 100644
--- a/test/core/end2end/fake_resolver.c
+++ b/test/core/end2end/fake_resolver.c
@@ -203,7 +203,7 @@ static grpc_resolver* fake_resolver_create(grpc_resolver_factory* factory,
     const char* timeout_str =
         grpc_uri_get_query_arg(args->uri, "timeout_seconds");
     gpr_timespec timeout = {timeout_str == NULL ? 0 : atoi(timeout_str), 0,
-                            GPR_CLOCK_MONOTONIC};
+                            GPR_TIMESPAN};
     const char* max_request_message_bytes_str =
         grpc_uri_get_query_arg(args->uri, "max_request_message_bytes");
     int32_t max_request_message_bytes =
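
A note on the GPR_TIMESPAN change above: the parsed timeout is a duration, not
a point in time, so it needs clock type GPR_TIMESPAN in order to be added to a
monotonic timestamp later on. A hedged illustration of how such a value is
used:

/* Illustration only: a 5-second timeout expressed as a span, converted
   into a deadline relative to "now". */
gpr_timespec timeout = {5, 0, GPR_TIMESPAN};
gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), timeout);
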
diff --git a/test/core/end2end/tests/cancel_after_accept.c b/test/core/end2end/tests/cancel_after_accept.c
index 8e2c9a0aa4bab837243101275ba3138f3475f18c..9f4981552710891f990e913f2f24ff2d1fe8ec42 100644
--- a/test/core/end2end/tests/cancel_after_accept.c
+++ b/test/core/end2end/tests/cancel_after_accept.c
@@ -49,12 +49,13 @@ static void *tag(intptr_t t) { return (void *)t; }
 static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
                                             const char *test_name,
                                             grpc_channel_args *client_args,
-                                            grpc_channel_args *server_args) {
+                                            grpc_channel_args *server_args,
+                                            const char *query_args) {
   grpc_end2end_test_fixture f;
   gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
   f = config.create_fixture(client_args, server_args);
   config.init_server(&f, server_args);
-  config.init_client(&f, client_args, NULL);
+  config.init_client(&f, client_args, query_args);
   return f;
 }
 
@@ -98,15 +99,15 @@ static void end_test(grpc_end2end_test_fixture *f) {
 
 /* Cancel after accept, no payload */
 static void test_cancel_after_accept(grpc_end2end_test_config config,
-                                     cancellation_mode mode) {
+                                     cancellation_mode mode,
+                                     bool use_service_config) {
   grpc_op ops[6];
   grpc_op *op;
   grpc_call *c;
   grpc_call *s;
-  grpc_end2end_test_fixture f =
-      begin_test(config, "cancel_after_accept", NULL, NULL);
-  gpr_timespec deadline = five_seconds_time();
-  cq_verifier *cqv = cq_verifier_create(f.cq);
+  gpr_timespec deadline = use_service_config
+                              ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
+                              : five_seconds_time();
   grpc_metadata_array initial_metadata_recv;
   grpc_metadata_array trailing_metadata_recv;
   grpc_metadata_array request_metadata_recv;
@@ -125,8 +126,19 @@ static void test_cancel_after_accept(grpc_end2end_test_config config,
       grpc_raw_byte_buffer_create(&response_payload_slice, 1);
   int was_cancelled = 2;
 
+  const char *query_args = NULL;
+  if (use_service_config) {
+    query_args =
+        "method_name=/service/method"
+        "&timeout_seconds=5";
+  }
+  grpc_end2end_test_fixture f =
+      begin_test(config, "cancel_after_accept", NULL, NULL, query_args);
+  cq_verifier *cqv = cq_verifier_create(f.cq);
+
   c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
-                               "/foo", "foo.test.google.fr", deadline, NULL);
+                               "/service/method", "foo.test.google.fr",
+                               deadline, NULL);
   GPR_ASSERT(c);
 
   grpc_metadata_array_init(&initial_metadata_recv);
@@ -230,7 +242,18 @@ void cancel_after_accept(grpc_end2end_test_config config) {
   unsigned i;
 
   for (i = 0; i < GPR_ARRAY_SIZE(cancellation_modes); i++) {
-    test_cancel_after_accept(config, cancellation_modes[i]);
+    test_cancel_after_accept(config, cancellation_modes[i],
+                             false /* use_service_config */);
+  }
+
+  if (config.feature_mask & FEATURE_MASK_SUPPORTS_QUERY_ARGS) {
+    for (i = 0; i < GPR_ARRAY_SIZE(cancellation_modes); i++) {
+      if (cancellation_modes[i].expect_status ==
+          GRPC_STATUS_DEADLINE_EXCEEDED) {
+        test_cancel_after_accept(config, cancellation_modes[i],
+                                 true /* use_service_config */);
+      }
+    }
   }
 }