diff --git a/src/core/lib/iomgr/internal_errqueue.cc b/src/core/lib/iomgr/internal_errqueue.cc
index 4e2bfe3ccd5540877785d58ceb48dd940b481a4b..b68c66b7575b4ee6d46dd495f8bb3477b3e9ca3c 100644
--- a/src/core/lib/iomgr/internal_errqueue.cc
+++ b/src/core/lib/iomgr/internal_errqueue.cc
@@ -36,7 +36,7 @@ static bool errqueue_supported = false;
 bool kernel_supports_errqueue() { return errqueue_supported; }
 
 void grpc_errqueue_init() {
-/* Both-compile time and run-time linux kernel versions should be atleast 4.0.0
+/* Both compile-time and run-time linux kernel versions should be at least 4.0.0
  */
 #ifdef GRPC_LINUX_ERRQUEUE
   struct utsname buffer;
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index 6a925add80c3effedaa3f3a38b4a77ef0f9cad7e..4bf86b79551359a6d6a733c14dc3eadcf5e41408 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -487,7 +487,7 @@ static void timer_cancel(grpc_timer* timer) {
 /* Rebalances the timer shard by computing a new 'queue_deadline_cap' and
    moving all relevant timers in shard->list (i.e timers with deadlines earlier
    than 'queue_deadline_cap') into into shard->heap.
-   Returns 'true' if shard->heap has atleast ONE element
+   Returns 'true' if shard->heap has at least ONE element
    REQUIRES: shard->mu locked */
 static bool refill_heap(timer_shard* shard, grpc_millis now) {
   /* Compute the new queue window width and bound by the limits: */
diff --git a/test/core/bad_client/bad_client.cc b/test/core/bad_client/bad_client.cc
index 6b492523219b586f330dfe7115dea96ce2f5288e..26550a2a70124c981e0733450bdc30b093ae1b9d 100644
--- a/test/core/bad_client/bad_client.cc
+++ b/test/core/bad_client/bad_client.cc
@@ -257,7 +257,7 @@ bool client_connection_preface_validator(grpc_slice_buffer* incoming,
     return false;
   }
   grpc_slice slice = incoming->slices[0];
-  /* There should be atleast a settings frame present */
+  /* There should be at least one settings frame present */
   if (GRPC_SLICE_LENGTH(slice) < MIN_HTTP2_FRAME_SIZE) {
     return false;
   }
diff --git a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
index 54455350c240ed9ddbcd8eb7a46ae8df06a53a8b..329eaf2434e6d2767cff345eb188c4efd30e8a71 100644
--- a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
+++ b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
@@ -138,7 +138,7 @@ static void teardown() {
    Setup:
      The benchmark framework ensures that none of the threads proceed beyond the
      state.KeepRunning() call unless all the threads have called state.keepRunning
-     atleast once. So it is safe to do the initialization in one of the threads
+     at least once. So it is safe to do the initialization in one of the threads
      before state.KeepRunning() is called.
 
    Teardown: