Commit 3a6fe1b2 authored by Nicolas Noble

Merge pull request #592 from ctiller/chex2

Fix TSAN reported error in fd_posix.c
parents c4641f7f f95e37f3
@@ -45,7 +45,10 @@
 #include <grpc/support/log.h>
 #include <grpc/support/useful.h>
 
-enum descriptor_state { NOT_READY, READY, WAITING };
+enum descriptor_state {
+  NOT_READY = 0,
+  READY = 1
+}; /* or a pointer to a closure to call */
 
 /* We need to keep a freelist not because of any concerns of malloc performance
  * but instead so that implementations with multiple threads in (for example)
@@ -88,8 +91,8 @@ static grpc_fd *alloc_fd(int fd) {
     gpr_mu_init(&r->watcher_mu);
   }
   gpr_atm_rel_store(&r->refst, 1);
-  gpr_atm_rel_store(&r->readst.state, NOT_READY);
-  gpr_atm_rel_store(&r->writest.state, NOT_READY);
+  gpr_atm_rel_store(&r->readst, NOT_READY);
+  gpr_atm_rel_store(&r->writest, NOT_READY);
   gpr_atm_rel_store(&r->shutdown, 0);
   r->fd = fd;
   r->watcher_root.next = r->watcher_root.prev = &r->watcher_root;
@@ -166,11 +169,6 @@ void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
 void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
 
-typedef struct {
-  grpc_iomgr_cb_func cb;
-  void *arg;
-} callback;
-
 static void make_callback(grpc_iomgr_cb_func cb, void *arg, int success,
                           int allow_synchronous_callback) {
   if (allow_synchronous_callback) {
@@ -180,18 +178,18 @@ static void make_callback(grpc_iomgr_cb_func cb, void *arg, int success,
   }
 }
 
-static void make_callbacks(callback *callbacks, size_t n, int success,
+static void make_callbacks(grpc_iomgr_closure *callbacks, size_t n, int success,
                            int allow_synchronous_callback) {
   size_t i;
   for (i = 0; i < n; i++) {
-    make_callback(callbacks[i].cb, callbacks[i].arg, success,
+    make_callback(callbacks[i].cb, callbacks[i].cb_arg, success,
                   allow_synchronous_callback);
   }
 }
 
-static void notify_on(grpc_fd *fd, grpc_fd_state *st, grpc_iomgr_cb_func cb,
-                      void *arg, int allow_synchronous_callback) {
-  switch ((enum descriptor_state)gpr_atm_acq_load(&st->state)) {
+static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
+                      int allow_synchronous_callback) {
+  switch (gpr_atm_acq_load(st)) {
     case NOT_READY:
       /* There is no race if the descriptor is already ready, so we skip
          the interlocked op in that case. As long as the app doesn't
@@ -199,9 +197,7 @@ static void notify_on(grpc_fd *fd, grpc_fd_state *st, grpc_iomgr_cb_func cb,
          oldval should never be anything other than READY or NOT_READY. We
          don't
          check for user error on the fast path. */
-      st->cb = cb;
-      st->cb_arg = arg;
-      if (gpr_atm_rel_cas(&st->state, NOT_READY, WAITING)) {
+      if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
         /* swap was successful -- the closure will run after the next
            set_ready call. NOTE: we don't have an ABA problem here,
            since we should never have concurrent calls to the same
@@ -212,12 +208,13 @@ static void notify_on(grpc_fd *fd, grpc_fd_state *st, grpc_iomgr_cb_func cb,
       /* swap was unsuccessful due to an intervening set_ready call.
          Fall through to the READY code below */
     case READY:
-      assert(gpr_atm_acq_load(&st->state) == READY);
-      gpr_atm_rel_store(&st->state, NOT_READY);
-      make_callback(cb, arg, !gpr_atm_acq_load(&fd->shutdown),
+      assert(gpr_atm_acq_load(st) == READY);
+      gpr_atm_rel_store(st, NOT_READY);
+      make_callback(closure->cb, closure->cb_arg,
+                    !gpr_atm_acq_load(&fd->shutdown),
                     allow_synchronous_callback);
       return;
-    case WAITING:
+    default: /* WAITING */
       /* upcallptr was set to a different closure. This is an error! */
       gpr_log(GPR_ERROR,
               "User called a notify_on function with a previous callback still "
@@ -228,38 +225,38 @@ static void notify_on(grpc_fd *fd, grpc_fd_state *st, grpc_iomgr_cb_func cb,
   abort();
 }
 
-static void set_ready_locked(grpc_fd_state *st, callback *callbacks,
+static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure *callbacks,
                              size_t *ncallbacks) {
-  callback *c;
-  switch ((enum descriptor_state)gpr_atm_acq_load(&st->state)) {
+  gpr_intptr state = gpr_atm_acq_load(st);
+
+  switch (state) {
+    case READY:
+      /* duplicate ready, ignore */
+      return;
     case NOT_READY:
-      if (gpr_atm_rel_cas(&st->state, NOT_READY, READY)) {
+      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
       /* swap was unsuccessful due to an intervening set_ready call.
          Fall through to the WAITING code below */
-    case WAITING:
-      assert(gpr_atm_acq_load(&st->state) == WAITING);
-      c = &callbacks[(*ncallbacks)++];
-      c->cb = st->cb;
-      c->arg = st->cb_arg;
-      gpr_atm_rel_store(&st->state, NOT_READY);
-      return;
-    case READY:
-      /* duplicate ready, ignore */
+      state = gpr_atm_acq_load(st);
+    default: /* waiting */
+      assert(gpr_atm_acq_load(st) != READY &&
+             gpr_atm_acq_load(st) != NOT_READY);
+      callbacks[(*ncallbacks)++] = *(grpc_iomgr_closure *)state;
+      gpr_atm_rel_store(st, NOT_READY);
       return;
   }
 }
 
-static void set_ready(grpc_fd *fd, grpc_fd_state *st,
+static void set_ready(grpc_fd *fd, gpr_atm *st,
                       int allow_synchronous_callback) {
   /* only one set_ready can be active at once (but there may be a racing
      notify_on) */
   int success;
-  callback cb;
+  grpc_iomgr_closure cb;
   size_t ncb = 0;
   gpr_mu_lock(&fd->set_state_mu);
   set_ready_locked(st, &cb, &ncb);
@@ -269,7 +266,7 @@ static void set_ready(grpc_fd *fd, grpc_fd_state *st,
 }
 
 void grpc_fd_shutdown(grpc_fd *fd) {
-  callback cb[2];
+  grpc_iomgr_closure cb[2];
   size_t ncb = 0;
   gpr_mu_lock(&fd->set_state_mu);
   GPR_ASSERT(!gpr_atm_acq_load(&fd->shutdown));
@@ -280,14 +277,12 @@ void grpc_fd_shutdown(grpc_fd *fd) {
   make_callbacks(cb, ncb, 0, 0);
 }
 
-void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_cb_func read_cb,
-                            void *read_cb_arg) {
-  notify_on(fd, &fd->readst, read_cb, read_cb_arg, 0);
+void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) {
+  notify_on(fd, &fd->readst, closure, 0);
 }
 
-void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_cb_func write_cb,
-                             void *write_cb_arg) {
-  notify_on(fd, &fd->writest, write_cb, write_cb_arg, 0);
+void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
+  notify_on(fd, &fd->writest, closure, 0);
 }
 
 gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
@@ -305,8 +300,8 @@ gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
   watcher->fd = fd;
   gpr_mu_unlock(&fd->watcher_mu);
 
-  return (gpr_atm_acq_load(&fd->readst.state) != READY ? read_mask : 0) |
-         (gpr_atm_acq_load(&fd->writest.state) != READY ? write_mask : 0);
+  return (gpr_atm_acq_load(&fd->readst) != READY ? read_mask : 0) |
+         (gpr_atm_acq_load(&fd->writest) != READY ? write_mask : 0);
 }
 
 void grpc_fd_end_poll(grpc_fd_watcher *watcher) {
......
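The hunks above are the heart of the TSAN fix: the old grpc_fd_state carried cb and cb_arg fields that notify_on wrote while set_ready read them, with only the separate state word synchronized. The new code folds everything into one atomic word per direction, where 0 means NOT_READY, 1 means READY, and any other value is the address of the parked grpc_iomgr_closure. The stand-alone sketch below illustrates that tri-state word using C11 atomics in place of gpr_atm; every name in it is illustrative rather than the gRPC API, and the real code additionally serializes set_ready callers with fd->set_state_mu and may defer callbacks instead of running them inline.

/* Sketch only: C11 atomics stand in for gpr_atm, "closure" stands in for
   grpc_iomgr_closure. The packing trick relies on a real closure address
   never being 0 or 1. */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { NOT_READY = 0, READY = 1 }; /* any other value: address of a parked closure */

typedef struct {
  void (*cb)(void *arg, int success);
  void *cb_arg;
} closure;

/* notify_on: park a closure to run on the next ready event. */
static void sketch_notify_on(atomic_uintptr_t *st, closure *c) {
  uintptr_t prev = NOT_READY;
  if (atomic_compare_exchange_strong(st, &prev, (uintptr_t)c)) {
    return; /* parked: the next set_ready will consume and run it */
  }
  /* CAS failed: the only legal prior state is READY (a second pending
     closure per direction is a caller error; the real code logs and aborts). */
  assert(prev == READY);
  atomic_store(st, NOT_READY); /* consume the ready event */
  c->cb(c->cb_arg, 1);
}

/* set_ready: deliver a ready event; run the parked closure if there is one.
   (The real code serializes racing set_ready calls with a mutex.) */
static void sketch_set_ready(atomic_uintptr_t *st) {
  uintptr_t prev = NOT_READY;
  if (atomic_compare_exchange_strong(st, &prev, READY)) {
    return; /* nothing parked yet: a later notify_on will see READY */
  }
  if (prev == READY) {
    return; /* duplicate ready event, ignore */
  }
  atomic_store(st, NOT_READY); /* a closure was parked: take it and run it */
  ((closure *)prev)->cb(((closure *)prev)->cb_arg, 1);
}

static void on_ready(void *arg, int success) {
  printf("%s (success=%d)\n", (const char *)arg, success);
}

int main(void) {
  atomic_uintptr_t readst = NOT_READY;
  closure c = {on_ready, "read ready"};
  sketch_notify_on(&readst, &c); /* parks the closure */
  sketch_set_ready(&readst);     /* runs it */
  sketch_set_ready(&readst);     /* marks READY, nothing parked */
  sketch_notify_on(&readst, &c); /* sees READY, runs immediately */
  return 0;
}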
@@ -43,9 +43,7 @@
 typedef struct {
   grpc_iomgr_cb_func cb;
   void *cb_arg;
-  int success;
-  gpr_atm state;
-} grpc_fd_state;
+} grpc_iomgr_closure;
 
 typedef struct grpc_fd grpc_fd;
@@ -71,8 +69,8 @@ struct grpc_fd {
   gpr_mu watcher_mu;
   grpc_fd_watcher watcher_root;
 
-  grpc_fd_state readst;
-  grpc_fd_state writest;
+  gpr_atm readst;
+  gpr_atm writest;
 
   grpc_iomgr_cb_func on_done;
   void *on_done_user_data;
@@ -126,12 +124,10 @@ void grpc_fd_shutdown(grpc_fd *fd);
    underlying platform. This means that users must drain fd in read_cb before
    calling notify_on_read again. Users are also expected to handle spurious
    events, i.e read_cb is called while nothing can be readable from fd */
-void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_cb_func read_cb,
-                            void *read_cb_arg);
+void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure);
 
 /* Exactly the same semantics as above, except based on writable events. */
-void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_cb_func write_cb,
-                             void *write_cb_arg);
+void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure);
 
 /* Notification from the poller to an fd that it has become readable or
    writable.
......
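With the header change above, callers no longer hand over a (callback, arg) pair on every registration. They keep one grpc_iomgr_closure per event direction alive somewhere, usually embedded in the object that owns the fd as the tcp and server code below does, fill in cb and cb_arg once, and pass the closure's address every time they re-arm. A caller-side sketch of that pattern follows; it assumes it is compiled inside the gRPC tree at this revision, the include path is assumed from the tree layout, and everything prefixed my_ is hypothetical.

/* Illustrative caller, not part of the change: the my_* names are made up,
   and the include path is an assumption about the tree layout. */
#include "src/core/iomgr/fd_posix.h"

typedef struct {
  grpc_fd *fd;
  grpc_iomgr_closure read_closure; /* owned by the object, filled in once */
} my_watcher;

static void my_on_read(void *arg, int success) {
  my_watcher *w = arg;
  if (!success) {
    return; /* fd was shut down (or the poll failed): do not re-arm */
  }
  /* ... drain the fd here, then re-arm with the same closure object ... */
  grpc_fd_notify_on_read(w->fd, &w->read_closure);
}

static void my_watcher_start(my_watcher *w, int posix_fd) {
  w->fd = grpc_fd_create(posix_fd);
  w->read_closure.cb = my_on_read;
  w->read_closure.cb_arg = w;
  grpc_fd_notify_on_read(w->fd, &w->read_closure);
}

Because the fd stores only the closure's address, the closure must stay valid until its callback has run; embedding it in the owning struct gives that guarantee without a per-registration allocation, which is exactly what the tcp client, tcp endpoint, tcp server, and test changes below do.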
@@ -60,6 +60,7 @@ typedef struct {
   gpr_timespec deadline;
   grpc_alarm alarm;
   int refs;
+  grpc_iomgr_closure write_closure;
 } async_connect;
 
 static int prepare_socket(const struct sockaddr *addr, int fd) {
@@ -136,7 +137,7 @@ static void on_writable(void *acp, int success) {
          opened too many network connections. The "easy" fix:
          don't do that! */
       gpr_log(GPR_ERROR, "kernel out of buffers");
-      grpc_fd_notify_on_write(ac->fd, on_writable, ac);
+      grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
       return;
     } else {
       switch (so_error) {
@@ -229,9 +230,11 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
   ac->fd = grpc_fd_create(fd);
   gpr_mu_init(&ac->mu);
   ac->refs = 2;
+  ac->write_closure.cb = on_writable;
+  ac->write_closure.cb_arg = ac;
 
   grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac, gpr_now());
-  grpc_fd_notify_on_write(ac->fd, on_writable, ac);
+  grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
 }
 
 #endif
......
@@ -263,6 +263,9 @@ typedef struct {
   void *write_user_data;
   grpc_tcp_slice_state write_state;
 
+  grpc_iomgr_closure read_closure;
+  grpc_iomgr_closure write_closure;
+
 } grpc_tcp;
 
 static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success);
@@ -370,7 +373,7 @@ static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success) {
     } else {
       /* Spurious read event, consume it here */
       slice_state_destroy(&read_state);
-      grpc_fd_notify_on_read(tcp->em_fd, grpc_tcp_handle_read, tcp);
+      grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
     }
   } else {
     /* TODO(klempner): Log interesting errors */
@@ -405,7 +408,7 @@ static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
   tcp->read_cb = cb;
   tcp->read_user_data = user_data;
   gpr_ref(&tcp->refcount);
-  grpc_fd_notify_on_read(tcp->em_fd, grpc_tcp_handle_read, tcp);
+  grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
 }
 
 #define MAX_WRITE_IOVEC 16
@@ -468,7 +471,7 @@ static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success) {
   write_status = grpc_tcp_flush(tcp);
   if (write_status == GRPC_ENDPOINT_WRITE_PENDING) {
-    grpc_fd_notify_on_write(tcp->em_fd, grpc_tcp_handle_write, tcp);
+    grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
   } else {
     slice_state_destroy(&tcp->write_state);
     if (write_status == GRPC_ENDPOINT_WRITE_DONE) {
@@ -513,7 +516,7 @@ static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep,
     gpr_ref(&tcp->refcount);
     tcp->write_cb = cb;
     tcp->write_user_data = user_data;
-    grpc_fd_notify_on_write(tcp->em_fd, grpc_tcp_handle_write, tcp);
+    grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
   }
 
   return status;
@@ -541,6 +544,10 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size) {
   /* paired with unref in grpc_tcp_destroy */
   gpr_ref_init(&tcp->refcount, 1);
   tcp->em_fd = em_fd;
+  tcp->read_closure.cb = grpc_tcp_handle_read;
+  tcp->read_closure.cb_arg = tcp;
+  tcp->write_closure.cb = grpc_tcp_handle_write;
+  tcp->write_closure.cb_arg = tcp;
 
   return &tcp->base;
 }
......
@@ -82,6 +82,7 @@ typedef struct {
     struct sockaddr_un un;
   } addr;
   int addr_len;
+  grpc_iomgr_closure read_closure;
 } server_port;
 
 static void unlink_if_unix_domain_socket(const struct sockaddr_un *un) {
@@ -244,7 +245,7 @@ static void on_read(void *arg, int success) {
         case EINTR:
           continue;
         case EAGAIN:
-          grpc_fd_notify_on_read(sp->emfd, on_read, sp);
+          grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
           return;
         default:
           gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
@@ -393,7 +394,9 @@ void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollsets,
     for (j = 0; j < pollset_count; j++) {
       grpc_pollset_add_fd(pollsets[j], s->ports[i].emfd);
     }
-    grpc_fd_notify_on_read(s->ports[i].emfd, on_read, &s->ports[i]);
+    s->ports[i].read_closure.cb = on_read;
+    s->ports[i].read_closure.cb_arg = &s->ports[i];
+    grpc_fd_notify_on_read(s->ports[i].emfd, &s->ports[i].read_closure);
     s->active_ports++;
   }
   gpr_mu_unlock(&s->mu);
......
@@ -97,6 +97,7 @@ typedef struct {
   gpr_mu mu;      /* protect done and done_cv */
   gpr_cv done_cv; /* signaled when a server finishes serving */
   int done;       /* set to 1 when a server finishes serving */
+  grpc_iomgr_closure listen_closure;
 } server;
 
 static void server_init(server *sv) {
@@ -112,6 +113,7 @@ typedef struct {
   server *sv;              /* not owned by a single session */
   grpc_fd *em_fd;          /* fd to read upload bytes */
   char read_buf[BUF_SIZE]; /* buffer to store upload bytes */
+  grpc_iomgr_closure session_read_closure;
 } session;
 
 /* Called when an upload session can be safely shutdown.
@@ -162,7 +164,7 @@ static void session_read_cb(void *arg, /*session*/
        TODO(chenw): in multi-threaded version, callback and polling can be
        run in different threads. polling may catch a persist read edge event
        before notify_on_read is called. */
-    grpc_fd_notify_on_read(se->em_fd, session_read_cb, se);
+    grpc_fd_notify_on_read(se->em_fd, &se->session_read_closure);
   } else {
     gpr_log(GPR_ERROR, "Unhandled read error %s", strerror(errno));
     abort();
@@ -207,9 +209,11 @@ static void listen_cb(void *arg, /*=sv_arg*/
   se = gpr_malloc(sizeof(*se));
   se->sv = sv;
   se->em_fd = grpc_fd_create(fd);
-  grpc_fd_notify_on_read(se->em_fd, session_read_cb, se);
+  se->session_read_closure.cb = session_read_cb;
+  se->session_read_closure.cb_arg = se;
+  grpc_fd_notify_on_read(se->em_fd, &se->session_read_closure);
 
-  grpc_fd_notify_on_read(listen_em_fd, listen_cb, sv);
+  grpc_fd_notify_on_read(listen_em_fd, &sv->listen_closure);
 }
 
 /* Max number of connections pending to be accepted by listen(). */
@@ -234,7 +238,9 @@ static int server_start(server *sv) {
   sv->em_fd = grpc_fd_create(fd);
 
   /* Register to be interested in reading from listen_fd. */
-  grpc_fd_notify_on_read(sv->em_fd, listen_cb, sv);
+  sv->listen_closure.cb = listen_cb;
+  sv->listen_closure.cb_arg = sv;
+  grpc_fd_notify_on_read(sv->em_fd, &sv->listen_closure);
 
   return port;
 }
@@ -268,6 +274,7 @@ typedef struct {
   gpr_mu mu;      /* protect done and done_cv */
   gpr_cv done_cv; /* signaled when a client finishes sending */
   int done;       /* set to 1 when a client finishes sending */
+  grpc_iomgr_closure write_closure;
 } client;
 
 static void client_init(client *cl) {
@@ -309,7 +316,9 @@ static void client_session_write(void *arg, /*client*/
   if (errno == EAGAIN) {
     gpr_mu_lock(&cl->mu);
     if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) {
-      grpc_fd_notify_on_write(cl->em_fd, client_session_write, cl);
+      cl->write_closure.cb = client_session_write;
+      cl->write_closure.cb_arg = cl;
+      grpc_fd_notify_on_write(cl->em_fd, &cl->write_closure);
       cl->client_write_cnt++;
     } else {
       client_session_shutdown_cb(arg, 1);
@@ -421,6 +430,13 @@ static void test_grpc_fd_change(void) {
   int sv[2];
   char data;
   int result;
+  grpc_iomgr_closure first_closure;
+  grpc_iomgr_closure second_closure;
+
+  first_closure.cb = first_read_callback;
+  first_closure.cb_arg = &a;
+  second_closure.cb = second_read_callback;
+  second_closure.cb_arg = &b;
 
   init_change_data(&a);
   init_change_data(&b);
@@ -434,7 +450,7 @@ static void test_grpc_fd_change(void) {
   em_fd = grpc_fd_create(sv[0]);
 
   /* Register the first callback, then make its FD readable */
-  grpc_fd_notify_on_read(em_fd, first_read_callback, &a);
+  grpc_fd_notify_on_read(em_fd, &first_closure);
   data = 0;
   result = write(sv[1], &data, 1);
   GPR_ASSERT(result == 1);
@@ -453,7 +469,7 @@ static void test_grpc_fd_change(void) {
   /* Now register a second callback with distinct change data, and do the same
      thing again. */
-  grpc_fd_notify_on_read(em_fd, second_read_callback, &b);
+  grpc_fd_notify_on_read(em_fd, &second_closure);
   data = 0;
   result = write(sv[1], &data, 1);
   GPR_ASSERT(result == 1);
......