Skip to content
Snippets Groups Projects
Commit cd4508f9 authored by murgatroid99's avatar murgatroid99
Browse files

Fix some memory leaks in UV TCP code

parent 4e014a24
No related branches found
No related tags found
No related merge requests found
...@@ -54,7 +54,7 @@ static int retry_named_port_failure(int status, request *r, ...@@ -54,7 +54,7 @@ static int retry_named_port_failure(int status, request *r,
int retry_status; int retry_status;
uv_getaddrinfo_t *req = gpr_malloc(sizeof(uv_getaddrinfo_t)); uv_getaddrinfo_t *req = gpr_malloc(sizeof(uv_getaddrinfo_t));
req->data = r; req->data = r;
r->port = svc[i][1]; r->port = gpr_strdup(svc[i][1]);
retry_status = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_cb, retry_status = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_cb,
r->host, r->port, r->hints); r->host, r->port, r->hints);
if (retry_status < 0 || getaddrinfo_cb == NULL) { if (retry_status < 0 || getaddrinfo_cb == NULL) {
...@@ -127,6 +127,8 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status, ...@@ -127,6 +127,8 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
GRPC_CLOSURE_SCHED(&exec_ctx, r->on_done, error); GRPC_CLOSURE_SCHED(&exec_ctx, r->on_done, error);
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);
gpr_free(r->hints); gpr_free(r->hints);
gpr_free(r->host);
gpr_free(r->port);
gpr_free(r); gpr_free(r);
uv_freeaddrinfo(res); uv_freeaddrinfo(res);
} }
......
...@@ -48,6 +48,7 @@ typedef struct grpc_uv_tcp_connect { ...@@ -48,6 +48,7 @@ typedef struct grpc_uv_tcp_connect {
/* Release all resources owned by a grpc_uv_tcp_connect record once the
 * connect attempt has fully completed (refcount reached zero).
 * Frees, in order: the resource-quota reference, the heap-allocated
 * peer-address string, and the connect record itself.
 * NOTE(review): connect->addr_name is assumed to be gpr_malloc'd by the
 * creator of the connect record — confirm against tcp_client_connect_impl. */
static void uv_tcp_connect_cleanup(grpc_exec_ctx *exec_ctx,
                                   grpc_uv_tcp_connect *connect) {
  grpc_resource_quota_unref_internal(exec_ctx, connect->resource_quota);
  /* addr_name was leaking before this fix; it is owned by this record. */
  gpr_free(connect->addr_name);
  gpr_free(connect);
}
...@@ -105,6 +106,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) { ...@@ -105,6 +106,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
} }
done = (--connect->refs == 0); done = (--connect->refs == 0);
if (done) { if (done) {
grpc_exec_ctx_flush(&exec_ctx);
uv_tcp_connect_cleanup(&exec_ctx, connect); uv_tcp_connect_cleanup(&exec_ctx, connect);
} }
GRPC_CLOSURE_SCHED(&exec_ctx, closure, error); GRPC_CLOSURE_SCHED(&exec_ctx, closure, error);
...@@ -140,6 +142,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, ...@@ -140,6 +142,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
connect->resource_quota = resource_quota; connect->resource_quota = resource_quota;
uv_tcp_init(uv_default_loop(), connect->tcp_handle); uv_tcp_init(uv_default_loop(), connect->tcp_handle);
connect->connect_req.data = connect; connect->connect_req.data = connect;
connect->refs = 1;
if (GRPC_TRACER_ON(grpc_tcp_trace)) { if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting", gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
......
...@@ -234,6 +234,7 @@ static void on_connect(uv_stream_t *server, int status) { ...@@ -234,6 +234,7 @@ static void on_connect(uv_stream_t *server, int status) {
sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
acceptor); acceptor);
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);
gpr_free(peer_name_string);
} }
} }
......
...@@ -67,6 +67,8 @@ typedef struct { ...@@ -67,6 +67,8 @@ typedef struct {
/* Destroy a grpc_tcp endpoint: drop the cached read slice and the
 * resource-user reference, then free the heap allocations this endpoint
 * owns (the uv_tcp_t handle, the peer-name string, and the struct itself).
 * NOTE(review): handle and peer_string were leaking before this fix;
 * both are assumed to be gpr_malloc'd/gpr_strdup'd at endpoint creation —
 * confirm against the grpc_tcp constructor. */
static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
  grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
  grpc_resource_user_unref(exec_ctx, tcp->resource_user);
  gpr_free(tcp->handle);
  gpr_free(tcp->peer_string);
  gpr_free(tcp);
}
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment