Commit b787c50d authored by Craig Tiller

Compiles with the breakup

parent 5dc3b309
@@ -43,6 +43,10 @@ typedef struct {
   gpr_timespec deadline;
 } grpc_chttp2_incoming_metadata_buffer;
 
+typedef struct {
+  grpc_linked_mdelem *elems;
+} grpc_chttp2_incoming_metadata_live_op_buffer;
+
 /** assumes everything initially zeroed */
 void grpc_chttp2_incoming_metadata_buffer_init(grpc_chttp2_incoming_metadata_buffer *buffer);
 void grpc_chttp2_incoming_metadata_buffer_destroy(grpc_chttp2_incoming_metadata_buffer *buffer);
@@ -56,6 +60,9 @@ void grpc_chttp2_incoming_metadata_buffer_set_deadline(grpc_chttp2_incoming_meta
     out of the transport */
 void grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb);
-void grpc_chttp2_incoming_metadata_buffer_postprocess_sopb(grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb);
+void grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
+    grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb, grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer);
+void grpc_chttp2_incoming_metadata_live_op_buffer_end(grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer);
 
 #endif /* GRPC_INTERNAL_CORE_CHTTP2_INCOMING_METADATA_H */
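For context on the pair of declarations added above, here is a rough usage sketch based only on this header and the grpc_chttp2_stream_global fields in the next hunk; the wrapper functions are hypothetical and not part of this commit:

/* Illustrative sketch only; assumed usage of the declarations above. */
static void example_publish_incoming_metadata(
    grpc_chttp2_stream_global *stream_global, grpc_stream_op_buffer *sopb) {
  /* Flush buffered incoming metadata into the outgoing sopb and start
     tracking the metadata elements that are now live in an op owned by the
     layer above the transport. */
  grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
      &stream_global->incoming_metadata, sopb,
      &stream_global->outstanding_metadata);
}

/* Once that op has been consumed (e.g. when the next recv is requested),
   release the tracked elements. */
static void example_release_incoming_metadata(
    grpc_chttp2_stream_global *stream_global) {
  grpc_chttp2_incoming_metadata_live_op_buffer_end(
      &stream_global->outstanding_metadata);
}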
@@ -426,6 +426,7 @@ typedef struct {
   /** incoming metadata */
   grpc_chttp2_incoming_metadata_buffer incoming_metadata;
+  grpc_chttp2_incoming_metadata_live_op_buffer outstanding_metadata;
 } grpc_chttp2_stream_global;
 
 typedef struct {
@@ -139,12 +139,6 @@ static void maybe_join_window_updates(grpc_chttp2_transport *t,
 static void add_metadata_batch(grpc_chttp2_transport *t, grpc_chttp2_stream *s);
 #endif
 
-static void flowctl_trace(grpc_chttp2_transport *t, const char *flow,
-                          gpr_int32 window, gpr_uint32 id, gpr_int32 delta) {
-  gpr_log(GPR_DEBUG, "HTTP:FLOW:%p:%d:%s: %d + %d = %d", t, id, flow, window,
-          delta, window + delta);
-}
-
 /*
  * CONSTRUCTION/DESTRUCTION/REFCOUNTING
  */
@@ -609,9 +603,7 @@ static void perform_op_locked(grpc_chttp2_transport_global *transport_global, gr
     stream_global->recv_done_closure = op->on_done_recv;
     stream_global->incoming_sopb = op->recv_ops;
     stream_global->incoming_sopb->nops = 0;
-    stream_global->publish_state = op->recv_state;
-    gpr_free(stream_global->old_incoming_metadata);
-    stream_global->old_incoming_metadata = NULL;
+    grpc_chttp2_incoming_metadata_live_op_buffer_end(&stream_global->outstanding_metadata);
     grpc_chttp2_read_write_state_changed(transport_global, stream_global);
     grpc_chttp2_incoming_window_state_changed(transport_global, stream_global);
   }
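In the old code the transport freed old_incoming_metadata directly; here that is replaced by ending the live op buffer. A minimal sketch of what that call could amount to, assuming the buffer only owns the flattened elems array (an assumption, not this commit's implementation):

/* Hypothetical implementation, assuming ownership of just the elems array. */
void grpc_chttp2_incoming_metadata_live_op_buffer_end(
    grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer) {
  gpr_free(live_op_buffer->elems); /* release the flattened mdelem array */
  live_op_buffer->elems = NULL;    /* leave the buffer ready for the next op */
}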
@@ -691,6 +683,20 @@ static void unlock_check_cancellations(grpc_chttp2_transport *t) {
 #endif
 }
 
+static void cancel_from_api(
+    grpc_chttp2_transport_global *transport_global,
+    grpc_chttp2_stream_global *stream_global,
+    grpc_status_code status) {
+  stream_global->cancelled = 1;
+  if (stream_global->in_stream_map) {
+    gpr_slice_buffer_add(&transport_global->qbuf,
+                         grpc_chttp2_rst_stream_create(stream_global->id,
+                             grpc_chttp2_grpc_status_to_http2_status(status)));
+  } else {
+    grpc_chttp2_read_write_state_changed(transport_global, stream_global);
+  }
+}
+
 #if 0
 static void cancel_stream_inner(grpc_chttp2_transport *t, grpc_chttp2_stream *s, gpr_uint32 id,
                                 grpc_status_code local_status,
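A hedged illustration of where the new cancel_from_api helper might be invoked; the op->cancel_with_status field is an assumption about the transport op of this era and does not appear in this commit:

/* Assumed call site inside perform_op_locked; illustrative only. */
if (op->cancel_with_status != GRPC_STATUS_OK) {
  cancel_from_api(transport_global, stream_global, op->cancel_with_status);
}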
@@ -908,6 +914,7 @@ static void recv_data(void *tp, gpr_slice *slices, size_t nslices,
  * CALLBACK LOOP
  */
 
+#if 0
 static grpc_stream_state compute_state(gpr_uint8 write_closed,
                                        gpr_uint8 read_closed) {
   if (write_closed && read_closed) return GRPC_STREAM_CLOSED;
@@ -915,6 +922,7 @@ static grpc_stream_state compute_state(gpr_uint8 write_closed,
   if (read_closed) return GRPC_STREAM_RECV_CLOSED;
   return GRPC_STREAM_OPEN;
 }
+#endif
 
 typedef struct {
   grpc_chttp2_transport *t;