Diffstat (limited to 'net/sctp/sm_statefuns.c')
-rw-r--r--	net/sctp/sm_statefuns.c	| 109
1 file changed, 87 insertions(+), 22 deletions(-)
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7288192f7df..1ef9de9bbae 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -334,6 +334,15 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
+	/* If the INIT is coming toward a closing socket, we'll send back
+	 * an ABORT.  Essentially, this catches the race of INIT being
+	 * backlogged to the socket at the same time as the user issues
+	 * close().  Since the socket and all its associations are going
+	 * away, we can treat this as OOTB traffic.
+	 */
+	if (sctp_sstate(ep->base.sk, CLOSING))
+		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
 	/* Verify the INIT chunk before processing it. */
 	err_chunk = NULL;
 	if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
@@ -375,6 +384,11 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	if (!new_asoc)
 		goto nomem;
 
+	if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+					     sctp_scope(sctp_source(chunk)),
+					     GFP_ATOMIC) < 0)
+		goto nomem_init;
+
 	/* The call, sctp_process_init(), can fail on memory allocation.  */
 	if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
 			       sctp_source(chunk),
@@ -392,9 +406,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 			len = ntohs(err_chunk->chunk_hdr->length) -
 			      sizeof(sctp_chunkhdr_t);
 
-	if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
-		goto nomem_init;
-
 	repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
 	if (!repl)
 		goto nomem_init;
@@ -962,7 +973,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
 {
 	struct sctp_transport *transport = (struct sctp_transport *) arg;
 
-	if (asoc->overall_error_count > asoc->max_retrans) {
+	if (asoc->overall_error_count >= asoc->max_retrans) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
@@ -985,14 +996,15 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
 		    sctp_sf_heartbeat(ep, asoc, type, arg, commands))
 			return SCTP_DISPOSITION_NOMEM;
+
 		/* Set transport error counter and association error counter
 		 * when sending heartbeat.
 		 */
-		sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
-				SCTP_TRANSPORT(transport));
 		sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
 				SCTP_TRANSPORT(transport));
 	}
+	sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
+			SCTP_TRANSPORT(transport));
 	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
 			SCTP_TRANSPORT(transport));
 
@@ -1106,7 +1118,8 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
 	/* Make sure that the HEARTBEAT-ACK chunk has a valid length.  */
-	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
+	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
+					    sizeof(sctp_sender_hb_info_t)))
 		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
 						  commands);
 
@@ -1442,6 +1455,10 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
 	if (!new_asoc)
 		goto nomem;
 
+	if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+				sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
+		goto nomem;
+
 	/* In the outbound INIT ACK the endpoint MUST copy its current
 	 * Verification Tag and Peers Verification tag into a reserved
 	 * place (local tie-tag and per tie-tag) within the state cookie.
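The sctp_sf_backbeat_8_3() hunk above tightens the HEARTBEAT-ACK length check: a bare chunk header is no longer enough, since a valid ACK must also echo back the sender's heartbeat info block. The following is a minimal userspace sketch of that style of check, assuming simplified stand-in layouts rather than the kernel's real sctp_chunkhdr_t and sctp_sender_hb_info_t definitions.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct chunkhdr {		/* simplified stand-in for sctp_chunkhdr_t */
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* network byte order, includes this header */
};

struct sender_hb_info {		/* stand-in for sctp_sender_hb_info_t */
	uint16_t param_type;
	uint16_t param_len;
	uint64_t nonce;		/* stands in for timestamp + path info */
};

/* Models sctp_chunk_length_valid(): the length on the wire must cover
 * at least "required" octets.
 */
static int chunk_length_valid(const struct chunkhdr *hdr, size_t required)
{
	return ntohs(hdr->length) >= required;
}

int main(void)
{
	size_t required = sizeof(struct chunkhdr) +
			  sizeof(struct sender_hb_info);
	struct chunkhdr ack = { .type = 5 };	/* HEARTBEAT ACK */

	/* Header-only ACK: passed the old check, fails the new one. */
	ack.length = htons(sizeof(struct chunkhdr));
	printf("header only:  valid=%d\n", chunk_length_valid(&ack, required));

	/* ACK that also carries the echoed heartbeat info: accepted. */
	ack.length = htons((uint16_t)required);
	printf("with hb info: valid=%d\n", chunk_length_valid(&ack, required));
	return 0;
}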
@@ -1478,9 +1495,6 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
 			sizeof(sctp_chunkhdr_t);
 	}
 
-	if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
-		goto nomem;
-
 	repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
 	if (!repl)
 		goto nomem;
@@ -1707,7 +1721,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
 
 		err = sctp_make_op_error(asoc, chunk,
 					 SCTP_ERROR_COOKIE_IN_SHUTDOWN,
-					 NULL, 0);
+					 NULL, 0, 0);
 		if (err)
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(err));
@@ -2561,6 +2575,12 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
 	chunk->subh.shutdown_hdr = sdh;
 	ctsn = ntohl(sdh->cum_tsn_ack);
 
+	if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+		SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
+		SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
+		return SCTP_DISPOSITION_DISCARD;
+	}
+
 	/* If Cumulative TSN Ack beyond the max tsn currently
 	 * send, terminating the association and respond to the
 	 * sender with an ABORT.
@@ -2624,6 +2644,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
 {
 	struct sctp_chunk *chunk = arg;
 	sctp_shutdownhdr_t *sdh;
+	__u32 ctsn;
 
 	if (!sctp_vtag_verify(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -2635,12 +2656,19 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
 						  commands);
 
 	sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
+	ctsn = ntohl(sdh->cum_tsn_ack);
+
+	if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+		SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
+		SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
+		return SCTP_DISPOSITION_DISCARD;
+	}
 
 	/* If Cumulative TSN Ack beyond the max tsn currently
 	 * send, terminating the association and respond to the
 	 * sender with an ABORT.
 	 */
-	if (!TSN_lt(ntohl(sdh->cum_tsn_ack), asoc->next_tsn))
+	if (!TSN_lt(ctsn, asoc->next_tsn))
 		return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
 
 	/* verify, by checking the Cumulative TSN Ack field of the
@@ -2841,6 +2869,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
 	sctp_cmd_seq_t *commands)
 {
 	struct sctp_chunk *chunk = arg;
+	sctp_arg_t force = SCTP_NOFORCE();
 	int error;
 
 	if (!sctp_vtag_verify(chunk, asoc)) {
@@ -2867,10 +2896,16 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
 		goto discard_force;
 	case SCTP_IERROR_NO_DATA:
 		goto consume;
+	case SCTP_IERROR_PROTO_VIOLATION:
+		return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+			(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
 	default:
 		BUG();
 	}
 
+	if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
+		force = SCTP_FORCE();
+
 	if (asoc->autoclose) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
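Both SHUTDOWN handlers above now discard a chunk whose cum_tsn_ack has fallen behind asoc->ctsn_ack_point. The guard rests on 32-bit serial-number arithmetic, which orders TSNs correctly even across wraparound. Here is a small standalone model; the TSN_lt macro is written the way the kernel computes it, minus the kernel's typecheck() guards.

#include <stdint.h>
#include <stdio.h>

/* 32-bit serial comparison: a < b iff (int32_t)(a - b) < 0, which
 * stays correct across the 32-bit wrap.
 */
#define TSN_lt(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

int main(void)
{
	uint32_t ctsn_ack_point = 0xfffffffe;	/* just below the wrap */

	/* A stale SHUTDOWN re-acking an old TSN is now discarded. */
	printf("cum_tsn_ack 0xfffffff0: discard=%d\n",
	       TSN_lt(0xfffffff0, ctsn_ack_point));	/* 1 */

	/* An ack that advanced past the wrap still orders correctly. */
	printf("cum_tsn_ack 0x00000002: discard=%d\n",
	       TSN_lt(0x00000002, ctsn_ack_point));	/* 0 */
	return 0;
}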
@@ -2899,7 +2934,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
 	 * more aggressive than the following algorithms allow.
 	 */
 	if (chunk->end_of_packet)
-		sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
+		sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
 
 	return SCTP_DISPOSITION_CONSUME;
 
@@ -2924,7 +2959,7 @@ discard_force:
 
 discard_noforce:
 	if (chunk->end_of_packet)
-		sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
+		sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
 
 	return SCTP_DISPOSITION_DISCARD;
 consume:
@@ -2977,6 +3012,9 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
 		break;
 	case SCTP_IERROR_NO_DATA:
 		goto consume;
+	case SCTP_IERROR_PROTO_VIOLATION:
+		return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+			(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
 	default:
 		BUG();
 	}
@@ -3519,6 +3557,12 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
 		asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
 		if (!asconf_ack)
 			return SCTP_DISPOSITION_DISCARD;
+
+		/* Reset the transport so that we select the correct one
+		 * this time around.  This is to make sure that we don't
+		 * accidentally use a stale transport that's been removed.
+		 */
+		asconf_ack->transport = NULL;
 	} else {
 		/* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
 		 * it must be either a stale packet or from an attacker.
@@ -3934,7 +3978,7 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
 		err_chunk = sctp_make_op_error(asoc, chunk,
 					       SCTP_ERROR_UNSUP_HMAC,
 					       &auth_hdr->hmac_id,
-					       sizeof(__u16));
+					       sizeof(__u16), 0);
 		if (err_chunk) {
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(err_chunk));
@@ -4026,7 +4070,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
 		hdr = unk_chunk->chunk_hdr;
 		err_chunk = sctp_make_op_error(asoc, unk_chunk,
 					       SCTP_ERROR_UNKNOWN_CHUNK, hdr,
-					       WORD_ROUND(ntohs(hdr->length)));
+					       WORD_ROUND(ntohs(hdr->length)),
+					       0);
 		if (err_chunk) {
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(err_chunk));
@@ -4045,7 +4090,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
 		hdr = unk_chunk->chunk_hdr;
 		err_chunk = sctp_make_op_error(asoc, unk_chunk,
 					       SCTP_ERROR_UNKNOWN_CHUNK, hdr,
-					       WORD_ROUND(ntohs(hdr->length)));
+					       WORD_ROUND(ntohs(hdr->length)),
+					       0);
 		if (err_chunk) {
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(err_chunk));
@@ -4546,9 +4592,9 @@ sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
 				       void *arg,
 				       sctp_cmd_seq_t *commands)
 {
-	struct sctp_chunk *chunk = arg;
+	struct sctp_datamsg *msg = arg;
 
-	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
+	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg));
 	return SCTP_DISPOSITION_CONSUME;
 }
 
@@ -5847,6 +5893,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	__u32 tsn;
 	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
 	struct sock *sk = asoc->base.sk;
+	u16 ssn;
+	u16 sid;
+	u8 ordered = 0;
 
 	data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
 	skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5986,8 +6035,10 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 */
 	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
 		SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
-	else
+	else {
 		SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
+		ordered = 1;
+	}
 
 	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
 	 *
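The force variable threaded through sctp_sf_eat_data_6_2() above implements sack-immediately behavior: a DATA chunk carrying the I-bit asks the receiver to bypass the delayed-SACK timer. A userspace sketch of that decision follows; the 0x08 value mirrors the kernel's SCTP_DATA_SACK_IMM flag, while sack_mode_for() and the enum are hypothetical stand-ins for the SCTP_FORCE()/SCTP_NOFORCE() command arguments.

#include <stdint.h>
#include <stdio.h>

#define SCTP_DATA_SACK_IMM 0x08	/* I-bit in the DATA chunk flags */

enum sack_mode { SACK_NOFORCE, SACK_FORCE };

static enum sack_mode sack_mode_for(uint8_t chunk_flags)
{
	/* Default to the delayed-SACK path... */
	enum sack_mode force = SACK_NOFORCE;

	/* ...unless the sender requested an immediate SACK. */
	if (chunk_flags & SCTP_DATA_SACK_IMM)
		force = SACK_FORCE;
	return force;
}

int main(void)
{
	printf("plain DATA:   %s\n",
	       sack_mode_for(0x03) == SACK_FORCE ? "force" : "noforce");
	printf("DATA + I-bit: %s\n",
	       sack_mode_for(0x03 | SCTP_DATA_SACK_IMM) == SACK_FORCE
	       ? "force" : "noforce");
	return 0;
}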
@@ -5997,19 +6048,33 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
 	 * and discard the DATA chunk.
 	 */
-	if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
+	sid = ntohs(data_hdr->stream);
+	if (sid >= asoc->c.sinit_max_instreams) {
 		/* Mark tsn as received even though we drop it */
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
 
 		err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
 					 &data_hdr->stream,
-					 sizeof(data_hdr->stream));
+					 sizeof(data_hdr->stream),
+					 sizeof(u16));
 		if (err)
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(err));
 		return SCTP_IERROR_BAD_STREAM;
 	}
 
+	/* Check to see if the SSN is possible for this TSN.
+	 * The biggest gap we can record is 4K wide.  Since SSNs wrap
+	 * at an unsigned short, there is no way that an SSN can
+	 * wrap within a valid TSN window.  We can simply check whether
+	 * the current SSN is smaller than the next expected one.  If it
+	 * is, it wrapped and is invalid.
+	 */
+	ssn = ntohs(data_hdr->ssn);
+	if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) {
+		return SCTP_IERROR_PROTO_VIOLATION;
+	}
+
 	/* Send the data up to the user.  Note:  Schedule the
 	 * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
 	 * chunk needs the updated rwnd.
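The new SSN guard in sctp_eat_data() works because the TSN map spans at most a 4K window while SSNs are 16 bits wide: an ordered chunk whose SSN compares less than the next expected SSN on its stream can only be a wrapped, invalid value. The standalone model below assumes SSN_lt has the kernel's 16-bit serial-comparison semantics, with "expected" standing in for sctp_ssn_peek() on the inbound stream.

#include <stdint.h>
#include <stdio.h>

/* 16-bit serial comparison: a < b iff (int16_t)(a - b) < 0. */
#define SSN_lt(a, b)  ((int16_t)((uint16_t)((a) - (b))) < 0)

/* Model of the new guard: an ordered chunk must not carry an SSN
 * behind the next expected SSN for its stream.
 */
static int ssn_is_valid(int ordered, uint16_t ssn, uint16_t expected)
{
	if (!ordered)	/* unordered DATA carries no meaningful SSN */
		return 1;
	return !SSN_lt(ssn, expected);
}

int main(void)
{
	uint16_t expected = 0xfff0;	/* next expected SSN, near the wrap */

	printf("ssn 0xfff0 (expected):       ok=%d\n",
	       ssn_is_valid(1, 0xfff0, expected));	/* 1 */
	printf("ssn 0x0005 (just past wrap): ok=%d\n",
	       ssn_is_valid(1, 0x0005, expected));	/* 1 */
	printf("ssn 0xffe0 (wrapped/stale):  ok=%d\n",
	       ssn_is_valid(1, 0xffe0, expected));	/* 0 */
	return 0;
}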