author		Arnaldo Carvalho de Melo <acme@redhat.com>	2007-04-10 21:04:22 -0700
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-25 22:25:26 -0700
commit		aa8223c7bb0b05183e1737881ed21827aa5b9e73
tree		05c9832326edfeb878472f15cf8133ed9f014cdf
parent		ab6a5bb6b28a970104a34f0f6959b73cf61bdc72
[SK_BUFF]: Introduce tcp_hdr(), remove skb->h.th
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
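
The conversion is mechanical: every open-coded skb->h.th dereference becomes a call to the new tcp_hdr() accessor added to include/linux/tcp.h, which at this point in the series still just casts skb->h.raw. A minimal sketch of the pattern, modelled on the driver TSO checksum paths touched below (the function name and surrounding context are illustrative, not taken from any one driver):

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

/*
 * Illustrative only: the pseudo-header checksum fixup that the TSO paths
 * below perform, written against the new accessor.
 *
 *   old form: skb->h.th->check   = ~csum_tcpudp_magic(...);
 *   new form: tcp_hdr(skb)->check = ~csum_tcpudp_magic(...);
 */
static void example_tso_csum_fixup(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = 0;
	iph->check = 0;
	tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
}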
-rw-r--r--  drivers/net/atl1/atl1_main.c    |  7
-rw-r--r--  drivers/net/bnx2.c              |  8
-rw-r--r--  drivers/net/chelsio/sge.c       |  2
-rw-r--r--  drivers/net/cxgb3/sge.c         |  2
-rw-r--r--  drivers/net/e1000/e1000_main.c  | 11
-rw-r--r--  drivers/net/ioc3-eth.c          |  2
-rw-r--r--  drivers/net/ixgb/ixgb_main.c    |  7
-rw-r--r--  drivers/net/mv643xx_eth.c       |  2
-rw-r--r--  drivers/net/tg3.c               | 15
-rw-r--r--  drivers/s390/net/qeth_eddp.c    |  2
-rw-r--r--  drivers/s390/net/qeth_tso.h     |  4
-rw-r--r--  include/linux/skbuff.h          |  1
-rw-r--r--  include/linux/tcp.h             |  9
-rw-r--r--  include/net/tcp.h               |  2
-rw-r--r--  include/net/tcp_ecn.h           |  6
-rw-r--r--  net/ipv4/ip_output.c            |  4
-rw-r--r--  net/ipv4/syncookies.c           | 36
-rw-r--r--  net/ipv4/tcp.c                  | 22
-rw-r--r--  net/ipv4/tcp_input.c            | 28
-rw-r--r--  net/ipv4/tcp_ipv4.c             | 32
-rw-r--r--  net/ipv4/tcp_minisocks.c        |  9
-rw-r--r--  net/ipv4/tcp_output.c           | 13
-rw-r--r--  net/ipv6/tcp_ipv6.c             | 32
23 files changed, 134 insertions, 122 deletions
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 8d5994751e2..d60c2217332 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -1298,9 +1298,10 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
 			iph->tot_len = 0;
 			iph->check = 0;
-			skb->h.th->check = ~csum_tcpudp_magic(iph->saddr,
-							iph->daddr, 0,
-							IPPROTO_TCP, 0);
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
 			ipofst = skb_network_offset(skb);
 			if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
 				tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 73512fb1645..7e7b5f34403 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4524,7 +4524,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
 
 		tcp_opt_len = 0;
-		if (skb->h.th->doff > 5)
+		if (tcp_hdr(skb)->doff > 5)
 			tcp_opt_len = tcp_optlen(skb);
 
 		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
@@ -4532,9 +4532,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		iph = ip_hdr(skb);
 		iph->check = 0;
 		iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
-		skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-						      0, IPPROTO_TCP, 0);
-
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+							 iph->daddr, 0,
+							 IPPROTO_TCP, 0);
 		if (tcp_opt_len || (iph->ihl > 5)) {
 			vlan_tag_flags |= ((iph->ihl - 5) +
 					   (tcp_opt_len >> 2)) << 8;
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index a4204dff363..43e92f9f0bc 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1872,7 +1872,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		hdr->opcode = CPL_TX_PKT_LSO;
 		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
 		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
-		hdr->tcp_hdr_words = skb->h.th->doff;
+		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
 							  skb_shinfo(skb)->gso_size));
 		hdr->len = htonl(skb->len - sizeof(*hdr));
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index d38b1bcd138..a70fe9145a2 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -901,7 +901,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 			CPL_ETH_II : CPL_ETH_II_VLAN;
 		tso_info |= V_LSO_ETH_TYPE(eth_type) |
 		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
-		    V_LSO_TCPHDR_WORDS(skb->h.th->doff);
+		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
 		hdr->lso_info = htonl(tso_info);
 		flits = 3;
 	} else {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4572fbba50f..e86deb2ef82 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2893,14 +2893,15 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 			struct iphdr *iph = ip_hdr(skb);
 			iph->tot_len = 0;
 			iph->check = 0;
-			skb->h.th->check = ~csum_tcpudp_magic(iph->saddr,
-							      iph->daddr, 0,
-							      IPPROTO_TCP, 0);
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
 			cmd_length = E1000_TXD_CMD_IP;
 			ipcse = skb_transport_offset(skb) - 1;
 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
 			ipv6_hdr(skb)->payload_len = 0;
-			skb->h.th->check =
+			tcp_hdr(skb)->check =
 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 						 &ipv6_hdr(skb)->daddr,
 						 0, IPPROTO_TCP, 0);
@@ -2909,7 +2910,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		ipcss = skb_network_offset(skb);
 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
 		tucss = skb_transport_offset(skb);
-		tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
 		tucse = 0;
 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index ba012e10d79..bc62e770a25 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1426,7 +1426,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 		if (proto == IPPROTO_TCP) {
 			csoff += offsetof(struct tcphdr, check);
-			skb->h.th->check = csum;
+			tcp_hdr(skb)->check = csum;
 		}
 
 		w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 96550d68162..e729ced52dc 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1195,13 +1195,14 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 		iph = ip_hdr(skb);
 		iph->tot_len = 0;
 		iph->check = 0;
-		skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-						      0, IPPROTO_TCP, 0);
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+							 iph->daddr, 0,
+							 IPPROTO_TCP, 0);
 		ipcss = skb_network_offset(skb);
 		ipcso = (void *)&(iph->check) - (void *)skb->data;
 		ipcse = skb_transport_offset(skb) - 1;
 		tucss = skb_transport_offset(skb);
-		tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
 		tucse = 0;
 
 		i = adapter->tx_ring.next_to_use;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 43723839e93..ab15ecd4b3d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1169,7 +1169,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
 			desc->l4i_chk = udp_hdr(skb)->check;
 			break;
 		case IPPROTO_TCP:
-			desc->l4i_chk = skb->h.th->check;
+			desc->l4i_chk = tcp_hdr(skb)->check;
 			break;
 		default:
 			BUG();
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7ca30d76bf6..414365c3198 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3922,7 +3922,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
 			       TXD_FLAG_CPU_POST_DMA);
 
-		skb->h.th->check = 0;
+		tcp_hdr(skb)->check = 0;
 
 	}
 	else if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -4080,14 +4080,13 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 		iph->check = 0;
 		iph->tot_len = htons(mss + hdr_len);
 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
-			skb->h.th->check = 0;
+			tcp_hdr(skb)->check = 0;
 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
-		}
-		else {
-			skb->h.th->check = ~csum_tcpudp_magic(iph->saddr,
-							      iph->daddr, 0,
-							      IPPROTO_TCP, 0);
-		}
+		} else
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
 
 		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 273f1745a00..b8e84674e17 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -416,7 +416,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
 			eddp->skb_offset += VLAN_HLEN;
 #endif /* CONFIG_QETH_VLAN */
 	}
-	tcph = eddp->skb->h.th;
+	tcph = tcp_hdr(eddp->skb);
 	while (eddp->skb_offset < eddp->skb->len) {
 		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
 			       (int)(eddp->skb->len - eddp->skb_offset));
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
index 4040bdd8c32..c20e923cf9a 100644
--- a/drivers/s390/net/qeth_tso.h
+++ b/drivers/s390/net/qeth_tso.h
@@ -41,7 +41,7 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
 
 	hdr = (struct qeth_hdr_tso *) skb->data;
 	iph = ip_hdr(skb);
-	tcph = skb->h.th;
+	tcph = tcp_hdr(skb);
 	/*fix header to TSO values ...*/
 	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
 	/*set values which are fix for the first approach ...*/
@@ -65,7 +65,7 @@ qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
 {
 	struct iphdr *iph = ip_hdr(skb);
 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
-	struct tcphdr *tcph = skb->h.th;
+	struct tcphdr *tcph = tcp_hdr(skb);
 
 	tcph->check = 0;
 	if (skb->protocol == ETH_P_IPV6) {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e580416de78..8f158d66d2a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -237,7 +237,6 @@ struct sk_buff {
 	/* 4 byte hole on 64 bit*/
 
 	union {
-		struct tcphdr	*th;
 		struct iphdr	*ipiph;
 		struct ipv6hdr	*ipv6h;
 		unsigned char	*raw;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 244ae0dacf4..911d937fb4c 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -178,14 +178,19 @@ struct tcp_md5sig {
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
 
+static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+{
+	return (struct tcphdr *)skb->h.raw;
+}
+
 static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
 {
-	return skb->h.th->doff * 4;
+	return tcp_hdr(skb)->doff * 4;
 }
 
 static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 {
-	return (skb->h.th->doff - 5) * 4;
+	return (tcp_hdr(skb)->doff - 5) * 4;
 }
 
 /* This defines a selective acknowledgement block. */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6dacc352dcf..af9273204cf 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -984,7 +984,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
 	ireq->wscale_ok = rx_opt->wscale_ok;
 	ireq->acked = 0;
 	ireq->ecn_ok = 0;
-	ireq->rmt_port = skb->h.th->source;
+	ireq->rmt_port = tcp_hdr(skb)->source;
 }
 
 extern void tcp_enter_memory_pressure(void);
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index 4629d77173f..b5f7c6ac088 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -54,7 +54,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
 			INET_ECN_xmit(sk);
 			if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
 				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
-				skb->h.th->cwr = 1;
+				tcp_hdr(skb)->cwr = 1;
 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 			}
 		} else {
@@ -62,7 +62,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
 			INET_ECN_dontxmit(sk);
 		}
 		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
-			skb->h.th->ece = 1;
+			tcp_hdr(skb)->ece = 1;
 	}
 }
 
@@ -70,7 +70,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
 
 static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	if (skb->h.th->cwr)
+	if (tcp_hdr(skb)->cwr)
 		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 6d92358fc51..602268661eb 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1352,8 +1352,8 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
 						.tos = RT_TOS(ip_hdr(skb)->tos) } },
 				    /* Not quite clean, but right. */
 				    .uli_u = { .ports =
-					       { .sport = skb->h.th->dest,
-						 .dport = skb->h.th->source } },
+					       { .sport = tcp_hdr(skb)->dest,
+						 .dport = tcp_hdr(skb)->source } },
 				    .proto = sk->sk_protocol };
 	security_skb_classify_flow(skb, &fl);
 	if (ip_route_output_key(&rt, &fl))
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26160717849..2da1be0589a 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -125,10 +125,11 @@ static __u16 const msstab[] = {
 __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	const struct iphdr *iph = ip_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	int mssind;
 	const __u16 mss = *mssp;
 
-
 	tp->last_synq_overflow = jiffies;
 
 	/* XXX sort msstab[] by probability? Binary search? */
@@ -138,9 +139,8 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 
 	NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
 
-	return secure_tcp_syn_cookie(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-				     skb->h.th->source, skb->h.th->dest,
-				     ntohl(skb->h.th->seq),
+	return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
+				     th->source, th->dest, ntohl(th->seq),
 				     jiffies / (HZ * 60),
 				     mssind);
 }
@@ -157,14 +157,13 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
  */
 static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
 {
-	__u32 seq;
-	__u32 mssind;
-
-	seq = ntohl(skb->h.th->seq)-1;
-	mssind = check_tcp_syn_cookie(cookie,
-				      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-				      skb->h.th->source, skb->h.th->dest,
-				      seq, jiffies / (HZ * 60), COUNTER_TRIES);
+	const struct iphdr *iph = ip_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
+	__u32 seq = ntohl(th->seq) - 1;
+	__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
+					    th->source, th->dest, seq,
+					    jiffies / (HZ * 60),
+					    COUNTER_TRIES);
 
 	return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
 }
@@ -191,14 +190,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	struct inet_request_sock *ireq;
 	struct tcp_request_sock *treq;
 	struct tcp_sock *tp = tcp_sk(sk);
-	__u32 cookie = ntohl(skb->h.th->ack_seq) - 1;
+	const struct tcphdr *th = tcp_hdr(skb);
+	__u32 cookie = ntohl(th->ack_seq) - 1;
 	struct sock *ret = sk;
 	struct request_sock *req;
 	int mss;
 	struct rtable *rt;
 	__u8 rcv_wscale;
 
-	if (!sysctl_tcp_syncookies || !skb->h.th->ack)
+	if (!sysctl_tcp_syncookies || !th->ack)
 		goto out;
 
 	if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
@@ -220,10 +220,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	}
 	ireq = inet_rsk(req);
 	treq = tcp_rsk(req);
-	treq->rcv_isn = ntohl(skb->h.th->seq) - 1;
+	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 	req->mss = mss;
-	ireq->rmt_port = skb->h.th->source;
+	ireq->rmt_port = th->source;
 	ireq->loc_addr = ip_hdr(skb)->daddr;
 	ireq->rmt_addr = ip_hdr(skb)->saddr;
 	ireq->opt = NULL;
@@ -261,8 +261,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 			    .tos = RT_CONN_FLAGS(sk) } },
 		    .proto = IPPROTO_TCP,
 		    .uli_u = { .ports =
-			       { .sport = skb->h.th->dest,
-				 .dport = skb->h.th->source } } };
+			       { .sport = th->dest,
+				 .dport = th->source } } };
 	security_req_classify_flow(req, &fl);
 	if (ip_route_output_key(&rt, &fl)) {
 		reqsk_free(req);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 689f9330f1b..f832f3c33ab 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -425,7 +425,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 			/* Subtract 1, if FIN is in queue. */
 			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
 				answ -=
-		       ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
+		       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
 		} else
 			answ = tp->urg_seq - tp->copied_seq;
 		release_sock(sk);
@@ -1016,9 +1016,9 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 
 	skb_queue_walk(&sk->sk_receive_queue, skb) {
 		offset = seq - TCP_SKB_CB(skb)->seq;
-		if (skb->h.th->syn)
+		if (tcp_hdr(skb)->syn)
 			offset--;
-		if (offset < skb->len || skb->h.th->fin) {
+		if (offset < skb->len || tcp_hdr(skb)->fin) {
 			*off = offset;
 			return skb;
 		}
@@ -1070,7 +1070,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			if (offset != skb->len)
 				break;
 		}
-		if (skb->h.th->fin) {
+		if (tcp_hdr(skb)->fin) {
 			sk_eat_skb(sk, skb, 0);
 			++seq;
 			break;
@@ -1174,11 +1174,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				break;
 			}
 			offset = *seq - TCP_SKB_CB(skb)->seq;
-			if (skb->h.th->syn)
+			if (tcp_hdr(skb)->syn)
 				offset--;
 			if (offset < skb->len)
 				goto found_ok_skb;
-			if (skb->h.th->fin)
+			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
 			BUG_TRAP(flags & MSG_PEEK);
 			skb = skb->next;
@@ -1394,7 +1394,7 @@ skip_copy:
 
 		if (used + offset < skb->len)
 			continue;
-		if (skb->h.th->fin)
+		if (tcp_hdr(skb)->fin)
 			goto found_fin_ok;
 		if (!(flags & MSG_PEEK)) {
 			sk_eat_skb(sk, skb, copied_early);
@@ -1563,7 +1563,7 @@ void tcp_close(struct sock *sk, long timeout)
 	 */
 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
-			  skb->h.th->fin;
+			  tcp_hdr(skb)->fin;
 		data_was_unread += len;
 		__kfree_skb(skb);
 	}
@@ -2170,7 +2170,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	if (!pskb_may_pull(skb, sizeof(*th)))
 		goto out;
 
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 	thlen = th->doff * 4;
 	if (thlen < sizeof(*th))
 		goto out;
@@ -2210,7 +2210,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	delta = htonl(oldlen + (thlen + len));
 
 	skb = segs;
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 	seq = ntohl(th->seq);
 
 	do {
@@ -2224,7 +2224,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 
 		seq += len;
 		skb = skb->next;
-		th = skb->h.th;
+		th = tcp_hdr(skb);
 
 		th->seq = htonl(seq);
 		th->cwr = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2776a8b0133..c1ce3623738 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -148,7 +148,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
 	     * to handle super-low mtu links fairly.
 	     */
 	    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
-	     !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
+	     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
 		/* Subtract also invariant (if peer is RFC compliant),
 		 * tcp header plus fixed timestamp option length.
 		 * Resulting "len" is MSS free of SACK jitter.
@@ -2559,9 +2559,9 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
 				 struct sk_buff *skb, u32 ack, u32 ack_seq)
 {
 	int flag = 0;
-	u32 nwin = ntohs(skb->h.th->window);
+	u32 nwin = ntohs(tcp_hdr(skb)->window);
 
-	if (likely(!skb->h.th->syn))
+	if (likely(!tcp_hdr(skb)->syn))
 		nwin <<= tp->rx_opt.snd_wscale;
 
 	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
@@ -2766,7 +2766,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		if (TCP_SKB_CB(skb)->sacked)
 			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
 
-		if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
+		if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
 			flag |= FLAG_ECE;
 
 		tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
@@ -2833,7 +2833,7 @@ uninteresting_ack:
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 		       int estab)
 {
 	unsigned char *ptr;
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	int length=(th->doff*4)-sizeof(struct tcphdr);
 
 	ptr = (unsigned char *)(th + 1);
@@ -2995,7 +2995,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	u32 seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 
@@ -3357,8 +3357,8 @@ static void tcp_ofo_queue(struct sock *sk)
 		__skb_unlink(skb, &tp->out_of_order_queue);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-		if (skb->h.th->fin)
-			tcp_fin(skb, sk, skb->h.th);
+		if (tcp_hdr(skb)->fin)
+			tcp_fin(skb, sk, tcp_hdr(skb));
 	}
 }
 
@@ -3366,7 +3366,7 @@ static int tcp_prune_queue(struct sock *sk);
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int eaten = -1;
 
@@ -3605,7 +3605,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		 * - bloated or contains data before "start" or
 		 *   overlaps to the next one.
 		 */
-		if (!skb->h.th->syn && !skb->h.th->fin &&
+		if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
 		    (tcp_win_from_space(skb->truesize) > skb->len ||
 		     before(TCP_SKB_CB(skb)->seq, start) ||
 		     (skb->next != tail &&
@@ -3616,7 +3616,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		start = TCP_SKB_CB(skb)->end_seq;
 		skb = skb->next;
 	}
-	if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+	if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
 		return;
 
 	while (before(start, end)) {
@@ -3665,7 +3665,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			__kfree_skb(skb);
 			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
-			if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+			if (skb == tail ||
+			    tcp_hdr(skb)->syn ||
+			    tcp_hdr(skb)->fin)
 				return;
 		}
 	}
@@ -4072,7 +4074,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
 		tcp_rcv_space_adjust(sk);
 
 		if ((tp->ucopy.len == 0) ||
-		    (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
+		    (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
 		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
 			tp->ucopy.wakeup = 1;
 			sk->sk_data_ready(sk, 0);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c146a02f849..617a5e4ca01 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -127,8 +127,8 @@ static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 					  ip_hdr(skb)->saddr,
-					  skb->h.th->dest,
-					  skb->h.th->source);
+					  tcp_hdr(skb)->dest,
+					  tcp_hdr(skb)->source);
 }
 
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -499,7 +499,7 @@ out:
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		th->check = ~tcp_v4_check(len, inet->saddr,
@@ -522,7 +522,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 		return -EINVAL;
 
 	iph = ip_hdr(skb);
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 
 	th->check = 0;
 	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
@@ -546,7 +546,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 
 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 #ifdef CONFIG_TCP_MD5SIG
@@ -622,7 +622,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
 			    struct sk_buff *skb, u32 seq, u32 ack,
 			    u32 win, u32 ts)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -745,7 +745,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
 
 	skb = tcp_make_synack(sk, dst, req);
 	if (skb) {
-		struct tcphdr *th = skb->h.th;
+		struct tcphdr *th = tcp_hdr(skb);
 
 		th->check = tcp_v4_check(skb->len,
 					 ireq->loc_addr,
@@ -781,7 +781,7 @@ static void syn_flood_warning(struct sk_buff *skb)
 		warntime = jiffies;
 		printk(KERN_INFO
 		       "possible SYN flooding on port %d. Sending cookies.\n",
-		       ntohs(skb->h.th->dest));
+		       ntohs(tcp_hdr(skb)->dest));
 	}
 }
 #endif
@@ -1134,7 +1134,7 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 	__u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct iphdr *iph = ip_hdr(skb);
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	int length = (th->doff << 2) - sizeof(struct tcphdr);
 	int genhash;
 	unsigned char *ptr;
@@ -1327,7 +1327,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	ireq->rmt_addr = saddr;
 	ireq->opt = tcp_v4_save_options(sk, skb);
 	if (!want_cookie)
-		TCP_ECN_create_request(req, skb->h.th);
+		TCP_ECN_create_request(req, tcp_hdr(skb));
 
 	if (want_cookie) {
 #ifdef CONFIG_SYN_COOKIES
@@ -1375,7 +1375,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
 				       "request from %u.%u.%u.%u/%u\n",
 				       NIPQUAD(saddr),
-				       ntohs(skb->h.th->source));
+				       ntohs(tcp_hdr(skb)->source));
 			dst_release(dst);
 			goto drop_and_free;
 		}
@@ -1481,7 +1481,7 @@ exit:
 
 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	const struct iphdr *iph = ip_hdr(skb);
 	struct sock *nsk;
 	struct request_sock **prev;
@@ -1556,7 +1556,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		TCP_CHECK_TIMER(sk);
-		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
 		}
@@ -1582,7 +1582,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 	}
 
 	TCP_CHECK_TIMER(sk);
-	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
 		rsk = sk;
 		goto reset;
 	}
@@ -1625,7 +1625,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
 
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 
 	if (th->doff < sizeof(struct tcphdr) / 4)
 		goto bad_packet;
@@ -1640,7 +1640,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 		      tcp_v4_checksum_init(skb)))
 		goto bad_packet;
 
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 	iph = ip_hdr(skb);
 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 463d2b24d2d..a12b08fca5a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -453,7 +453,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
 			newtp->window_clamp = min(newtp->window_clamp, 65535U);
 		}
-		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
+		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
+				  newtp->rx_opt.snd_wscale);
 		newtp->max_window = newtp->snd_wnd;
 
 		if (newtp->rx_opt.tstamp_ok) {
@@ -488,7 +489,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 			   struct request_sock *req,
 			   struct request_sock **prev)
 {
-	struct tcphdr *th = skb->h.th;
+	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
 	int paws_reject = 0;
 	struct tcp_options_received tmp_opt;
@@ -710,8 +711,8 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 	int state = child->sk_state;
 
 	if (!sock_owned_by_user(child)) {
-		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
-
+		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
+					    skb->len);
 		/* Wakeup parent, send SIGIO */
 		if (state == TCP_SYN_RECV && child->sk_state != state)
 			parent->sk_data_ready(parent, 0);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f19f5fb361b..29c53fbb220 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -465,11 +465,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
-	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
-	skb->h.th = th;
+	skb_push(skb, tcp_header_size);
+	skb_reset_transport_header(skb);
 	skb_set_owner_w(skb, sk);
 
 	/* Build TCP header and checksum it. */
+	th = tcp_hdr(skb);
 	th->source		= inet->sport;
 	th->dest		= inet->dport;
 	th->seq			= htonl(tcb->seq);
@@ -524,7 +525,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		tp->af_specific->calc_md5_hash(md5_hash_location,
 					       md5,
 					       sk, NULL, NULL,
-					       skb->h.th,
+					       tcp_hdr(skb),
 					       sk->sk_protocol,
 					       skb->len);
 	}
@@ -2128,8 +2129,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	if (md5)
 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
 #endif
-	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
+	skb_push(skb, tcp_header_size);
+	skb_reset_transport_header(skb);
 
+	th = tcp_hdr(skb);
 	memset(th, 0, sizeof(struct tcphdr));
 	th->syn = 1;
 	th->ack = 1;
@@ -2183,7 +2186,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 		tp->af_specific->calc_md5_hash(md5_hash_location,
 					       md5,
 					       NULL, dst, req,
-					       skb->h.th, sk->sk_protocol,
+					       tcp_hdr(skb), sk->sk_protocol,
 					       skb->len);
 	}
 #endif
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c573353f21c..4a55da079f5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -117,8 +117,8 @@ static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
 {
 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 					    ipv6_hdr(skb)->saddr.s6_addr32,
-					    skb->h.th->dest,
-					    skb->h.th->source);
+					    tcp_hdr(skb)->dest,
+					    tcp_hdr(skb)->source);
 }
 
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
@@ -509,7 +509,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 
 	skb = tcp_make_synack(sk, dst, req);
 	if (skb) {
-		struct tcphdr *th = skb->h.th;
+		struct tcphdr *th = tcp_hdr(skb);
 
 		th->check = tcp_v6_check(th, skb->len, &treq->loc_addr,
 					 &treq->rmt_addr,
@@ -838,7 +838,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
 	__u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	int length = (th->doff << 2) - sizeof (*th);
 	int genhash;
 	u8 *ptr;
@@ -946,7 +946,7 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
@@ -967,7 +967,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 		return -EINVAL;
 
 	ipv6h = ipv6_hdr(skb);
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 
 	th->check = 0;
 	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
@@ -979,7 +979,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = skb->h.th, *t1;
+	struct tcphdr *th = tcp_hdr(skb), *t1;
 	struct sk_buff *buff;
 	struct flowi fl;
 	int tot_len = sizeof(*th);
@@ -1079,7 +1079,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, struct sk_buff *skb,
 			    u32 seq, u32 ack, u32 win, u32 ts)
 {
-	struct tcphdr *th = skb->h.th, *t1;
+	struct tcphdr *th = tcp_hdr(skb), *t1;
 	struct sk_buff *buff;
 	struct flowi fl;
 	int tot_len = sizeof(struct tcphdr);
@@ -1195,7 +1195,7 @@ static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
 	struct request_sock *req, **prev;
-	const struct tcphdr *th = skb->h.th;
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct sock *nsk;
 
 	/* Find possible connection requests. */
@@ -1275,7 +1275,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	treq = inet6_rsk(req);
 	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
 	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
-	TCP_ECN_create_request(req, skb->h.th);
+	TCP_ECN_create_request(req, tcp_hdr(skb));
 	treq->pktopts = NULL;
 	if (ipv6_opt_accepted(sk, skb) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -1528,14 +1528,14 @@ out:
 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		if (!tcp_v6_check(skb->h.th, skb->len, &ipv6_hdr(skb)->saddr,
+		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			return 0;
 		}
 	}
 
-	skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th, skb->len,
+	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
 					      &ipv6_hdr(skb)->saddr,
 					      &ipv6_hdr(skb)->daddr,
 					      0));
@@ -1601,7 +1601,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		TCP_CHECK_TIMER(sk);
-		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
+		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 			goto reset;
 		TCP_CHECK_TIMER(sk);
 		if (opt_skb)
@@ -1632,7 +1632,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	}
 
 	TCP_CHECK_TIMER(sk);
-	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
+	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
 		goto reset;
 	TCP_CHECK_TIMER(sk);
 	if (opt_skb)
@@ -1698,7 +1698,7 @@ static int tcp_v6_rcv(struct sk_buff **pskb)
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
 
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 
 	if (th->doff < sizeof(struct tcphdr)/4)
 		goto bad_packet;
@@ -1709,7 +1709,7 @@ static int tcp_v6_rcv(struct sk_buff **pskb)
 		      tcp_v6_checksum_init(skb)))
 		goto bad_packet;
 
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
 				    skb->len - th->doff*4);