author		Dave Airlie <airlied@redhat.com>	2012-05-02 09:21:50 +0100
committer	Dave Airlie <airlied@redhat.com>	2012-05-02 09:22:29 +0100
commit		5bc69bf9aeb73547cad8e1ce683a103fe9728282 (patch)
tree		d3ef275532fc4391cb645f8b4d45d39d7fbb73f4 /net/ipv4/tcp.c
parent		c6543a6e64ad8e456674a1c4a01dd024e38b665f (diff)
parent		a85d4bcb8a0cd5b3c754f98ff91ef2b9b3a73bc5 (diff)
Merge tag 'drm-intel-next-2012-04-23' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next
Daniel Vetter writes:

A new drm-intel-next pull. Highlights:
- More gmbus patches from Daniel Kurtz, I think gmbus is now ready, all known issues fixed.
- Fencing cleanup and pipelined fencing removal from Chris.
- rc6 residency interface from Ben, useful for powertop.
- Cleanups and code reorg around the ringbuffer code (Ben&me).
- Use hw semaphores in the pageflip code from Ben.
- More vlv stuff from Jesse, unfortunately his vlv cpu is doa, so less merged than I've hoped for - we still have the unused function warning :(
- More hsw patches from Eugeni, again, not yet enabled fully.
- intel_pm.c refactoring from Eugeni.
- Ironlake sprite support from Chris.
- And various smaller improvements/fixes all over the place.

Note that this pull request also contains a backmerge of -rc3 to sort out a few things in -next. I've also had to frob the shortlog a bit to exclude anything that -rc3 brings in with this pull.

Regression wise we have a few strange bugs going on, but for all of them closer inspection revealed that they've been pre-existing, just now slightly more likely to be hit. And for most of them we have a patch already. Otherwise QA has not reported any regressions, and I'm also not aware of anything bad happening in 3.4.

* tag 'drm-intel-next-2012-04-23' of git://people.freedesktop.org/~danvet/drm-intel: (420 commits)
  drm/i915: rc6 residency (fix the fix)
  drm/i915/tv: fix open-coded ARRAY_SIZE.
  drm/i915: invalidate render cache on gen2
  drm/i915: Silence the change of LVDS sync polarity
  drm/i915: add generic power management initialization
  drm/i915: move clock gating functionality into intel_pm module
  drm/i915: move emon functionality into intel_pm module
  drm/i915: move drps, rps and rc6-related functions to intel_pm
  drm/i915: fix line breaks in intel_pm
  drm/i915: move watermarks settings into intel_pm module
  drm/i915: move fbc-related functionality into intel_pm module
  drm/i915: Refactor get_fence() to use the common fence writing routine
  drm/i915: Refactor fence clearing to use the common fence writing routine
  drm/i915: Refactor put_fence() to use the common fence writing routine
  drm/i915: Prepare to consolidate fence writing
  drm/i915: Remove the unsightly "optimisation" from flush_fence()
  drm/i915: Simplify fence finding
  drm/i915: Discard the unused obj->last_fenced_ring
  drm/i915: Remove unused ring->setup_seqno
  drm/i915: Remove fence pipelining
  ...
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	15
1 files changed, 7 insertions, 8 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5d54ed30e82..8bb6adeb62c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -701,11 +701,12 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 	if (skb) {
 		if (sk_wmem_schedule(sk, skb->truesize)) {
+			skb_reserve(skb, sk->sk_prot->max_header);
 			/*
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
 			 */
-			skb_reserve(skb, skb_tailroom(skb) - size);
+			skb->avail_size = size;
 			return skb;
 		}
 		__kfree_skb(skb);
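The skb->avail_size bookkeeping introduced above is read back through the skb_availroom() helper that the following hunk switches the send path over to. As a rough, hypothetical sketch of the idea (the name skb_availroom_sketch and the exact body are assumptions, not the verbatim skbuff.h definition), the helper reports how much of the originally requested payload space is still unused, rather than the raw tailroom:

/* Sketch only: remaining payload space for a linear skb, derived from
 * the avail_size value recorded in sk_stream_alloc_skb() above, so the
 * sender stops filling a segment at the size it asked for even when the
 * underlying allocation rounded up and left extra tailroom.
 */
static inline int skb_availroom_sketch(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))	/* payload lives in page frags */
		return 0;
	return skb->avail_size - skb->len;
}

The apparent intent of the two hunks together is that a segment is never grown past the size originally passed to sk_stream_alloc_skb(), even if alloc_skb_fclone() returned more tailroom than was requested.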
@@ -995,10 +996,9 @@ new_segment:
 				copy = seglen;
 
 			/* Where to copy to? */
-			if (skb_tailroom(skb) > 0) {
+			if (skb_availroom(skb) > 0) {
 				/* We have some space in skb head. Superb! */
-				if (copy > skb_tailroom(skb))
-					copy = skb_tailroom(skb);
+				copy = min_t(int, copy, skb_availroom(skb));
 				err = skb_add_data_nocache(sk, skb, from, copy);
 				if (err)
 					goto do_fault;
@@ -1452,7 +1452,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    dma_find_channel(DMA_MEMCPY)) {
+		    net_dma_find_channel()) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
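This hunk and the next replace the open-coded dma_find_channel(DMA_MEMCPY) lookup with the net_dma_find_channel() helper. A minimal sketch of what such a wrapper plausibly does, assuming its job is to centralise the channel lookup for the NET_DMA receive-copy path and to filter out channels that cannot handle byte-aligned copies (the alignment check shown here is an assumption, not a quote of the real helper):

/* Sketch only: single place to pick a memcpy-capable DMA channel for
 * the socket receive offload path, rather than repeating the raw
 * dma_find_channel(DMA_MEMCPY) call at each TCP call site.
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	/* Assumed filtering: iovec copies are byte granular, so reject
	 * channels whose copy-alignment capabilities cannot cope.
	 */
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}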
@@ -1667,7 +1667,7 @@ do_prequeue:
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+				tp->ucopy.dma_chan = net_dma_find_channel();
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -3302,8 +3302,7 @@ void __init tcp_init(void)
 	tcp_init_mem(&init_net);
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
-	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
-	limit = max(limit, 128UL);
+	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
 	max_share = min(4UL*1024*1024, limit);
 
 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
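The arithmetic behind the new expression: shifting a page count left by (PAGE_SHIFT - 7) multiplies it by PAGE_SIZE / 128, so limit now holds 1/128 of the buffer memory expressed directly in bytes, which is what the comment above it describes. A small standalone check, assuming 4 KiB pages (PAGE_SHIFT = 12) and 1 GiB of buffer pages (both numbers are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long pages = 262144UL;            /* 1 GiB of 4 KiB pages     */
	unsigned long limit = pages << (12 - 7);   /* 262144 * 32 = 8388608 B  */
	unsigned long cap = 4UL * 1024 * 1024;     /* the 4 MiB max_share cap  */
	unsigned long max_share = limit < cap ? limit : cap;

	/* Prints limit=8388608 (8 MiB, exactly 1 GiB / 128) and max_share=4194304. */
	printf("limit=%lu max_share=%lu\n", limit, max_share);
	return 0;
}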