author		David Vrabel <david.vrabel@csr.com>	2008-10-20 16:07:19 +0100
committer	David Vrabel <david.vrabel@csr.com>	2008-10-20 16:07:19 +0100
commit		61e0e79ee3c609eb34edf2fe023708cba6a79b1f (patch)
tree		663deacffd4071120dc9badb70428fe5f124c7b9 /drivers/block/aoe/aoecmd.c
parent		c15895ef30c2c03e99802951787183039a349d32 (diff)
parent		0cfd81031a26717fe14380d18275f8e217571615 (diff)
Merge branch 'master' into for-upstream
Conflicts:
	Documentation/ABI/testing/sysfs-bus-usb
	drivers/Makefile
Diffstat (limited to 'drivers/block/aoe/aoecmd.c')
-rw-r--r--	drivers/block/aoe/aoecmd.c	104
1 file changed, 44 insertions(+), 60 deletions(-)
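The substantive change pulled in by this merge replaces aoe's hand-rolled singly linked skb lists (skbpool_hd/skbpool_tl and sendq_hd/sendq_tl) with the kernel's struct sk_buff_head queue API. A minimal sketch of that idiom follows; struct example_dev and the two example_* functions are illustrative stand-ins and not part of the patch, while __skb_queue_tail(), skb_queue_splice_init(), and __skb_dequeue() are the real skbuff helpers the diff below adopts. The unlocked __skb_* variants are safe here because the driver's own d->lock serializes queue access, and the queue is spliced onto a private list so transmission happens outside the lock, as in rexmit_timer() and aoecmd_ata_rsp() below.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Illustrative stand-in for struct aoedev: only the fields needed here. */
struct example_dev {
	spinlock_t lock;
	struct sk_buff_head sendq;	/* replaces sendq_hd/sendq_tl */
};

/* Queue a packet for later transmission; the caller holds d->lock,
 * so the unlocked __skb_* variant is safe. */
static void example_queue_tx(struct example_dev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->sendq, skb);
}

/* Move everything queued so far onto a private list under the lock,
 * then transmit with the lock dropped. */
static void example_flush_tx(struct example_dev *d)
{
	struct sk_buff_head queue;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&queue);

	spin_lock_irqsave(&d->lock, flags);
	skb_queue_splice_init(&d->sendq, &queue);
	spin_unlock_irqrestore(&d->lock, flags);

	while ((skb = __skb_dequeue(&queue)) != NULL)
		dev_queue_xmit(skb);
}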
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 2f1746295d0..71ff78c9e4d 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -114,29 +114,22 @@ ifrotate(struct aoetgt *t)
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
- if (!d->skbpool_hd)
- d->skbpool_hd = skb;
- else
- d->skbpool_tl->next = skb;
- d->skbpool_tl = skb;
+ __skb_queue_tail(&d->skbpool, skb);
}
static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = skb_peek(&d->skbpool);
- skb = d->skbpool_hd;
if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
- d->skbpool_hd = skb->next;
- skb->next = NULL;
+ __skb_unlink(skb, &d->skbpool);
return skb;
}
- if (d->nskbpool < NSKBPOOLMAX
- && (skb = new_skb(ETH_ZLEN))) {
- d->nskbpool++;
+ if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
+ (skb = new_skb(ETH_ZLEN)))
return skb;
- }
+
return NULL;
}
@@ -293,29 +286,22 @@ aoecmd_ata_rw(struct aoedev *d)
skb->dev = t->ifp->nd;
skb = skb_clone(skb, GFP_ATOMIC);
- if (skb) {
- if (d->sendq_hd)
- d->sendq_tl->next = skb;
- else
- d->sendq_hd = skb;
- d->sendq_tl = skb;
- }
+ if (skb)
+ __skb_queue_tail(&d->sendq, skb);
return 1;
}
/* some callers cannot sleep, and they can call this function,
* transmitting the packets later, when interrupts are on
*/
-static struct sk_buff *
-aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
+static void
+aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
struct aoe_hdr *h;
struct aoe_cfghdr *ch;
- struct sk_buff *skb, *sl, *sl_tail;
+ struct sk_buff *skb;
struct net_device *ifp;
- sl = sl_tail = NULL;
-
read_lock(&dev_base_lock);
for_each_netdev(&init_net, ifp) {
dev_hold(ifp);
@@ -329,8 +315,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
}
skb_put(skb, sizeof *h + sizeof *ch);
skb->dev = ifp;
- if (sl_tail == NULL)
- sl_tail = skb;
+ __skb_queue_tail(queue, skb);
h = (struct aoe_hdr *) skb_mac_header(skb);
memset(h, 0, sizeof *h + sizeof *ch);
@@ -342,16 +327,10 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
h->minor = aoeminor;
h->cmd = AOECMD_CFG;
- skb->next = sl;
- sl = skb;
cont:
dev_put(ifp);
}
read_unlock(&dev_base_lock);
-
- if (tail != NULL)
- *tail = sl_tail;
- return sl;
}
static void
@@ -406,11 +385,7 @@ resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
return;
- if (d->sendq_hd)
- d->sendq_tl->next = skb;
- else
- d->sendq_hd = skb;
- d->sendq_tl = skb;
+ __skb_queue_tail(&d->sendq, skb);
}
static int
@@ -508,16 +483,15 @@ ata_scnt(unsigned char *packet) {
static void
rexmit_timer(ulong vp)
{
+ struct sk_buff_head queue;
struct aoedev *d;
struct aoetgt *t, **tt, **te;
struct aoeif *ifp;
struct frame *f, *e;
- struct sk_buff *sl;
register long timeout;
ulong flags, n;
d = (struct aoedev *) vp;
- sl = NULL;
/* timeout is always ~150% of the moving average */
timeout = d->rttavg;
@@ -589,7 +563,7 @@ rexmit_timer(ulong vp)
}
}
- if (d->sendq_hd) {
+ if (!skb_queue_empty(&d->sendq)) {
n = d->rttavg <<= 1;
if (n > MAXTIMER)
d->rttavg = MAXTIMER;
@@ -600,15 +574,15 @@ rexmit_timer(ulong vp)
aoecmd_work(d);
}
- sl = d->sendq_hd;
- d->sendq_hd = d->sendq_tl = NULL;
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&d->sendq, &queue);
d->timer.expires = jiffies + TIMERTICK;
add_timer(&d->timer);
spin_unlock_irqrestore(&d->lock, flags);
- aoenet_xmit(sl);
+ aoenet_xmit(&queue);
}
/* enters with d->lock held */
@@ -645,7 +619,7 @@ aoecmd_sleepwork(struct work_struct *work)
unsigned long flags;
u64 ssize;
- ssize = d->gd->capacity;
+ ssize = get_capacity(d->gd);
bd = bdget_disk(d->gd, 0);
if (bd) {
@@ -707,7 +681,7 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
return;
if (d->gd != NULL) {
- d->gd->capacity = ssize;
+ set_capacity(d->gd, ssize);
d->flags |= DEVFL_NEWSIZE;
} else
d->flags |= DEVFL_GDALLOC;
@@ -756,23 +730,28 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
unsigned long n_sect = bio->bi_size >> 9;
const int rw = bio_data_dir(bio);
struct hd_struct *part;
+ int cpu;
+
+ cpu = part_stat_lock();
+ part = disk_map_sector_rcu(disk, sector);
+
+ part_stat_inc(cpu, part, ios[rw]);
+ part_stat_add(cpu, part, ticks[rw], duration);
+ part_stat_add(cpu, part, sectors[rw], n_sect);
+ part_stat_add(cpu, part, io_ticks, duration);
- part = get_part(disk, sector);
- all_stat_inc(disk, part, ios[rw], sector);
- all_stat_add(disk, part, ticks[rw], duration, sector);
- all_stat_add(disk, part, sectors[rw], n_sect, sector);
- all_stat_add(disk, part, io_ticks, duration, sector);
+ part_stat_unlock();
}
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
+ struct sk_buff_head queue;
struct aoedev *d;
struct aoe_hdr *hin, *hout;
struct aoe_atahdr *ahin, *ahout;
struct frame *f;
struct buf *buf;
- struct sk_buff *sl;
struct aoetgt *t;
struct aoeif *ifp;
register long n;
@@ -893,21 +872,21 @@ aoecmd_ata_rsp(struct sk_buff *skb)
aoecmd_work(d);
xmit:
- sl = d->sendq_hd;
- d->sendq_hd = d->sendq_tl = NULL;
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&d->sendq, &queue);
spin_unlock_irqrestore(&d->lock, flags);
- aoenet_xmit(sl);
+ aoenet_xmit(&queue);
}
void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
- struct sk_buff *sl;
+ struct sk_buff_head queue;
- sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
-
- aoenet_xmit(sl);
+ __skb_queue_head_init(&queue);
+ aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
+ aoenet_xmit(&queue);
}
struct sk_buff *
@@ -1076,7 +1055,12 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
spin_unlock_irqrestore(&d->lock, flags);
- aoenet_xmit(sl);
+ if (sl) {
+ struct sk_buff_head queue;
+ __skb_queue_head_init(&queue);
+ __skb_queue_tail(&queue, sl);
+ aoenet_xmit(&queue);
+ }
}
void
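Two smaller API conversions ride along in the later hunks: direct gd->capacity accesses become get_capacity()/set_capacity(), and per-partition I/O accounting moves from the old all_stat_*() calls to the part_stat_lock()/part_stat_*() helpers. The sketch below restates the converted diskstats() accounting in one piece, assuming the 2.6.27-era block-layer helpers this patch targets; the wrapper name example_account_io() and its argument list are illustrative only.

#include <linux/genhd.h>

/* Illustrative wrapper around the per-partition accounting pattern
 * used by the converted diskstats() above. */
static void example_account_io(struct gendisk *disk, sector_t sector,
			       int rw, unsigned long n_sect,
			       unsigned long duration)
{
	struct hd_struct *part;
	int cpu;

	cpu = part_stat_lock();		/* rcu_read_lock() + get_cpu() */
	part = disk_map_sector_rcu(disk, sector);

	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, ticks[rw], duration);
	part_stat_add(cpu, part, sectors[rw], n_sect);
	part_stat_add(cpu, part, io_ticks, duration);

	part_stat_unlock();		/* put_cpu() + rcu_read_unlock() */
}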