author    Len Brown <len.brown@intel.com>  2009-01-09 03:39:43 -0500
committer Len Brown <len.brown@intel.com>  2009-01-09 03:39:43 -0500
commit    b2576e1d4408e134e2188c967b1f28af39cd79d4 (patch)
tree      004f3c82faab760f304ce031d6d2f572e7746a50 /drivers/infiniband/hw/ehca
parent    3cc8a5f4ba91f67bbdb81a43a99281a26aab8d77 (diff)
parent    2150edc6c5cf00f7adb54538b9ea2a3e9cedca3f (diff)
Merge branch 'linus' into release
Diffstat (limited to 'drivers/infiniband/hw/ehca')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h |  7
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_eq.c      |  2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c     | 17
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c    | 17
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c      | 12
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c    | 13
6 files changed, 38 insertions(+), 30 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 7fc35cf0cdd..c825142a2fb 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -175,6 +175,13 @@ struct ehca_queue_map {
unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */
};
+/* function to calculate the next index for the qmap */
+static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
+{
+ unsigned int temp = cur_index + 1;
+ return (temp == limit) ? 0 : temp;
+}
+
struct ehca_qp {
union {
struct ib_qp ib_qp;
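The next_index() helper added above replaces the (idx + 1) % entries pattern used in the queue-map code with a compare-and-reset, so advancing a ring index no longer needs a division. A minimal user-space sketch of the same wrap-around behaviour follows; the queue size and the loop in main() are made up for illustration only.

/* Illustrative user-space sketch of the wrap-around helper introduced above.
 * The queue size and the walk below are demonstration values, not driver code. */
#include <stdio.h>

static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
{
	unsigned int temp = cur_index + 1;
	return (temp == limit) ? 0 : temp;
}

int main(void)
{
	unsigned int entries = 4;	/* hypothetical qmap size */
	unsigned int idx = 0;

	/* walking the ring twice wraps back to 0 after index 3 */
	for (int i = 0; i < 8; i++) {
		printf("%u ", idx);
		idx = next_index(idx, entries);
	}
	printf("\n");	/* prints: 0 1 2 3 0 1 2 3 */
	return 0;
}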
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 49660dfa186..523e733c630 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -113,7 +113,7 @@ int ehca_create_eq(struct ehca_shca *shca,
if (h_ret != H_SUCCESS || vpage)
goto create_eq_exit2;
} else {
- if (h_ret != H_PAGE_REGISTERED || !vpage)
+ if (h_ret != H_PAGE_REGISTERED)
goto create_eq_exit2;
}
}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 757035ea246..3128a5090db 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -659,12 +659,12 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
WARN_ON_ONCE(!in_interrupt());
if (ehca_debug_level >= 3)
- ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+ ehca_dmp(cpu_online_mask, cpumask_size(), "");
spin_lock_irqsave(&pool->last_cpu_lock, flags);
- cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+ cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
if (cpu >= nr_cpu_ids)
- cpu = first_cpu(cpu_online_map);
+ cpu = cpumask_first(cpu_online_mask);
pool->last_cpu = cpu;
spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
@@ -855,7 +855,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
case CPU_UP_CANCELED_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- kthread_bind(cct->task, any_online_cpu(cpu_online_map));
+ kthread_bind(cct->task, cpumask_any(cpu_online_mask));
destroy_comp_task(pool, cpu);
break;
case CPU_ONLINE:
@@ -902,7 +902,7 @@ int ehca_create_comp_pool(void)
return -ENOMEM;
spin_lock_init(&pool->last_cpu_lock);
- pool->last_cpu = any_online_cpu(cpu_online_map);
+ pool->last_cpu = cpumask_any(cpu_online_mask);
pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
if (pool->cpu_comp_tasks == NULL) {
@@ -934,10 +934,9 @@ void ehca_destroy_comp_pool(void)
unregister_hotcpu_notifier(&comp_pool_callback_nb);
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i))
- destroy_comp_task(pool, i);
- }
+ for_each_online_cpu(i)
+ destroy_comp_task(pool, i);
+
free_percpu(pool->cpu_comp_tasks);
kfree(pool);
}
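The ehca_irq.c hunks move from the deprecated cpumask interfaces (cpu_online_map, next_cpu_nr, first_cpu, any_online_cpu, and the explicit NR_CPUS loop) to the pointer-based API: cpu_online_mask with cpumask_next(), cpumask_first(), cpumask_any(), and for_each_online_cpu(). Below is a hedged, kernel-only sketch of the round-robin selection performed in find_next_online_cpu(); the function name pick_next_online_cpu and its parameters are illustrative, not part of the driver.

/* Sketch of round-robin CPU selection with the new cpumask API, as in
 * find_next_online_cpu() above. Kernel-only code; names are illustrative. */
#include <linux/cpumask.h>
#include <linux/spinlock.h>

static int pick_next_online_cpu(int last_cpu, spinlock_t *lock)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(lock, flags);
	cpu = cpumask_next(last_cpu, cpu_online_mask);	/* next online CPU after last_cpu */
	if (cpu >= nr_cpu_ids)				/* ran off the end: wrap to the first online CPU */
		cpu = cpumask_first(cpu_online_mask);
	spin_unlock_irqrestore(lock, flags);

	return cpu;
}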
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index bec7e024935..3b77b674cbf 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -717,6 +717,7 @@ static int __devinit ehca_probe(struct of_device *dev,
const u64 *handle;
struct ib_pd *ibpd;
int ret, i, eq_size;
+ unsigned long flags;
handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
if (!handle) {
@@ -830,9 +831,9 @@ static int __devinit ehca_probe(struct of_device *dev,
ehca_err(&shca->ib_device,
"Cannot create device attributes ret=%d", ret);
- spin_lock(&shca_list_lock);
+ spin_lock_irqsave(&shca_list_lock, flags);
list_add(&shca->shca_list, &shca_list);
- spin_unlock(&shca_list_lock);
+ spin_unlock_irqrestore(&shca_list_lock, flags);
return 0;
@@ -878,6 +879,7 @@ probe1:
static int __devexit ehca_remove(struct of_device *dev)
{
struct ehca_shca *shca = dev->dev.driver_data;
+ unsigned long flags;
int ret;
sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
@@ -915,9 +917,9 @@ static int __devexit ehca_remove(struct of_device *dev)
ib_dealloc_device(&shca->ib_device);
- spin_lock(&shca_list_lock);
+ spin_lock_irqsave(&shca_list_lock, flags);
list_del(&shca->shca_list);
- spin_unlock(&shca_list_lock);
+ spin_unlock_irqrestore(&shca_list_lock, flags);
return ret;
}
@@ -975,6 +977,7 @@ static int ehca_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
static unsigned long ehca_dmem_warn_time;
+ unsigned long flags;
switch (action) {
case MEM_CANCEL_OFFLINE:
@@ -985,12 +988,12 @@ static int ehca_mem_notifier(struct notifier_block *nb,
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
/* only ok if no hca is attached to the lpar */
- spin_lock(&shca_list_lock);
+ spin_lock_irqsave(&shca_list_lock, flags);
if (list_empty(&shca_list)) {
- spin_unlock(&shca_list_lock);
+ spin_unlock_irqrestore(&shca_list_lock, flags);
return NOTIFY_OK;
} else {
- spin_unlock(&shca_list_lock);
+ spin_unlock_irqrestore(&shca_list_lock, flags);
if (printk_timed_ratelimit(&ehca_dmem_warn_time,
30 * 1000))
ehca_gen_err("DMEM operations are not allowed"
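The ehca_main.c hunks switch shca_list_lock from plain spin_lock()/spin_unlock() to the irqsave/irqrestore variants, and each caller gains an unsigned long flags local that carries the saved interrupt state across the critical section, so the list lock is safe even where it may be contended from interrupt context. A minimal sketch of the pattern follows, using an illustrative list and entry type rather than the driver's shca_list.

/* Minimal sketch of the spin_lock_irqsave pattern adopted above for
 * shca_list_lock; the list, lock, and entry names here are illustrative only. */
#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_list_lock);
static LIST_HEAD(example_list);

struct example_entry {
	struct list_head node;
};

static void example_add(struct example_entry *e)
{
	unsigned long flags;	/* saved interrupt state, must live across the pair */

	spin_lock_irqsave(&example_list_lock, flags);		/* disables IRQs, saves prior state */
	list_add(&e->node, &example_list);
	spin_unlock_irqrestore(&example_list_lock, flags);	/* restores the saved IRQ state */
}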
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index cadbf0cdd91..f161cf173db 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1138,14 +1138,14 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
return -EFAULT;
}
- tail_idx = (qmap->tail + 1) % qmap->entries;
+ tail_idx = next_index(qmap->tail, qmap->entries);
wqe_idx = q_ofs / ipz_queue->qe_size;
/* check all processed wqes, whether a cqe is requested or not */
while (tail_idx != wqe_idx) {
if (qmap->map[tail_idx].cqe_req)
qmap->left_to_poll++;
- tail_idx = (tail_idx + 1) % qmap->entries;
+ tail_idx = next_index(tail_idx, qmap->entries);
}
/* save index in queue, where we have to start flushing */
qmap->next_wqe_idx = wqe_idx;
@@ -1195,14 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
} else {
spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
my_qp->sq_map.left_to_poll = 0;
- my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
- my_qp->sq_map.entries;
+ my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
+ my_qp->sq_map.entries);
spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
my_qp->rq_map.left_to_poll = 0;
- my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
- my_qp->rq_map.entries;
+ my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
+ my_qp->rq_map.entries);
spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
}
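calc_left_cqes() and check_for_left_cqes() now advance through the queue map with next_index() instead of taking a modulus on every iteration. The user-space sketch below mirrors the counting walk from one past the old tail up to the flush point; the map size, indices, and the demo_qmap_entry type are invented for illustration.

/* User-space sketch of the counting walk in calc_left_cqes(); the qmap
 * layout is reduced to the one field the loop actually reads. */
#include <stdio.h>

struct demo_qmap_entry {
	unsigned int cqe_req;	/* 1 if a completion was requested for this WQE */
};

static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
{
	unsigned int temp = cur_index + 1;
	return (temp == limit) ? 0 : temp;
}

int main(void)
{
	struct demo_qmap_entry map[8] = {
		[1] = { .cqe_req = 1 }, [3] = { .cqe_req = 1 }, [6] = { .cqe_req = 1 },
	};
	unsigned int entries = 8, tail = 7, wqe_idx = 5;
	unsigned int left_to_poll = 0;

	/* start one past the tail and walk (with wrap-around) up to the flush point */
	unsigned int tail_idx = next_index(tail, entries);
	while (tail_idx != wqe_idx) {
		if (map[tail_idx].cqe_req)
			left_to_poll++;
		tail_idx = next_index(tail_idx, entries);
	}
	printf("left_to_poll = %u\n", left_to_poll);	/* 2: indices 1 and 3 */
	return 0;
}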
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 00a648f4316..c7112686782 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -726,13 +726,13 @@ repoll:
* set left_to_poll to 0 because in error state, we will not
* get any additional CQEs
*/
- my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
- my_qp->sq_map.entries;
+ my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
+ my_qp->sq_map.entries);
my_qp->sq_map.left_to_poll = 0;
ehca_add_to_err_list(my_qp, 1);
- my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
- my_qp->rq_map.entries;
+ my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
+ my_qp->rq_map.entries);
my_qp->rq_map.left_to_poll = 0;
if (HAS_RQ(my_qp))
ehca_add_to_err_list(my_qp, 0);
@@ -860,9 +860,8 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
/* mark as reported and advance next_wqe pointer */
qmap_entry->reported = 1;
- qmap->next_wqe_idx++;
- if (qmap->next_wqe_idx == qmap->entries)
- qmap->next_wqe_idx = 0;
+ qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
+ qmap->entries);
qmap_entry = &qmap->map[qmap->next_wqe_idx];
wc++; nr++;