Diffstat (limited to 'drivers/infiniband/core/cm.c')
-rw-r--r--  drivers/infiniband/core/cm.c |  144
1 file changed, 103 insertions(+), 41 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 25b1018a476..d446998b12a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -101,7 +101,7 @@ struct cm_av {
};
struct cm_work {
- struct work_struct work;
+ struct delayed_work work;
struct list_head list;
struct cm_port *port;
struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
@@ -147,12 +147,12 @@ struct cm_id_private {
__be32 rq_psn;
int timeout_ms;
enum ib_mtu path_mtu;
+ __be16 pkey;
u8 private_data_len;
u8 max_cm_retries;
u8 peer_to_peer;
u8 responder_resources;
u8 initiator_depth;
- u8 local_ack_timeout;
u8 retry_count;
u8 rnr_retry_count;
u8 service_timeout;
@@ -161,7 +161,7 @@ struct cm_id_private {
atomic_t work_count;
};
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
@@ -240,11 +240,10 @@ static void * cm_copy_private_data(const void *private_data,
if (!private_data || !private_data_len)
return NULL;
- data = kmalloc(private_data_len, GFP_KERNEL);
+ data = kmemdup(private_data, private_data_len, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
- memcpy(data, private_data, private_data_len);
return data;
}
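
The kmalloc()/memcpy() pair collapses into a single kmemdup() call with the same semantics: duplicate a buffer in one step, NULL on allocation failure. The helper, added in 2.6.19, is essentially the following (the real one uses kmalloc_track_caller() for allocator accounting):

        /* duplicate src into a freshly allocated buffer, or return NULL */
        void *kmemdup(const void *src, size_t len, gfp_t gfp)
        {
                void *p = kmalloc(len, gfp);

                if (p)
                        memcpy(p, src, len);
                return p;
        }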
@@ -669,8 +668,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
return ERR_PTR(-ENOMEM);
timewait_info->work.local_id = local_id;
- INIT_WORK(&timewait_info->work.work, cm_work_handler,
- &timewait_info->work);
+ INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
return timewait_info;
}
@@ -691,7 +689,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
* timewait before notifying the user that we've exited timewait.
*/
cm_id_priv->id.state = IB_CM_TIMEWAIT;
- wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+ wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
msecs_to_jiffies(wait_time));
cm_id_priv->timewait_info = NULL;
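
The timewait interval is now derived from the primary path's packet_life_time rather than the ack timeout stashed from the REQ; the +1 doubles the value, since IBA encodes these times exponentially as 4.096us * 2^t. The cm_convert_to_ms() helper, defined elsewhere in this file and not shown in the hunk, boils down to roughly:

        /* 4.096us * 2^t is about 2^(t-8) ms; never report less than 1 ms */
        static inline int cm_convert_to_ms(int iba_time)
        {
                return 1 << max(iba_time - 8, 0);
        }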
@@ -1010,6 +1008,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->retry_count = param->retry_count;
cm_id_priv->path_mtu = param->primary_path->mtu;
+ cm_id_priv->pkey = param->primary_path->pkey;
cm_id_priv->qp_type = param->qp_type;
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
@@ -1024,8 +1023,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
- cm_id_priv->local_ack_timeout =
- cm_req_get_primary_local_ack_timeout(req_msg);
spin_lock_irqsave(&cm_id_priv->lock, flags);
ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1410,9 +1407,8 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+ cm_id_priv->pkey = req_msg->pkey;
cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
- cm_id_priv->local_ack_timeout =
- cm_req_get_primary_local_ack_timeout(req_msg);
cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
@@ -1716,7 +1712,7 @@ static int cm_establish_handler(struct cm_work *work)
unsigned long flags;
int ret;
- /* See comment in ib_cm_establish about lookup. */
+ /* See comment in cm_establish about lookup. */
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
if (!cm_id_priv)
return -EINVAL;
@@ -2402,11 +2398,16 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED ||
- cm_id->lap_state != IB_CM_LAP_IDLE) {
+ (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+ cm_id->lap_state != IB_CM_LAP_IDLE)) {
ret = -EINVAL;
goto out;
}
+ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+ if (ret)
+ goto out;
+
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
@@ -2431,7 +2432,8 @@ out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
EXPORT_SYMBOL(ib_send_cm_lap);
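
ib_send_cm_lap() now also accepts ids in IB_CM_LAP_UNINIT, the state of a connection that has never had an alternate path loaded, and resolves the alternate AV up front so a bad path fails the call early. For orientation, the LAP states this patch cycles through are declared in <rdma/ib_cm.h> along these lines, with IB_CM_LAP_UNINIT as the newly introduced initial state:

        enum ib_cm_lap_state {
                IB_CM_LAP_UNINIT,       /* no alternate path negotiated yet */
                IB_CM_LAP_IDLE,         /* alternate path set, no LAP in flight */
                IB_CM_LAP_SENT,
                IB_CM_LAP_RCVD,
                IB_CM_MRA_LAP_SENT,
                IB_CM_MRA_LAP_RCVD,
        };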
-static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+ struct ib_sa_path_rec *path,
struct cm_lap_msg *lap_msg)
{
memset(path, 0, sizeof *path);
@@ -2443,10 +2445,10 @@ static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
path->hop_limit = lap_msg->alt_hop_limit;
path->traffic_class = cm_lap_get_traffic_class(lap_msg);
path->reversible = 1;
- /* pkey is same as in REQ */
+ path->pkey = cm_id_priv->pkey;
path->sl = cm_lap_get_sl(lap_msg);
path->mtu_selector = IB_SA_EQ;
- /* mtu is same as in REQ */
+ path->mtu = cm_id_priv->path_mtu;
path->rate_selector = IB_SA_EQ;
path->rate = cm_lap_get_packet_rate(lap_msg);
path->packet_life_time_selector = IB_SA_EQ;
@@ -2472,7 +2474,7 @@ static int cm_lap_handler(struct cm_work *work)
param = &work->cm_event.param.lap_rcvd;
param->alternate_path = &work->path[0];
- cm_format_path_from_lap(param->alternate_path, lap_msg);
+ cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
work->cm_event.private_data = &lap_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -2480,6 +2482,7 @@ static int cm_lap_handler(struct cm_work *work)
goto unlock;
switch (cm_id_priv->id.lap_state) {
+ case IB_CM_LAP_UNINIT:
case IB_CM_LAP_IDLE:
break;
case IB_CM_MRA_LAP_SENT:
@@ -2502,6 +2505,10 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
+ cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -2987,9 +2994,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
}
}
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
{
- struct cm_work *work = data;
+ struct cm_work *work = container_of(_work, struct cm_work, work.work);
int ret;
switch (work->cm_event.event) {
@@ -3040,7 +3047,7 @@ static void cm_work_handler(void *data)
cm_free_work(work);
}
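
The handler now receives the work_struct pointer itself and recovers its cm_work container with container_of(); the work.work double member is because struct cm_work embeds a delayed_work, which in turn embeds the work_struct. In the 2.6.20 workqueue API that bundling looks essentially like:

        /* from <linux/workqueue.h>, essentially: the timer travels with the
         * work item, which is why formerly plain submissions in this patch
         * become queue_delayed_work(wq, &work->work, 0) with a zero delay */
        struct delayed_work {
                struct work_struct work;
                struct timer_list timer;
        };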
-int ib_cm_establish(struct ib_cm_id *cm_id)
+static int cm_establish(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
struct cm_work *work;
@@ -3079,16 +3086,53 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
* we need to find the cm_id once we're in the context of the
* worker thread, rather than holding a reference on it.
*/
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->local_id = cm_id->local_id;
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
- queue_work(cm.wq, &work->work);
+ queue_delayed_work(cm.wq, &work->work, 0);
out:
return ret;
}
-EXPORT_SYMBOL(ib_cm_establish);
+
+static int cm_migrate(struct ib_cm_id *cm_id)
+{
+ struct cm_id_private *cm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id->state == IB_CM_ESTABLISHED &&
+ (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+ cm_id->lap_state == IB_CM_LAP_IDLE)) {
+ cm_id->lap_state = IB_CM_LAP_IDLE;
+ cm_id_priv->av = cm_id_priv->alt_av;
+ } else
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ return ret;
+}
+
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+{
+ int ret;
+
+ switch (event) {
+ case IB_EVENT_COMM_EST:
+ ret = cm_establish(cm_id);
+ break;
+ case IB_EVENT_PATH_MIG:
+ ret = cm_migrate(cm_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(ib_cm_notify);
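
ib_cm_establish() is subsumed by the broader ib_cm_notify(), which gives consumers one entry point for reporting both communication-established and path-migration QP events to the CM. A hedged sketch of how a caller might forward these from a QP async event handler (the handler name and wiring are illustrative, not part of this patch):

        /* forward the QP events the CM cares about: COMM_EST means data
         * arrived before the RTU, PATH_MIG means the HCA failed over to
         * the alternate path */
        static void example_qp_event_handler(struct ib_event *event, void *context)
        {
                struct ib_cm_id *cm_id = context;

                switch (event->event) {
                case IB_EVENT_COMM_EST:
                case IB_EVENT_PATH_MIG:
                        ib_cm_notify(cm_id, event->event);
                        break;
                default:
                        break;
                }
        }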
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
@@ -3146,11 +3190,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
return;
}
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = (struct cm_port *)mad_agent->context;
- queue_work(cm.wq, &work->work);
+ queue_delayed_work(cm.wq, &work->work, 0);
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3173,8 +3217,7 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
case IB_CM_ESTABLISHED:
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX | IB_QP_PORT;
- qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE;
+ qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
if (cm_id_priv->responder_resources)
qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_ATOMIC;
@@ -3222,6 +3265,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
if (cm_id_priv->alt_av.ah_attr.dlid) {
*qp_attr_mask |= IB_QP_ALT_PATH;
qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+ qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+ qp_attr->alt_timeout =
+ cm_id_priv->alt_av.packet_life_time + 1;
qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
}
ret = 0;
@@ -3243,24 +3289,40 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->id.state) {
+ /* Allow transition to RTS before sending REP */
+ case IB_CM_REQ_RCVD:
+ case IB_CM_MRA_REQ_SENT:
+
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
case IB_CM_ESTABLISHED:
- *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
- qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
- if (cm_id_priv->qp_type == IB_QPT_RC) {
- *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
- IB_QP_RNR_RETRY |
- IB_QP_MAX_QP_RD_ATOMIC;
- qp_attr->timeout = cm_id_priv->local_ack_timeout;
- qp_attr->retry_cnt = cm_id_priv->retry_count;
- qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
- qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
- }
- if (cm_id_priv->alt_av.ah_attr.dlid) {
- *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+ if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+ *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+ qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+ if (cm_id_priv->qp_type == IB_QPT_RC) {
+ *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+ IB_QP_RNR_RETRY |
+ IB_QP_MAX_QP_RD_ATOMIC;
+ qp_attr->timeout =
+ cm_id_priv->av.packet_life_time + 1;
+ qp_attr->retry_cnt = cm_id_priv->retry_count;
+ qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+ qp_attr->max_rd_atomic =
+ cm_id_priv->initiator_depth;
+ }
+ if (cm_id_priv->alt_av.ah_attr.dlid) {
+ *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+ qp_attr->path_mig_state = IB_MIG_REARM;
+ }
+ } else {
+ *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+ qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+ qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+ qp_attr->alt_timeout =
+ cm_id_priv->alt_av.packet_life_time + 1;
+ qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
qp_attr->path_mig_state = IB_MIG_REARM;
}
ret = 0;
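
With the lap_state test above, re-querying RTS attributes on an established connection that has an alternate path loaded returns only the alt-path and rearm fields, rather than a full RTS transition that would disturb a live QP. A sketch of the caller-side sequence through the exported ib_cm_init_qp_attr(), with illustrative variable names:

        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        /* ask the CM how to move toward RTS from the current CM state; on
         * an established id with an alternate path this yields
         * IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE with IB_MIG_REARM */
        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (!ret)
                ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);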