IB/core: Define 'ib' and 'roce' rdma_ah_attr types
linux.git: drivers/infiniband/core/cm.c
1 /*
2  * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
4  * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
5  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/completion.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/device.h>
39 #include <linux/module.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/interrupt.h>
43 #include <linux/random.h>
44 #include <linux/rbtree.h>
45 #include <linux/spinlock.h>
46 #include <linux/slab.h>
47 #include <linux/sysfs.h>
48 #include <linux/workqueue.h>
49 #include <linux/kdev_t.h>
50 #include <linux/etherdevice.h>
51
52 #include <rdma/ib_cache.h>
53 #include <rdma/ib_cm.h>
54 #include "cm_msgs.h"
55
56 MODULE_AUTHOR("Sean Hefty");
57 MODULE_DESCRIPTION("InfiniBand CM");
58 MODULE_LICENSE("Dual BSD/GPL");
59
60 static const char * const ibcm_rej_reason_strs[] = {
61         [IB_CM_REJ_NO_QP]                       = "no QP",
62         [IB_CM_REJ_NO_EEC]                      = "no EEC",
63         [IB_CM_REJ_NO_RESOURCES]                = "no resources",
64         [IB_CM_REJ_TIMEOUT]                     = "timeout",
65         [IB_CM_REJ_UNSUPPORTED]                 = "unsupported",
66         [IB_CM_REJ_INVALID_COMM_ID]             = "invalid comm ID",
67         [IB_CM_REJ_INVALID_COMM_INSTANCE]       = "invalid comm instance",
68         [IB_CM_REJ_INVALID_SERVICE_ID]          = "invalid service ID",
69         [IB_CM_REJ_INVALID_TRANSPORT_TYPE]      = "invalid transport type",
70         [IB_CM_REJ_STALE_CONN]                  = "stale conn",
71         [IB_CM_REJ_RDC_NOT_EXIST]               = "RDC not exist",
72         [IB_CM_REJ_INVALID_GID]                 = "invalid GID",
73         [IB_CM_REJ_INVALID_LID]                 = "invalid LID",
74         [IB_CM_REJ_INVALID_SL]                  = "invalid SL",
75         [IB_CM_REJ_INVALID_TRAFFIC_CLASS]       = "invalid traffic class",
76         [IB_CM_REJ_INVALID_HOP_LIMIT]           = "invalid hop limit",
77         [IB_CM_REJ_INVALID_PACKET_RATE]         = "invalid packet rate",
78         [IB_CM_REJ_INVALID_ALT_GID]             = "invalid alt GID",
79         [IB_CM_REJ_INVALID_ALT_LID]             = "invalid alt LID",
80         [IB_CM_REJ_INVALID_ALT_SL]              = "invalid alt SL",
81         [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]   = "invalid alt traffic class",
82         [IB_CM_REJ_INVALID_ALT_HOP_LIMIT]       = "invalid alt hop limit",
83         [IB_CM_REJ_INVALID_ALT_PACKET_RATE]     = "invalid alt packet rate",
84         [IB_CM_REJ_PORT_CM_REDIRECT]            = "port CM redirect",
85         [IB_CM_REJ_PORT_REDIRECT]               = "port redirect",
86         [IB_CM_REJ_INVALID_MTU]                 = "invalid MTU",
87         [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
88         [IB_CM_REJ_CONSUMER_DEFINED]            = "consumer defined",
89         [IB_CM_REJ_INVALID_RNR_RETRY]           = "invalid RNR retry",
90         [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]     = "duplicate local comm ID",
91         [IB_CM_REJ_INVALID_CLASS_VERSION]       = "invalid class version",
92         [IB_CM_REJ_INVALID_FLOW_LABEL]          = "invalid flow label",
93         [IB_CM_REJ_INVALID_ALT_FLOW_LABEL]      = "invalid alt flow label",
94 };
95
96 const char *__attribute_const__ ibcm_reject_msg(int reason)
97 {
98         size_t index = reason;
99
100         if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
101             ibcm_rej_reason_strs[index])
102                 return ibcm_rej_reason_strs[index];
103         else
104                 return "unrecognized reason";
105 }
106 EXPORT_SYMBOL(ibcm_reject_msg);
107
108 static void cm_add_one(struct ib_device *device);
109 static void cm_remove_one(struct ib_device *device, void *client_data);
110
111 static struct ib_client cm_client = {
112         .name   = "cm",
113         .add    = cm_add_one,
114         .remove = cm_remove_one
115 };
116
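/*
 * Global CM state: the list of CM devices, the RB trees used to look up
 * listeners and remote IDs/QPNs/SIDR requests, the IDR that hands out
 * local communication IDs, the timewait list and the CM workqueue.
 */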
117 static struct ib_cm {
118         spinlock_t lock;
119         struct list_head device_list;
120         rwlock_t device_lock;
121         struct rb_root listen_service_table;
122         u64 listen_service_id;
123         /* struct rb_root peer_service_table; todo: fix peer to peer */
124         struct rb_root remote_qp_table;
125         struct rb_root remote_id_table;
126         struct rb_root remote_sidr_table;
127         struct idr local_id_table;
128         __be32 random_id_operand;
129         struct list_head timewait_list;
130         struct workqueue_struct *wq;
131         /* Serialize against CM port state changes */
132         spinlock_t state_lock;
133 } cm;
134
135 /* Counter indexes ordered by attribute ID */
136 enum {
137         CM_REQ_COUNTER,
138         CM_MRA_COUNTER,
139         CM_REJ_COUNTER,
140         CM_REP_COUNTER,
141         CM_RTU_COUNTER,
142         CM_DREQ_COUNTER,
143         CM_DREP_COUNTER,
144         CM_SIDR_REQ_COUNTER,
145         CM_SIDR_REP_COUNTER,
146         CM_LAP_COUNTER,
147         CM_APR_COUNTER,
148         CM_ATTR_COUNT,
149         CM_ATTR_ID_OFFSET = 0x0010,
150 };
151
152 enum {
153         CM_XMIT,
154         CM_XMIT_RETRIES,
155         CM_RECV,
156         CM_RECV_DUPLICATES,
157         CM_COUNTER_GROUPS
158 };
159
160 static char const counter_group_names[CM_COUNTER_GROUPS]
161                                      [sizeof("cm_rx_duplicates")] = {
162         "cm_tx_msgs", "cm_tx_retries",
163         "cm_rx_msgs", "cm_rx_duplicates"
164 };
165
166 struct cm_counter_group {
167         struct kobject obj;
168         atomic_long_t counter[CM_ATTR_COUNT];
169 };
170
171 struct cm_counter_attribute {
172         struct attribute attr;
173         int index;
174 };
175
176 #define CM_COUNTER_ATTR(_name, _index) \
177 struct cm_counter_attribute cm_##_name##_counter_attr = { \
178         .attr = { .name = __stringify(_name), .mode = 0444 }, \
179         .index = _index \
180 }
181
182 static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
183 static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
184 static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
185 static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
186 static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
187 static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
188 static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
189 static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
190 static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
191 static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
192 static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
193
194 static struct attribute *cm_counter_default_attrs[] = {
195         &cm_req_counter_attr.attr,
196         &cm_mra_counter_attr.attr,
197         &cm_rej_counter_attr.attr,
198         &cm_rep_counter_attr.attr,
199         &cm_rtu_counter_attr.attr,
200         &cm_dreq_counter_attr.attr,
201         &cm_drep_counter_attr.attr,
202         &cm_sidr_req_counter_attr.attr,
203         &cm_sidr_rep_counter_attr.attr,
204         &cm_lap_counter_attr.attr,
205         &cm_apr_counter_attr.attr,
206         NULL
207 };
208
209 struct cm_port {
210         struct cm_device *cm_dev;
211         struct ib_mad_agent *mad_agent;
212         struct kobject port_obj;
213         u8 port_num;
214         struct list_head cm_priv_prim_list;
215         struct list_head cm_priv_altr_list;
216         struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
217 };
218
219 struct cm_device {
220         struct list_head list;
221         struct ib_device *ib_device;
222         struct device *device;
223         u8 ack_delay;
224         int going_down;
225         struct cm_port *port[0];
226 };
227
228 struct cm_av {
229         struct cm_port *port;
230         union ib_gid dgid;
231         struct rdma_ah_attr ah_attr;
232         u16 pkey_index;
233         u8 timeout;
234 };
235
236 struct cm_work {
237         struct delayed_work work;
238         struct list_head list;
239         struct cm_port *port;
240         struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
241         __be32 local_id;                        /* Established / timewait */
242         __be32 remote_id;
243         struct ib_cm_event cm_event;
244         struct ib_sa_path_rec path[0];
245 };
246
247 struct cm_timewait_info {
248         struct cm_work work;                    /* Must be first. */
249         struct list_head list;
250         struct rb_node remote_qp_node;
251         struct rb_node remote_id_node;
252         __be64 remote_ca_guid;
253         __be32 remote_qpn;
254         u8 inserted_remote_qp;
255         u8 inserted_remote_id;
256 };
257
258 struct cm_id_private {
259         struct ib_cm_id id;
260
261         struct rb_node service_node;
262         struct rb_node sidr_id_node;
263         spinlock_t lock;        /* Do not acquire inside cm.lock */
264         struct completion comp;
265         atomic_t refcount;
266         /* Number of clients sharing this ib_cm_id. Only valid for listeners.
267          * Protected by the cm.lock spinlock. */
268         int listen_sharecount;
269
270         struct ib_mad_send_buf *msg;
271         struct cm_timewait_info *timewait_info;
272         /* todo: use alternate port on send failure */
273         struct cm_av av;
274         struct cm_av alt_av;
275
276         void *private_data;
277         __be64 tid;
278         __be32 local_qpn;
279         __be32 remote_qpn;
280         enum ib_qp_type qp_type;
281         __be32 sq_psn;
282         __be32 rq_psn;
283         int timeout_ms;
284         enum ib_mtu path_mtu;
285         __be16 pkey;
286         u8 private_data_len;
287         u8 max_cm_retries;
288         u8 peer_to_peer;
289         u8 responder_resources;
290         u8 initiator_depth;
291         u8 retry_count;
292         u8 rnr_retry_count;
293         u8 service_timeout;
294         u8 target_ack_delay;
295
296         struct list_head prim_list;
297         struct list_head altr_list;
298         /* Indicates that the send port's MAD agent is registered and the av is set */
299         int prim_send_port_not_ready;
300         int altr_send_port_not_ready;
301
302         struct list_head work_list;
303         atomic_t work_count;
304 };
305
306 static void cm_work_handler(struct work_struct *work);
307
308 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
309 {
310         if (atomic_dec_and_test(&cm_id_priv->refcount))
311                 complete(&cm_id_priv->comp);
312 }
313
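/*
 * Allocate a MAD send buffer for an outgoing CM message.  The primary
 * address vector is used if its port is ready, otherwise the alternate
 * one; an address handle built from that AV is attached to the buffer,
 * and a reference is taken on the cm_id until the message is freed.
 */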
314 static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
315                         struct ib_mad_send_buf **msg)
316 {
317         struct ib_mad_agent *mad_agent;
318         struct ib_mad_send_buf *m;
319         struct ib_ah *ah;
320         struct cm_av *av;
321         unsigned long flags, flags2;
322         int ret = 0;
323
324         /* don't let the port be released till the agent is down */
325         spin_lock_irqsave(&cm.state_lock, flags2);
326         spin_lock_irqsave(&cm.lock, flags);
327         if (!cm_id_priv->prim_send_port_not_ready)
328                 av = &cm_id_priv->av;
329         else if (!cm_id_priv->altr_send_port_not_ready &&
330                  (cm_id_priv->alt_av.port))
331                 av = &cm_id_priv->alt_av;
332         else {
333                 pr_info("%s: not valid CM id\n", __func__);
334                 ret = -ENODEV;
335                 spin_unlock_irqrestore(&cm.lock, flags);
336                 goto out;
337         }
338         spin_unlock_irqrestore(&cm.lock, flags);
339         /* Make sure the port hasn't released the MAD agent yet */
340         mad_agent = cm_id_priv->av.port->mad_agent;
341         if (!mad_agent) {
342                 pr_info("%s: not a valid MAD agent\n", __func__);
343                 ret = -ENODEV;
344                 goto out;
345         }
346         ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr);
347         if (IS_ERR(ah)) {
348                 ret = PTR_ERR(ah);
349                 goto out;
350         }
351
352         m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
353                                av->pkey_index,
354                                0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
355                                GFP_ATOMIC,
356                                IB_MGMT_BASE_VERSION);
357         if (IS_ERR(m)) {
358                 rdma_destroy_ah(ah);
359                 ret = PTR_ERR(m);
360                 goto out;
361         }
362
363         /* Timeout set by caller if response is expected. */
364         m->ah = ah;
365         m->retries = cm_id_priv->max_cm_retries;
366
367         atomic_inc(&cm_id_priv->refcount);
368         m->context[0] = cm_id_priv;
369         *msg = m;
370
371 out:
372         spin_unlock_irqrestore(&cm.state_lock, flags2);
373         return ret;
374 }
375
376 static int cm_alloc_response_msg(struct cm_port *port,
377                                  struct ib_mad_recv_wc *mad_recv_wc,
378                                  struct ib_mad_send_buf **msg)
379 {
380         struct ib_mad_send_buf *m;
381         struct ib_ah *ah;
382
383         ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
384                                   mad_recv_wc->recv_buf.grh, port->port_num);
385         if (IS_ERR(ah))
386                 return PTR_ERR(ah);
387
388         m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
389                                0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
390                                GFP_ATOMIC,
391                                IB_MGMT_BASE_VERSION);
392         if (IS_ERR(m)) {
393                 rdma_destroy_ah(ah);
394                 return PTR_ERR(m);
395         }
396         m->ah = ah;
397         *msg = m;
398         return 0;
399 }
400
401 static void cm_free_msg(struct ib_mad_send_buf *msg)
402 {
403         rdma_destroy_ah(msg->ah);
404         if (msg->context[0])
405                 cm_deref_id(msg->context[0]);
406         ib_free_send_mad(msg);
407 }
408
409 static void * cm_copy_private_data(const void *private_data,
410                                    u8 private_data_len)
411 {
412         void *data;
413
414         if (!private_data || !private_data_len)
415                 return NULL;
416
417         data = kmemdup(private_data, private_data_len, GFP_KERNEL);
418         if (!data)
419                 return ERR_PTR(-ENOMEM);
420
421         return data;
422 }
423
424 static void cm_set_private_data(struct cm_id_private *cm_id_priv,
425                                  void *private_data, u8 private_data_len)
426 {
427         if (cm_id_priv->private_data && cm_id_priv->private_data_len)
428                 kfree(cm_id_priv->private_data);
429
430         cm_id_priv->private_data = private_data;
431         cm_id_priv->private_data_len = private_data_len;
432 }
433
434 static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
435                                     struct ib_grh *grh, struct cm_av *av)
436 {
437         av->port = port;
438         av->pkey_index = wc->pkey_index;
439         ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
440                            grh, &av->ah_attr);
441 }
442
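/*
 * Initialize an address vector from a path record: find the CM port that
 * owns the path's SGID, look up the pkey index, build the AH attributes,
 * and link the cm_id on the port's primary or alternate list so it can be
 * cleaned up if the port goes away.
 */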
443 static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
444                               struct cm_id_private *cm_id_priv)
445 {
446         struct cm_device *cm_dev;
447         struct cm_port *port = NULL;
448         unsigned long flags;
449         int ret;
450         u8 p;
451         struct net_device *ndev = ib_get_ndev_from_path(path);
452
453         read_lock_irqsave(&cm.device_lock, flags);
454         list_for_each_entry(cm_dev, &cm.device_list, list) {
455                 if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
456                                         path->gid_type, ndev, &p, NULL)) {
457                         port = cm_dev->port[p-1];
458                         break;
459                 }
460         }
461         read_unlock_irqrestore(&cm.device_lock, flags);
462
463         if (ndev)
464                 dev_put(ndev);
465
466         if (!port)
467                 return -EINVAL;
468
469         ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
470                                   be16_to_cpu(path->pkey), &av->pkey_index);
471         if (ret)
472                 return ret;
473
474         av->port = port;
475         ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
476                              &av->ah_attr);
477         av->timeout = path->packet_life_time + 1;
478
479         spin_lock_irqsave(&cm.lock, flags);
480         if (&cm_id_priv->av == av)
481                 list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
482         else if (&cm_id_priv->alt_av == av)
483                 list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
484         else
485                 ret = -EINVAL;
486
487         spin_unlock_irqrestore(&cm.lock, flags);
488
489         return ret;
490 }
491
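/*
 * Allocate a local communication ID from the cyclic IDR and XOR it with a
 * random operand so that on-the-wire IDs are not trivially predictable.
 */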
492 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
493 {
494         unsigned long flags;
495         int id;
496
497         idr_preload(GFP_KERNEL);
498         spin_lock_irqsave(&cm.lock, flags);
499
500         id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);
501
502         spin_unlock_irqrestore(&cm.lock, flags);
503         idr_preload_end();
504
505         cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
506         return id < 0 ? id : 0;
507 }
508
509 static void cm_free_id(__be32 local_id)
510 {
511         spin_lock_irq(&cm.lock);
512         idr_remove(&cm.local_id_table,
513                    (__force int) (local_id ^ cm.random_id_operand));
514         spin_unlock_irq(&cm.lock);
515 }
516
517 static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
518 {
519         struct cm_id_private *cm_id_priv;
520
521         cm_id_priv = idr_find(&cm.local_id_table,
522                               (__force int) (local_id ^ cm.random_id_operand));
523         if (cm_id_priv) {
524                 if (cm_id_priv->id.remote_id == remote_id)
525                         atomic_inc(&cm_id_priv->refcount);
526                 else
527                         cm_id_priv = NULL;
528         }
529
530         return cm_id_priv;
531 }
532
533 static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
534 {
535         struct cm_id_private *cm_id_priv;
536
537         spin_lock_irq(&cm.lock);
538         cm_id_priv = cm_get_id(local_id, remote_id);
539         spin_unlock_irq(&cm.lock);
540
541         return cm_id_priv;
542 }
543
544 /*
545  * Trivial helpers to strip endian annotation and compare; the
546  * endianness doesn't actually matter since we just need a stable
547  * order for the RB tree.
548  */
549 static int be32_lt(__be32 a, __be32 b)
550 {
551         return (__force u32) a < (__force u32) b;
552 }
553
554 static int be32_gt(__be32 a, __be32 b)
555 {
556         return (__force u32) a > (__force u32) b;
557 }
558
559 static int be64_lt(__be64 a, __be64 b)
560 {
561         return (__force u64) a < (__force u64) b;
562 }
563
564 static int be64_gt(__be64 a, __be64 b)
565 {
566         return (__force u64) a > (__force u64) b;
567 }
568
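/*
 * Insert a listener into the listen service RB tree, ordered by device and
 * then by service ID.  If an existing entry already covers the same device
 * and masked service ID, that entry is returned and nothing is inserted.
 */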
569 static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
570 {
571         struct rb_node **link = &cm.listen_service_table.rb_node;
572         struct rb_node *parent = NULL;
573         struct cm_id_private *cur_cm_id_priv;
574         __be64 service_id = cm_id_priv->id.service_id;
575         __be64 service_mask = cm_id_priv->id.service_mask;
576
577         while (*link) {
578                 parent = *link;
579                 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
580                                           service_node);
581                 if ((cur_cm_id_priv->id.service_mask & service_id) ==
582                     (service_mask & cur_cm_id_priv->id.service_id) &&
583                     (cm_id_priv->id.device == cur_cm_id_priv->id.device))
584                         return cur_cm_id_priv;
585
586                 if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
587                         link = &(*link)->rb_left;
588                 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
589                         link = &(*link)->rb_right;
590                 else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
591                         link = &(*link)->rb_left;
592                 else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
593                         link = &(*link)->rb_right;
594                 else
595                         link = &(*link)->rb_right;
596         }
597         rb_link_node(&cm_id_priv->service_node, parent, link);
598         rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
599         return NULL;
600 }
601
602 static struct cm_id_private * cm_find_listen(struct ib_device *device,
603                                              __be64 service_id)
604 {
605         struct rb_node *node = cm.listen_service_table.rb_node;
606         struct cm_id_private *cm_id_priv;
607
608         while (node) {
609                 cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
610                 if ((cm_id_priv->id.service_mask & service_id) ==
611                      cm_id_priv->id.service_id &&
612                     (cm_id_priv->id.device == device))
613                         return cm_id_priv;
614
615                 if (device < cm_id_priv->id.device)
616                         node = node->rb_left;
617                 else if (device > cm_id_priv->id.device)
618                         node = node->rb_right;
619                 else if (be64_lt(service_id, cm_id_priv->id.service_id))
620                         node = node->rb_left;
621                 else if (be64_gt(service_id, cm_id_priv->id.service_id))
622                         node = node->rb_right;
623                 else
624                         node = node->rb_right;
625         }
626         return NULL;
627 }
628
629 static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
630                                                      *timewait_info)
631 {
632         struct rb_node **link = &cm.remote_id_table.rb_node;
633         struct rb_node *parent = NULL;
634         struct cm_timewait_info *cur_timewait_info;
635         __be64 remote_ca_guid = timewait_info->remote_ca_guid;
636         __be32 remote_id = timewait_info->work.remote_id;
637
638         while (*link) {
639                 parent = *link;
640                 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
641                                              remote_id_node);
642                 if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
643                         link = &(*link)->rb_left;
644                 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
645                         link = &(*link)->rb_right;
646                 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
647                         link = &(*link)->rb_left;
648                 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
649                         link = &(*link)->rb_right;
650                 else
651                         return cur_timewait_info;
652         }
653         timewait_info->inserted_remote_id = 1;
654         rb_link_node(&timewait_info->remote_id_node, parent, link);
655         rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
656         return NULL;
657 }
658
659 static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
660                                                    __be32 remote_id)
661 {
662         struct rb_node *node = cm.remote_id_table.rb_node;
663         struct cm_timewait_info *timewait_info;
664
665         while (node) {
666                 timewait_info = rb_entry(node, struct cm_timewait_info,
667                                          remote_id_node);
668                 if (be32_lt(remote_id, timewait_info->work.remote_id))
669                         node = node->rb_left;
670                 else if (be32_gt(remote_id, timewait_info->work.remote_id))
671                         node = node->rb_right;
672                 else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
673                         node = node->rb_left;
674                 else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
675                         node = node->rb_right;
676                 else
677                         return timewait_info;
678         }
679         return NULL;
680 }
681
682 static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
683                                                       *timewait_info)
684 {
685         struct rb_node **link = &cm.remote_qp_table.rb_node;
686         struct rb_node *parent = NULL;
687         struct cm_timewait_info *cur_timewait_info;
688         __be64 remote_ca_guid = timewait_info->remote_ca_guid;
689         __be32 remote_qpn = timewait_info->remote_qpn;
690
691         while (*link) {
692                 parent = *link;
693                 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
694                                              remote_qp_node);
695                 if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
696                         link = &(*link)->rb_left;
697                 else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
698                         link = &(*link)->rb_right;
699                 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
700                         link = &(*link)->rb_left;
701                 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
702                         link = &(*link)->rb_right;
703                 else
704                         return cur_timewait_info;
705         }
706         timewait_info->inserted_remote_qp = 1;
707         rb_link_node(&timewait_info->remote_qp_node, parent, link);
708         rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
709         return NULL;
710 }
711
712 static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
713                                                     *cm_id_priv)
714 {
715         struct rb_node **link = &cm.remote_sidr_table.rb_node;
716         struct rb_node *parent = NULL;
717         struct cm_id_private *cur_cm_id_priv;
718         union ib_gid *port_gid = &cm_id_priv->av.dgid;
719         __be32 remote_id = cm_id_priv->id.remote_id;
720
721         while (*link) {
722                 parent = *link;
723                 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
724                                           sidr_id_node);
725                 if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
726                         link = &(*link)->rb_left;
727                 else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
728                         link = &(*link)->rb_right;
729                 else {
730                         int cmp;
731                         cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
732                                      sizeof *port_gid);
733                         if (cmp < 0)
734                                 link = &(*link)->rb_left;
735                         else if (cmp > 0)
736                                 link = &(*link)->rb_right;
737                         else
738                                 return cur_cm_id_priv;
739                 }
740         }
741         rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
742         rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
743         return NULL;
744 }
745
746 static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
747                                enum ib_cm_sidr_status status)
748 {
749         struct ib_cm_sidr_rep_param param;
750
751         memset(&param, 0, sizeof param);
752         param.status = status;
753         ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
754 }
755
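/*
 * Allocate and initialize a cm_id in the IDLE state and reserve its local
 * communication ID.  Illustrative usage only (the handler name, context and
 * service ID below are placeholders, not part of this file):
 *
 *	id = ib_create_cm_id(device, my_handler, my_context);
 *	if (!IS_ERR(id))
 *		ret = ib_cm_listen(id, cpu_to_be64(0x1234), 0);
 *
 * The caller must release the ID with ib_destroy_cm_id().
 */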
756 struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
757                                  ib_cm_handler cm_handler,
758                                  void *context)
759 {
760         struct cm_id_private *cm_id_priv;
761         int ret;
762
763         cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
764         if (!cm_id_priv)
765                 return ERR_PTR(-ENOMEM);
766
767         cm_id_priv->id.state = IB_CM_IDLE;
768         cm_id_priv->id.device = device;
769         cm_id_priv->id.cm_handler = cm_handler;
770         cm_id_priv->id.context = context;
771         cm_id_priv->id.remote_cm_qpn = 1;
772         ret = cm_alloc_id(cm_id_priv);
773         if (ret)
774                 goto error;
775
776         spin_lock_init(&cm_id_priv->lock);
777         init_completion(&cm_id_priv->comp);
778         INIT_LIST_HEAD(&cm_id_priv->work_list);
779         INIT_LIST_HEAD(&cm_id_priv->prim_list);
780         INIT_LIST_HEAD(&cm_id_priv->altr_list);
781         atomic_set(&cm_id_priv->work_count, -1);
782         atomic_set(&cm_id_priv->refcount, 1);
783         return &cm_id_priv->id;
784
785 error:
786         kfree(cm_id_priv);
787         return ERR_PTR(-ENOMEM);
788 }
789 EXPORT_SYMBOL(ib_create_cm_id);
790
791 static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
792 {
793         struct cm_work *work;
794
795         if (list_empty(&cm_id_priv->work_list))
796                 return NULL;
797
798         work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
799         list_del(&work->list);
800         return work;
801 }
802
803 static void cm_free_work(struct cm_work *work)
804 {
805         if (work->mad_recv_wc)
806                 ib_free_recv_mad(work->mad_recv_wc);
807         kfree(work);
808 }
809
810 static inline int cm_convert_to_ms(int iba_time)
811 {
812         /* approximate conversion to ms from 4.096us x 2^iba_time */
813         return 1 << max(iba_time - 8, 0);
814 }
815
816 /*
817  * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
818  * Because of how ack_timeout is stored, adding one doubles the timeout.
819  * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
820  * increment it (round up) only if the other is within 50%.
821  */
822 static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
823 {
824         int ack_timeout = packet_life_time + 1;
825
826         if (ack_timeout >= ca_ack_delay)
827                 ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
828         else
829                 ack_timeout = ca_ack_delay +
830                               (ack_timeout >= (ca_ack_delay - 1));
831
832         return min(31, ack_timeout);
833 }
834
835 static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
836 {
837         if (timewait_info->inserted_remote_id) {
838                 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
839                 timewait_info->inserted_remote_id = 0;
840         }
841
842         if (timewait_info->inserted_remote_qp) {
843                 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
844                 timewait_info->inserted_remote_qp = 0;
845         }
846 }
847
848 static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
849 {
850         struct cm_timewait_info *timewait_info;
851
852         timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
853         if (!timewait_info)
854                 return ERR_PTR(-ENOMEM);
855
856         timewait_info->work.local_id = local_id;
857         INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
858         timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
859         return timewait_info;
860 }
861
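/*
 * Move a connection into timewait: queue its timewait_info on the global
 * list and schedule delayed work for the timewait period derived from the
 * path's packet lifetime, unless the device is already being removed.
 */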
862 static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
863 {
864         int wait_time;
865         unsigned long flags;
866         struct cm_device *cm_dev;
867
868         cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
869         if (!cm_dev)
870                 return;
871
872         spin_lock_irqsave(&cm.lock, flags);
873         cm_cleanup_timewait(cm_id_priv->timewait_info);
874         list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
875         spin_unlock_irqrestore(&cm.lock, flags);
876
877         /*
878          * The cm_id could be destroyed by the user before we exit timewait.
879          * To protect against this, we search for the cm_id after exiting
880          * timewait before notifying the user that we've exited timewait.
881          */
882         cm_id_priv->id.state = IB_CM_TIMEWAIT;
883         wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
884
885         /* Check if the device started its remove_one */
886         spin_lock_irqsave(&cm.lock, flags);
887         if (!cm_dev->going_down)
888                 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
889                                    msecs_to_jiffies(wait_time));
890         spin_unlock_irqrestore(&cm.lock, flags);
891
892         cm_id_priv->timewait_info = NULL;
893 }
894
895 static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
896 {
897         unsigned long flags;
898
899         cm_id_priv->id.state = IB_CM_IDLE;
900         if (cm_id_priv->timewait_info) {
901                 spin_lock_irqsave(&cm.lock, flags);
902                 cm_cleanup_timewait(cm_id_priv->timewait_info);
903                 spin_unlock_irqrestore(&cm.lock, flags);
904                 kfree(cm_id_priv->timewait_info);
905                 cm_id_priv->timewait_info = NULL;
906         }
907 }
908
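/*
 * Tear down a cm_id.  Depending on its current state this stops listening,
 * cancels any outstanding MAD, and sends a REJ, DREQ or DREP as required,
 * then releases the local ID and waits for all references to drop before
 * freeing the private structure.
 */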
909 static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
910 {
911         struct cm_id_private *cm_id_priv;
912         struct cm_work *work;
913
914         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
915 retest:
916         spin_lock_irq(&cm_id_priv->lock);
917         switch (cm_id->state) {
918         case IB_CM_LISTEN:
919                 spin_unlock_irq(&cm_id_priv->lock);
920
921                 spin_lock_irq(&cm.lock);
922                 if (--cm_id_priv->listen_sharecount > 0) {
923                         /* The id is still shared. */
924                         cm_deref_id(cm_id_priv);
925                         spin_unlock_irq(&cm.lock);
926                         return;
927                 }
928                 rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
929                 spin_unlock_irq(&cm.lock);
930                 break;
931         case IB_CM_SIDR_REQ_SENT:
932                 cm_id->state = IB_CM_IDLE;
933                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
934                 spin_unlock_irq(&cm_id_priv->lock);
935                 break;
936         case IB_CM_SIDR_REQ_RCVD:
937                 spin_unlock_irq(&cm_id_priv->lock);
938                 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
939                 spin_lock_irq(&cm.lock);
940                 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
941                         rb_erase(&cm_id_priv->sidr_id_node,
942                                  &cm.remote_sidr_table);
943                 spin_unlock_irq(&cm.lock);
944                 break;
945         case IB_CM_REQ_SENT:
946         case IB_CM_MRA_REQ_RCVD:
947                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
948                 spin_unlock_irq(&cm_id_priv->lock);
949                 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
950                                &cm_id_priv->id.device->node_guid,
951                                sizeof cm_id_priv->id.device->node_guid,
952                                NULL, 0);
953                 break;
954         case IB_CM_REQ_RCVD:
955                 if (err == -ENOMEM) {
956                 /* Do not reject, to allow future retries. */
957                         cm_reset_to_idle(cm_id_priv);
958                         spin_unlock_irq(&cm_id_priv->lock);
959                 } else {
960                         spin_unlock_irq(&cm_id_priv->lock);
961                         ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
962                                        NULL, 0, NULL, 0);
963                 }
964                 break;
965         case IB_CM_REP_SENT:
966         case IB_CM_MRA_REP_RCVD:
967                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
968                 /* Fall through */
969         case IB_CM_MRA_REQ_SENT:
970         case IB_CM_REP_RCVD:
971         case IB_CM_MRA_REP_SENT:
972                 spin_unlock_irq(&cm_id_priv->lock);
973                 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
974                                NULL, 0, NULL, 0);
975                 break;
976         case IB_CM_ESTABLISHED:
977                 spin_unlock_irq(&cm_id_priv->lock);
978                 if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
979                         break;
980                 ib_send_cm_dreq(cm_id, NULL, 0);
981                 goto retest;
982         case IB_CM_DREQ_SENT:
983                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
984                 cm_enter_timewait(cm_id_priv);
985                 spin_unlock_irq(&cm_id_priv->lock);
986                 break;
987         case IB_CM_DREQ_RCVD:
988                 spin_unlock_irq(&cm_id_priv->lock);
989                 ib_send_cm_drep(cm_id, NULL, 0);
990                 break;
991         default:
992                 spin_unlock_irq(&cm_id_priv->lock);
993                 break;
994         }
995
996         spin_lock_irq(&cm.lock);
997         if (!list_empty(&cm_id_priv->altr_list) &&
998             (!cm_id_priv->altr_send_port_not_ready))
999                 list_del(&cm_id_priv->altr_list);
1000         if (!list_empty(&cm_id_priv->prim_list) &&
1001             (!cm_id_priv->prim_send_port_not_ready))
1002                 list_del(&cm_id_priv->prim_list);
1003         spin_unlock_irq(&cm.lock);
1004
1005         cm_free_id(cm_id->local_id);
1006         cm_deref_id(cm_id_priv);
1007         wait_for_completion(&cm_id_priv->comp);
1008         while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
1009                 cm_free_work(work);
1010         kfree(cm_id_priv->private_data);
1011         kfree(cm_id_priv);
1012 }
1013
1014 void ib_destroy_cm_id(struct ib_cm_id *cm_id)
1015 {
1016         cm_destroy_id(cm_id, 0);
1017 }
1018 EXPORT_SYMBOL(ib_destroy_cm_id);
1019
1020 /**
1021  * __ib_cm_listen - Initiates listening on the specified service ID for
1022  *   connection and service ID resolution requests.
1023  * @cm_id: Connection identifier associated with the listen request.
1024  * @service_id: Service identifier matched against incoming connection
1025  *   and service ID resolution requests.  The service ID should be specified
1026  *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1027  *   assign a service ID to the caller.
1028  * @service_mask: Mask applied to service ID used to listen across a
1029  *   range of service IDs.  If set to 0, the service ID is matched
1030  *   exactly.  This parameter is ignored if %service_id is set to
1031  *   IB_CM_ASSIGN_SERVICE_ID.
1032  */
1033 static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
1034                           __be64 service_mask)
1035 {
1036         struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
1037         int ret = 0;
1038
1039         service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
1040         service_id &= service_mask;
1041         if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
1042             (service_id != IB_CM_ASSIGN_SERVICE_ID))
1043                 return -EINVAL;
1044
1045         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1046         if (cm_id->state != IB_CM_IDLE)
1047                 return -EINVAL;
1048
1049         cm_id->state = IB_CM_LISTEN;
1050         ++cm_id_priv->listen_sharecount;
1051
1052         if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
1053                 cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
1054                 cm_id->service_mask = ~cpu_to_be64(0);
1055         } else {
1056                 cm_id->service_id = service_id;
1057                 cm_id->service_mask = service_mask;
1058         }
1059         cur_cm_id_priv = cm_insert_listen(cm_id_priv);
1060
1061         if (cur_cm_id_priv) {
1062                 cm_id->state = IB_CM_IDLE;
1063                 --cm_id_priv->listen_sharecount;
1064                 ret = -EBUSY;
1065         }
1066         return ret;
1067 }
1068
1069 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
1070 {
1071         unsigned long flags;
1072         int ret;
1073
1074         spin_lock_irqsave(&cm.lock, flags);
1075         ret = __ib_cm_listen(cm_id, service_id, service_mask);
1076         spin_unlock_irqrestore(&cm.lock, flags);
1077
1078         return ret;
1079 }
1080 EXPORT_SYMBOL(ib_cm_listen);
1081
1082 /**
1083  * Create a new listening ib_cm_id and listen on the given service ID.
1084  *
1085  * If there's an existing ID listening on that same device and service ID,
1086  * return it.
1087  *
1088  * @device: Device associated with the cm_id.  All related communication will
1089  * be associated with the specified device.
1090  * @cm_handler: Callback invoked to notify the user of CM events.
1091  * @service_id: Service identifier matched against incoming connection
1092  *   and service ID resolution requests.  The service ID should be specified
1093  *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1094  *   assign a service ID to the caller.
1095  *
1096  * Callers should call ib_destroy_cm_id when done with the listener ID.
1097  */
1098 struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
1099                                      ib_cm_handler cm_handler,
1100                                      __be64 service_id)
1101 {
1102         struct cm_id_private *cm_id_priv;
1103         struct ib_cm_id *cm_id;
1104         unsigned long flags;
1105         int err = 0;
1106
1107         /* Create an ID in advance, since the creation may sleep */
1108         cm_id = ib_create_cm_id(device, cm_handler, NULL);
1109         if (IS_ERR(cm_id))
1110                 return cm_id;
1111
1112         spin_lock_irqsave(&cm.lock, flags);
1113
1114         if (service_id == IB_CM_ASSIGN_SERVICE_ID)
1115                 goto new_id;
1116
1117         /* Find an existing ID */
1118         cm_id_priv = cm_find_listen(device, service_id);
1119         if (cm_id_priv) {
1120                 if (cm_id->cm_handler != cm_handler || cm_id->context) {
1121                         /* Sharing an ib_cm_id with different handlers is not
1122                          * supported */
1123                         spin_unlock_irqrestore(&cm.lock, flags);
1124                         return ERR_PTR(-EINVAL);
1125                 }
1126                 atomic_inc(&cm_id_priv->refcount);
1127                 ++cm_id_priv->listen_sharecount;
1128                 spin_unlock_irqrestore(&cm.lock, flags);
1129
1130                 ib_destroy_cm_id(cm_id);
1131                 cm_id = &cm_id_priv->id;
1132                 return cm_id;
1133         }
1134
1135 new_id:
1136         /* Use newly created ID */
1137         err = __ib_cm_listen(cm_id, service_id, 0);
1138
1139         spin_unlock_irqrestore(&cm.lock, flags);
1140
1141         if (err) {
1142                 ib_destroy_cm_id(cm_id);
1143                 return ERR_PTR(err);
1144         }
1145         return cm_id;
1146 }
1147 EXPORT_SYMBOL(ib_cm_insert_listen);
1148
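/*
 * Build a transaction ID for an outgoing MAD: the MAD agent's hi_tid in the
 * upper 32 bits, and the local communication ID, with the message sequence
 * in bits 30-31, in the lower 32 bits.
 */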
1149 static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
1150                           enum cm_msg_sequence msg_seq)
1151 {
1152         u64 hi_tid, low_tid;
1153
1154         hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
1155         low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
1156                           (msg_seq << 30));
1157         return cpu_to_be64(hi_tid | low_tid);
1158 }
1159
1160 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
1161                               __be16 attr_id, __be64 tid)
1162 {
1163         hdr->base_version  = IB_MGMT_BASE_VERSION;
1164         hdr->mgmt_class    = IB_MGMT_CLASS_CM;
1165         hdr->class_version = IB_CM_CLASS_VERSION;
1166         hdr->method        = IB_MGMT_METHOD_SEND;
1167         hdr->attr_id       = attr_id;
1168         hdr->tid           = tid;
1169 }
1170
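/*
 * Fill in a REQ MAD from the connection parameters and path records.  When
 * a path crosses a router (hop_limit > 1) the LID fields are set to the
 * permissive LID, since the remote LID is not known.
 */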
1171 static void cm_format_req(struct cm_req_msg *req_msg,
1172                           struct cm_id_private *cm_id_priv,
1173                           struct ib_cm_req_param *param)
1174 {
1175         struct ib_sa_path_rec *pri_path = param->primary_path;
1176         struct ib_sa_path_rec *alt_path = param->alternate_path;
1177
1178         cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
1179                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
1180
1181         req_msg->local_comm_id = cm_id_priv->id.local_id;
1182         req_msg->service_id = param->service_id;
1183         req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
1184         cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
1185         cm_req_set_init_depth(req_msg, param->initiator_depth);
1186         cm_req_set_remote_resp_timeout(req_msg,
1187                                        param->remote_cm_response_timeout);
1188         cm_req_set_qp_type(req_msg, param->qp_type);
1189         cm_req_set_flow_ctrl(req_msg, param->flow_control);
1190         cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
1191         cm_req_set_local_resp_timeout(req_msg,
1192                                       param->local_cm_response_timeout);
1193         req_msg->pkey = param->primary_path->pkey;
1194         cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
1195         cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
1196
1197         if (param->qp_type != IB_QPT_XRC_INI) {
1198                 cm_req_set_resp_res(req_msg, param->responder_resources);
1199                 cm_req_set_retry_count(req_msg, param->retry_count);
1200                 cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
1201                 cm_req_set_srq(req_msg, param->srq);
1202         }
1203
1204         if (pri_path->hop_limit <= 1) {
1205                 req_msg->primary_local_lid = pri_path->slid;
1206                 req_msg->primary_remote_lid = pri_path->dlid;
1207         } else {
1208                 /* Work-around until there's a way to obtain remote LID info */
1209                 req_msg->primary_local_lid = IB_LID_PERMISSIVE;
1210                 req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
1211         }
1212         req_msg->primary_local_gid = pri_path->sgid;
1213         req_msg->primary_remote_gid = pri_path->dgid;
1214         cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
1215         cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
1216         req_msg->primary_traffic_class = pri_path->traffic_class;
1217         req_msg->primary_hop_limit = pri_path->hop_limit;
1218         cm_req_set_primary_sl(req_msg, pri_path->sl);
1219         cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
1220         cm_req_set_primary_local_ack_timeout(req_msg,
1221                 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1222                                pri_path->packet_life_time));
1223
1224         if (alt_path) {
1225                 if (alt_path->hop_limit <= 1) {
1226                         req_msg->alt_local_lid = alt_path->slid;
1227                         req_msg->alt_remote_lid = alt_path->dlid;
1228                 } else {
1229                         req_msg->alt_local_lid = IB_LID_PERMISSIVE;
1230                         req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
1231                 }
1232                 req_msg->alt_local_gid = alt_path->sgid;
1233                 req_msg->alt_remote_gid = alt_path->dgid;
1234                 cm_req_set_alt_flow_label(req_msg,
1235                                           alt_path->flow_label);
1236                 cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
1237                 req_msg->alt_traffic_class = alt_path->traffic_class;
1238                 req_msg->alt_hop_limit = alt_path->hop_limit;
1239                 cm_req_set_alt_sl(req_msg, alt_path->sl);
1240                 cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
1241                 cm_req_set_alt_local_ack_timeout(req_msg,
1242                         cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1243                                        alt_path->packet_life_time));
1244         }
1245
1246         if (param->private_data && param->private_data_len)
1247                 memcpy(req_msg->private_data, param->private_data,
1248                        param->private_data_len);
1249 }
1250
1251 static int cm_validate_req_param(struct ib_cm_req_param *param)
1252 {
1253         /* peer-to-peer not supported */
1254         if (param->peer_to_peer)
1255                 return -EINVAL;
1256
1257         if (!param->primary_path)
1258                 return -EINVAL;
1259
1260         if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
1261             param->qp_type != IB_QPT_XRC_INI)
1262                 return -EINVAL;
1263
1264         if (param->private_data &&
1265             param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
1266                 return -EINVAL;
1267
1268         if (param->alternate_path &&
1269             (param->alternate_path->pkey != param->primary_path->pkey ||
1270              param->alternate_path->mtu != param->primary_path->mtu))
1271                 return -EINVAL;
1272
1273         return 0;
1274 }
1275
1276 int ib_send_cm_req(struct ib_cm_id *cm_id,
1277                    struct ib_cm_req_param *param)
1278 {
1279         struct cm_id_private *cm_id_priv;
1280         struct cm_req_msg *req_msg;
1281         unsigned long flags;
1282         int ret;
1283
1284         ret = cm_validate_req_param(param);
1285         if (ret)
1286                 return ret;
1287
1288         /* Verify that we're not in timewait. */
1289         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1290         spin_lock_irqsave(&cm_id_priv->lock, flags);
1291         if (cm_id->state != IB_CM_IDLE) {
1292                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1293                 ret = -EINVAL;
1294                 goto out;
1295         }
1296         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1297
1298         cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1299                                                             id.local_id);
1300         if (IS_ERR(cm_id_priv->timewait_info)) {
1301                 ret = PTR_ERR(cm_id_priv->timewait_info);
1302                 goto out;
1303         }
1304
1305         ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
1306                                  cm_id_priv);
1307         if (ret)
1308                 goto error1;
1309         if (param->alternate_path) {
1310                 ret = cm_init_av_by_path(param->alternate_path,
1311                                          &cm_id_priv->alt_av, cm_id_priv);
1312                 if (ret)
1313                         goto error1;
1314         }
1315         cm_id->service_id = param->service_id;
1316         cm_id->service_mask = ~cpu_to_be64(0);
1317         cm_id_priv->timeout_ms = cm_convert_to_ms(
1318                                     param->primary_path->packet_life_time) * 2 +
1319                                  cm_convert_to_ms(
1320                                     param->remote_cm_response_timeout);
1321         cm_id_priv->max_cm_retries = param->max_cm_retries;
1322         cm_id_priv->initiator_depth = param->initiator_depth;
1323         cm_id_priv->responder_resources = param->responder_resources;
1324         cm_id_priv->retry_count = param->retry_count;
1325         cm_id_priv->path_mtu = param->primary_path->mtu;
1326         cm_id_priv->pkey = param->primary_path->pkey;
1327         cm_id_priv->qp_type = param->qp_type;
1328
1329         ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
1330         if (ret)
1331                 goto error1;
1332
1333         req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
1334         cm_format_req(req_msg, cm_id_priv, param);
1335         cm_id_priv->tid = req_msg->hdr.tid;
1336         cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
1337         cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
1338
1339         cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
1340         cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
1341
1342         spin_lock_irqsave(&cm_id_priv->lock, flags);
1343         ret = ib_post_send_mad(cm_id_priv->msg, NULL);
1344         if (ret) {
1345                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1346                 goto error2;
1347         }
1348         BUG_ON(cm_id->state != IB_CM_IDLE);
1349         cm_id->state = IB_CM_REQ_SENT;
1350         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1351         return 0;
1352
1353 error2: cm_free_msg(cm_id_priv->msg);
1354 error1: kfree(cm_id_priv->timewait_info);
1355 out:    return ret;
1356 }
1357 EXPORT_SYMBOL(ib_send_cm_req);
1358
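/*
 * Send a REJ directly in response to a received MAD when no cm_id is
 * associated with it: the reply reuses the incoming TID and swaps the
 * local and remote communication IDs.
 */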
1359 static int cm_issue_rej(struct cm_port *port,
1360                         struct ib_mad_recv_wc *mad_recv_wc,
1361                         enum ib_cm_rej_reason reason,
1362                         enum cm_msg_response msg_rejected,
1363                         void *ari, u8 ari_length)
1364 {
1365         struct ib_mad_send_buf *msg = NULL;
1366         struct cm_rej_msg *rej_msg, *rcv_msg;
1367         int ret;
1368
1369         ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1370         if (ret)
1371                 return ret;
1372
1373         /* We just need common CM header information.  Cast to any message. */
1374         rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
1375         rej_msg = (struct cm_rej_msg *) msg->mad;
1376
1377         cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1378         rej_msg->remote_comm_id = rcv_msg->local_comm_id;
1379         rej_msg->local_comm_id = rcv_msg->remote_comm_id;
1380         cm_rej_set_msg_rejected(rej_msg, msg_rejected);
1381         rej_msg->reason = cpu_to_be16(reason);
1382
1383         if (ari && ari_length) {
1384                 cm_rej_set_reject_info_len(rej_msg, ari_length);
1385                 memcpy(rej_msg->ari, ari, ari_length);
1386         }
1387
1388         ret = ib_post_send_mad(msg, NULL);
1389         if (ret)
1390                 cm_free_msg(msg);
1391
1392         return ret;
1393 }
1394
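/*
 * Return nonzero if the local endpoint has the numerically larger CA GUID,
 * or the larger QPN when the GUIDs are equal.
 */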
1395 static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
1396                                     __be32 local_qpn, __be32 remote_qpn)
1397 {
1398         return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
1399                 ((local_ca_guid == remote_ca_guid) &&
1400                  (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
1401 }
1402
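/*
 * Build primary (and, if present, alternate) path records from a received
 * REQ.  The REQ describes the path from the sender's point of view, so
 * local and remote GIDs/LIDs are swapped here to give the receiver's view.
 */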
1403 static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1404                                             struct ib_sa_path_rec *primary_path,
1405                                             struct ib_sa_path_rec *alt_path)
1406 {
1407         memset(primary_path, 0, sizeof *primary_path);
1408         primary_path->dgid = req_msg->primary_local_gid;
1409         primary_path->sgid = req_msg->primary_remote_gid;
1410         primary_path->dlid = req_msg->primary_local_lid;
1411         primary_path->slid = req_msg->primary_remote_lid;
1412         primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
1413         primary_path->hop_limit = req_msg->primary_hop_limit;
1414         primary_path->traffic_class = req_msg->primary_traffic_class;
1415         primary_path->reversible = 1;
1416         primary_path->pkey = req_msg->pkey;
1417         primary_path->sl = cm_req_get_primary_sl(req_msg);
1418         primary_path->mtu_selector = IB_SA_EQ;
1419         primary_path->mtu = cm_req_get_path_mtu(req_msg);
1420         primary_path->rate_selector = IB_SA_EQ;
1421         primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1422         primary_path->packet_life_time_selector = IB_SA_EQ;
1423         primary_path->packet_life_time =
1424                 cm_req_get_primary_local_ack_timeout(req_msg);
1425         primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1426         primary_path->service_id = req_msg->service_id;
1427
1428         if (req_msg->alt_local_lid) {
1429                 memset(alt_path, 0, sizeof *alt_path);
1430                 alt_path->dgid = req_msg->alt_local_gid;
1431                 alt_path->sgid = req_msg->alt_remote_gid;
1432                 alt_path->dlid = req_msg->alt_local_lid;
1433                 alt_path->slid = req_msg->alt_remote_lid;
1434                 alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1435                 alt_path->hop_limit = req_msg->alt_hop_limit;
1436                 alt_path->traffic_class = req_msg->alt_traffic_class;
1437                 alt_path->reversible = 1;
1438                 alt_path->pkey = req_msg->pkey;
1439                 alt_path->sl = cm_req_get_alt_sl(req_msg);
1440                 alt_path->mtu_selector = IB_SA_EQ;
1441                 alt_path->mtu = cm_req_get_path_mtu(req_msg);
1442                 alt_path->rate_selector = IB_SA_EQ;
1443                 alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1444                 alt_path->packet_life_time_selector = IB_SA_EQ;
1445                 alt_path->packet_life_time =
1446                         cm_req_get_alt_local_ack_timeout(req_msg);
1447                 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1448                 alt_path->service_id = req_msg->service_id;
1449         }
1450 }
1451
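/*
 * Look up the P_Key for an incoming request from the pkey index in the
 * work completion; return 0 if the cached lookup fails.
 */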
1452 static u16 cm_get_bth_pkey(struct cm_work *work)
1453 {
1454         struct ib_device *ib_dev = work->port->cm_dev->ib_device;
1455         u8 port_num = work->port->port_num;
1456         u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
1457         u16 pkey;
1458         int ret;
1459
1460         ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
1461         if (ret) {
1462                 dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
1463                                      port_num, pkey_index, ret);
1464                 return 0;
1465         }
1466
1467         return pkey;
1468 }
1469
1470 static void cm_format_req_event(struct cm_work *work,
1471                                 struct cm_id_private *cm_id_priv,
1472                                 struct ib_cm_id *listen_id)
1473 {
1474         struct cm_req_msg *req_msg;
1475         struct ib_cm_req_event_param *param;
1476
1477         req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1478         param = &work->cm_event.param.req_rcvd;
1479         param->listen_id = listen_id;
1480         param->bth_pkey = cm_get_bth_pkey(work);
1481         param->port = cm_id_priv->av.port->port_num;
1482         param->primary_path = &work->path[0];
1483         if (req_msg->alt_local_lid)
1484                 param->alternate_path = &work->path[1];
1485         else
1486                 param->alternate_path = NULL;
1487         param->remote_ca_guid = req_msg->local_ca_guid;
1488         param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1489         param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1490         param->qp_type = cm_req_get_qp_type(req_msg);
1491         param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1492         param->responder_resources = cm_req_get_init_depth(req_msg);
1493         param->initiator_depth = cm_req_get_resp_res(req_msg);
1494         param->local_cm_response_timeout =
1495                                         cm_req_get_remote_resp_timeout(req_msg);
1496         param->flow_control = cm_req_get_flow_ctrl(req_msg);
1497         param->remote_cm_response_timeout =
1498                                         cm_req_get_local_resp_timeout(req_msg);
1499         param->retry_count = cm_req_get_retry_count(req_msg);
1500         param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1501         param->srq = cm_req_get_srq(req_msg);
1502         work->cm_event.private_data = &req_msg->private_data;
1503 }
1504
1505 static void cm_process_work(struct cm_id_private *cm_id_priv,
1506                             struct cm_work *work)
1507 {
1508         int ret;
1509
1510         /* We will typically only have the current event to report. */
1511         ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1512         cm_free_work(work);
1513
1514         while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1515                 spin_lock_irq(&cm_id_priv->lock);
1516                 work = cm_dequeue_work(cm_id_priv);
1517                 spin_unlock_irq(&cm_id_priv->lock);
1518                 BUG_ON(!work);
1519                 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1520                                                 &work->cm_event);
1521                 cm_free_work(work);
1522         }
1523         cm_deref_id(cm_id_priv);
1524         if (ret)
1525                 cm_destroy_id(&cm_id_priv->id, ret);
1526 }
1527
1528 static void cm_format_mra(struct cm_mra_msg *mra_msg,
1529                           struct cm_id_private *cm_id_priv,
1530                           enum cm_msg_response msg_mraed, u8 service_timeout,
1531                           const void *private_data, u8 private_data_len)
1532 {
1533         cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1534         cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1535         mra_msg->local_comm_id = cm_id_priv->id.local_id;
1536         mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1537         cm_mra_set_service_timeout(mra_msg, service_timeout);
1538
1539         if (private_data && private_data_len)
1540                 memcpy(mra_msg->private_data, private_data, private_data_len);
1541 }
1542
1543 static void cm_format_rej(struct cm_rej_msg *rej_msg,
1544                           struct cm_id_private *cm_id_priv,
1545                           enum ib_cm_rej_reason reason,
1546                           void *ari,
1547                           u8 ari_length,
1548                           const void *private_data,
1549                           u8 private_data_len)
1550 {
1551         cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1552         rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1553
1554         switch (cm_id_priv->id.state) {
1555         case IB_CM_REQ_RCVD:
1556                 rej_msg->local_comm_id = 0;
1557                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1558                 break;
1559         case IB_CM_MRA_REQ_SENT:
1560                 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1561                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1562                 break;
1563         case IB_CM_REP_RCVD:
1564         case IB_CM_MRA_REP_SENT:
1565                 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1566                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1567                 break;
1568         default:
1569                 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1570                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1571                 break;
1572         }
1573
1574         rej_msg->reason = cpu_to_be16(reason);
1575         if (ari && ari_length) {
1576                 cm_rej_set_reject_info_len(rej_msg, ari_length);
1577                 memcpy(rej_msg->ari, ari, ari_length);
1578         }
1579
1580         if (private_data && private_data_len)
1581                 memcpy(rej_msg->private_data, private_data, private_data_len);
1582 }
1583
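/*
 * Respond to a duplicate REQ: resend the pending MRA, or send a stale
 * connection REJ if the id has already moved to timewait.
 */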
1584 static void cm_dup_req_handler(struct cm_work *work,
1585                                struct cm_id_private *cm_id_priv)
1586 {
1587         struct ib_mad_send_buf *msg = NULL;
1588         int ret;
1589
1590         atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1591                         counter[CM_REQ_COUNTER]);
1592
1593         /* Quick state check to discard duplicate REQs. */
1594         if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1595                 return;
1596
1597         ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1598         if (ret)
1599                 return;
1600
1601         spin_lock_irq(&cm_id_priv->lock);
1602         switch (cm_id_priv->id.state) {
1603         case IB_CM_MRA_REQ_SENT:
1604                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1605                               CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1606                               cm_id_priv->private_data,
1607                               cm_id_priv->private_data_len);
1608                 break;
1609         case IB_CM_TIMEWAIT:
1610                 cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1611                               IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1612                 break;
1613         default:
1614                 goto unlock;
1615         }
1616         spin_unlock_irq(&cm_id_priv->lock);
1617
1618         ret = ib_post_send_mad(msg, NULL);
1619         if (ret)
1620                 goto free;
1621         return;
1622
1623 unlock: spin_unlock_irq(&cm_id_priv->lock);
1624 free:   cm_free_msg(msg);
1625 }
1626
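/*
 * Match an incoming REQ against existing connection state: duplicates are
 * handed to cm_dup_req_handler(), stale connections are rejected, and
 * otherwise the listening cm_id for the requested service is returned with
 * a reference held.
 */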
1627 static struct cm_id_private * cm_match_req(struct cm_work *work,
1628                                            struct cm_id_private *cm_id_priv)
1629 {
1630         struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1631         struct cm_timewait_info *timewait_info;
1632         struct cm_req_msg *req_msg;
1633         struct ib_cm_id *cm_id;
1634
1635         req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1636
1637         /* Check for possible duplicate REQ. */
1638         spin_lock_irq(&cm.lock);
1639         timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1640         if (timewait_info) {
1641                 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1642                                            timewait_info->work.remote_id);
1643                 spin_unlock_irq(&cm.lock);
1644                 if (cur_cm_id_priv) {
1645                         cm_dup_req_handler(work, cur_cm_id_priv);
1646                         cm_deref_id(cur_cm_id_priv);
1647                 }
1648                 return NULL;
1649         }
1650
1651         /* Check for stale connections. */
1652         timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1653         if (timewait_info) {
1654                 cm_cleanup_timewait(cm_id_priv->timewait_info);
1655                 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1656                                            timewait_info->work.remote_id);
1657
1658                 spin_unlock_irq(&cm.lock);
1659                 cm_issue_rej(work->port, work->mad_recv_wc,
1660                              IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1661                              NULL, 0);
1662                 if (cur_cm_id_priv) {
1663                         cm_id = &cur_cm_id_priv->id;
1664                         ib_send_cm_dreq(cm_id, NULL, 0);
1665                         cm_deref_id(cur_cm_id_priv);
1666                 }
1667                 return NULL;
1668         }
1669
1670         /* Find matching listen request. */
1671         listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
1672                                            req_msg->service_id);
1673         if (!listen_cm_id_priv) {
1674                 cm_cleanup_timewait(cm_id_priv->timewait_info);
1675                 spin_unlock_irq(&cm.lock);
1676                 cm_issue_rej(work->port, work->mad_recv_wc,
1677                              IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1678                              NULL, 0);
1679                 goto out;
1680         }
1681         atomic_inc(&listen_cm_id_priv->refcount);
1682         atomic_inc(&cm_id_priv->refcount);
1683         cm_id_priv->id.state = IB_CM_REQ_RCVD;
1684         atomic_inc(&cm_id_priv->work_count);
1685         spin_unlock_irq(&cm.lock);
1686 out:
1687         return listen_cm_id_priv;
1688 }
1689
1690 /*
1691  * Work-around for inter-subnet connections.  If the LIDs are permissive,
1692  * we need to override the LID/SL data in the REQ with the LID information
1693  * in the work completion.
1694  */
1695 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
1696 {
1697         if (!cm_req_get_primary_subnet_local(req_msg)) {
1698                 if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
1699                         req_msg->primary_local_lid = cpu_to_be16(wc->slid);
1700                         cm_req_set_primary_sl(req_msg, wc->sl);
1701                 }
1702
1703                 if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
1704                         req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
1705         }
1706
1707         if (!cm_req_get_alt_subnet_local(req_msg)) {
1708                 if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
1709                         req_msg->alt_local_lid = cpu_to_be16(wc->slid);
1710                         cm_req_set_alt_sl(req_msg, wc->sl);
1711                 }
1712
1713                 if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
1714                         req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
1715         }
1716 }
1717
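/*
 * Handle a new incoming REQ: create a cm_id for the connection, build the
 * path records and address vectors from the request, and report the REQ
 * event to the matching listener.
 */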
1718 static int cm_req_handler(struct cm_work *work)
1719 {
1720         struct ib_cm_id *cm_id;
1721         struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1722         struct cm_req_msg *req_msg;
1723         union ib_gid gid;
1724         struct ib_gid_attr gid_attr;
1725         const struct ib_global_route *grh;
1726         int ret;
1727
1728         req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1729
1730         cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
1731         if (IS_ERR(cm_id))
1732                 return PTR_ERR(cm_id);
1733
1734         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1735         cm_id_priv->id.remote_id = req_msg->local_comm_id;
1736         cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1737                                 work->mad_recv_wc->recv_buf.grh,
1738                                 &cm_id_priv->av);
1739         cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1740                                                             id.local_id);
1741         if (IS_ERR(cm_id_priv->timewait_info)) {
1742                 ret = PTR_ERR(cm_id_priv->timewait_info);
1743                 goto destroy;
1744         }
1745         cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1746         cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1747         cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1748
1749         listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1750         if (!listen_cm_id_priv) {
1751                 ret = -EINVAL;
1752                 kfree(cm_id_priv->timewait_info);
1753                 goto destroy;
1754         }
1755
1756         cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1757         cm_id_priv->id.context = listen_cm_id_priv->id.context;
1758         cm_id_priv->id.service_id = req_msg->service_id;
1759         cm_id_priv->id.service_mask = ~cpu_to_be64(0);
1760
1761         cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
1762         cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1763
1764         if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
1765                 memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.roce.dmac,
1766                        ETH_ALEN);
1767         grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
1768         work->path[0].hop_limit = grh->hop_limit;
1769         ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
1770                                 work->port->port_num,
1771                                 grh->sgid_index,
1772                                 &gid, &gid_attr);
1773         if (!ret) {
1774                 if (gid_attr.ndev) {
1775                         work->path[0].ifindex = gid_attr.ndev->ifindex;
1776                         work->path[0].net = dev_net(gid_attr.ndev);
1777                         dev_put(gid_attr.ndev);
1778                 }
1779                 work->path[0].gid_type = gid_attr.gid_type;
1780                 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
1781                                          cm_id_priv);
1782         }
1783         if (ret) {
1784                 int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
1785                                             work->port->port_num, 0,
1786                                             &work->path[0].sgid,
1787                                             &gid_attr);
1788                 if (!err && gid_attr.ndev) {
1789                         work->path[0].ifindex = gid_attr.ndev->ifindex;
1790                         work->path[0].net = dev_net(gid_attr.ndev);
1791                         dev_put(gid_attr.ndev);
1792                 }
1793                 work->path[0].gid_type = gid_attr.gid_type;
1794                 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
1795                                &work->path[0].sgid, sizeof work->path[0].sgid,
1796                                NULL, 0);
1797                 goto rejected;
1798         }
1799         if (req_msg->alt_local_lid) {
1800                 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
1801                                          cm_id_priv);
1802                 if (ret) {
1803                         ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
1804                                        &work->path[0].sgid,
1805                                        sizeof work->path[0].sgid, NULL, 0);
1806                         goto rejected;
1807                 }
1808         }
1809         cm_id_priv->tid = req_msg->hdr.tid;
1810         cm_id_priv->timeout_ms = cm_convert_to_ms(
1811                                         cm_req_get_local_resp_timeout(req_msg));
1812         cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1813         cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1814         cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1815         cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1816         cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1817         cm_id_priv->pkey = req_msg->pkey;
1818         cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1819         cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1820         cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1821         cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
1822
1823         cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1824         cm_process_work(cm_id_priv, work);
1825         cm_deref_id(listen_cm_id_priv);
1826         return 0;
1827
1828 rejected:
1829         atomic_dec(&cm_id_priv->refcount);
1830         cm_deref_id(listen_cm_id_priv);
1831 destroy:
1832         ib_destroy_cm_id(cm_id);
1833         return ret;
1834 }
1835
1836 static void cm_format_rep(struct cm_rep_msg *rep_msg,
1837                           struct cm_id_private *cm_id_priv,
1838                           struct ib_cm_rep_param *param)
1839 {
1840         cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1841         rep_msg->local_comm_id = cm_id_priv->id.local_id;
1842         rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1843         cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1844         rep_msg->resp_resources = param->responder_resources;
1845         cm_rep_set_target_ack_delay(rep_msg,
1846                                     cm_id_priv->av.port->cm_dev->ack_delay);
1847         cm_rep_set_failover(rep_msg, param->failover_accepted);
1848         cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1849         rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
1850
1851         if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
1852                 rep_msg->initiator_depth = param->initiator_depth;
1853                 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1854                 cm_rep_set_srq(rep_msg, param->srq);
1855                 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1856         } else {
1857                 cm_rep_set_srq(rep_msg, 1);
1858                 cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
1859         }
1860
1861         if (param->private_data && param->private_data_len)
1862                 memcpy(rep_msg->private_data, param->private_data,
1863                        param->private_data_len);
1864 }
1865
1866 int ib_send_cm_rep(struct ib_cm_id *cm_id,
1867                    struct ib_cm_rep_param *param)
1868 {
1869         struct cm_id_private *cm_id_priv;
1870         struct ib_mad_send_buf *msg;
1871         struct cm_rep_msg *rep_msg;
1872         unsigned long flags;
1873         int ret;
1874
1875         if (param->private_data &&
1876             param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1877                 return -EINVAL;
1878
1879         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1880         spin_lock_irqsave(&cm_id_priv->lock, flags);
1881         if (cm_id->state != IB_CM_REQ_RCVD &&
1882             cm_id->state != IB_CM_MRA_REQ_SENT) {
1883                 ret = -EINVAL;
1884                 goto out;
1885         }
1886
1887         ret = cm_alloc_msg(cm_id_priv, &msg);
1888         if (ret)
1889                 goto out;
1890
1891         rep_msg = (struct cm_rep_msg *) msg->mad;
1892         cm_format_rep(rep_msg, cm_id_priv, param);
1893         msg->timeout_ms = cm_id_priv->timeout_ms;
1894         msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1895
1896         ret = ib_post_send_mad(msg, NULL);
1897         if (ret) {
1898                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1899                 cm_free_msg(msg);
1900                 return ret;
1901         }
1902
1903         cm_id->state = IB_CM_REP_SENT;
1904         cm_id_priv->msg = msg;
1905         cm_id_priv->initiator_depth = param->initiator_depth;
1906         cm_id_priv->responder_resources = param->responder_resources;
1907         cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1908         cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
1909
1910 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1911         return ret;
1912 }
1913 EXPORT_SYMBOL(ib_send_cm_rep);
1914
1915 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1916                           struct cm_id_private *cm_id_priv,
1917                           const void *private_data,
1918                           u8 private_data_len)
1919 {
1920         cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1921         rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1922         rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1923
1924         if (private_data && private_data_len)
1925                 memcpy(rtu_msg->private_data, private_data, private_data_len);
1926 }
1927
1928 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1929                    const void *private_data,
1930                    u8 private_data_len)
1931 {
1932         struct cm_id_private *cm_id_priv;
1933         struct ib_mad_send_buf *msg;
1934         unsigned long flags;
1935         void *data;
1936         int ret;
1937
1938         if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1939                 return -EINVAL;
1940
1941         data = cm_copy_private_data(private_data, private_data_len);
1942         if (IS_ERR(data))
1943                 return PTR_ERR(data);
1944
1945         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1946         spin_lock_irqsave(&cm_id_priv->lock, flags);
1947         if (cm_id->state != IB_CM_REP_RCVD &&
1948             cm_id->state != IB_CM_MRA_REP_SENT) {
1949                 ret = -EINVAL;
1950                 goto error;
1951         }
1952
1953         ret = cm_alloc_msg(cm_id_priv, &msg);
1954         if (ret)
1955                 goto error;
1956
1957         cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1958                       private_data, private_data_len);
1959
1960         ret = ib_post_send_mad(msg, NULL);
1961         if (ret) {
1962                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1963                 cm_free_msg(msg);
1964                 kfree(data);
1965                 return ret;
1966         }
1967
1968         cm_id->state = IB_CM_ESTABLISHED;
1969         cm_set_private_data(cm_id_priv, data, private_data_len);
1970         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1971         return 0;
1972
1973 error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1974         kfree(data);
1975         return ret;
1976 }
1977 EXPORT_SYMBOL(ib_send_cm_rtu);
1978
1979 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
1980 {
1981         struct cm_rep_msg *rep_msg;
1982         struct ib_cm_rep_event_param *param;
1983
1984         rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1985         param = &work->cm_event.param.rep_rcvd;
1986         param->remote_ca_guid = rep_msg->local_ca_guid;
1987         param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1988         param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
1989         param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1990         param->responder_resources = rep_msg->initiator_depth;
1991         param->initiator_depth = rep_msg->resp_resources;
1992         param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1993         param->failover_accepted = cm_rep_get_failover(rep_msg);
1994         param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1995         param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1996         param->srq = cm_rep_get_srq(rep_msg);
1997         work->cm_event.private_data = &rep_msg->private_data;
1998 }
1999
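/*
 * Respond to a duplicate REP by resending the RTU or MRA the peer
 * apparently did not receive.
 */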
2000 static void cm_dup_rep_handler(struct cm_work *work)
2001 {
2002         struct cm_id_private *cm_id_priv;
2003         struct cm_rep_msg *rep_msg;
2004         struct ib_mad_send_buf *msg = NULL;
2005         int ret;
2006
2007         rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2008         cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
2009                                    rep_msg->local_comm_id);
2010         if (!cm_id_priv)
2011                 return;
2012
2013         atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2014                         counter[CM_REP_COUNTER]);
2015         ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2016         if (ret)
2017                 goto deref;
2018
2019         spin_lock_irq(&cm_id_priv->lock);
2020         if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2021                 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2022                               cm_id_priv->private_data,
2023                               cm_id_priv->private_data_len);
2024         else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2025                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2026                               CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2027                               cm_id_priv->private_data,
2028                               cm_id_priv->private_data_len);
2029         else
2030                 goto unlock;
2031         spin_unlock_irq(&cm_id_priv->lock);
2032
2033         ret = ib_post_send_mad(msg, NULL);
2034         if (ret)
2035                 goto free;
2036         goto deref;
2037
2038 unlock: spin_unlock_irq(&cm_id_priv->lock);
2039 free:   cm_free_msg(msg);
2040 deref:  cm_deref_id(cm_id_priv);
2041 }
2042
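/*
 * Handle a REP on the active side: screen out duplicates and stale
 * connections, record the remote QP parameters and ack timeouts, and
 * report the event.
 */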
2043 static int cm_rep_handler(struct cm_work *work)
2044 {
2045         struct cm_id_private *cm_id_priv;
2046         struct cm_rep_msg *rep_msg;
2047         int ret;
2048         struct cm_id_private *cur_cm_id_priv;
2049         struct ib_cm_id *cm_id;
2050         struct cm_timewait_info *timewait_info;
2051
2052         rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2053         cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
2054         if (!cm_id_priv) {
2055                 cm_dup_rep_handler(work);
2056                 return -EINVAL;
2057         }
2058
2059         cm_format_rep_event(work, cm_id_priv->qp_type);
2060
2061         spin_lock_irq(&cm_id_priv->lock);
2062         switch (cm_id_priv->id.state) {
2063         case IB_CM_REQ_SENT:
2064         case IB_CM_MRA_REQ_RCVD:
2065                 break;
2066         default:
2067                 spin_unlock_irq(&cm_id_priv->lock);
2068                 ret = -EINVAL;
2069                 goto error;
2070         }
2071
2072         cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
2073         cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
2074         cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2075
2076         spin_lock(&cm.lock);
2077         /* Check for duplicate REP. */
2078         if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2079                 spin_unlock(&cm.lock);
2080                 spin_unlock_irq(&cm_id_priv->lock);
2081                 ret = -EINVAL;
2082                 goto error;
2083         }
2084         /* Check for a stale connection. */
2085         timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2086         if (timewait_info) {
2087                 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2088                          &cm.remote_id_table);
2089                 cm_id_priv->timewait_info->inserted_remote_id = 0;
2090                 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
2091                                            timewait_info->work.remote_id);
2092
2093                 spin_unlock(&cm.lock);
2094                 spin_unlock_irq(&cm_id_priv->lock);
2095                 cm_issue_rej(work->port, work->mad_recv_wc,
2096                              IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2097                              NULL, 0);
2098                 ret = -EINVAL;
2099                 if (cur_cm_id_priv) {
2100                         cm_id = &cur_cm_id_priv->id;
2101                         ib_send_cm_dreq(cm_id, NULL, 0);
2102                         cm_deref_id(cur_cm_id_priv);
2103                 }
2104
2105                 goto error;
2106         }
2107         spin_unlock(&cm.lock);
2108
2109         cm_id_priv->id.state = IB_CM_REP_RCVD;
2110         cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2111         cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2112         cm_id_priv->initiator_depth = rep_msg->resp_resources;
2113         cm_id_priv->responder_resources = rep_msg->initiator_depth;
2114         cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2115         cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2116         cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2117         cm_id_priv->av.timeout =
2118                         cm_ack_timeout(cm_id_priv->target_ack_delay,
2119                                        cm_id_priv->av.timeout - 1);
2120         cm_id_priv->alt_av.timeout =
2121                         cm_ack_timeout(cm_id_priv->target_ack_delay,
2122                                        cm_id_priv->alt_av.timeout - 1);
2123
2124         /* todo: handle peer_to_peer */
2125
2126         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2127         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2128         if (!ret)
2129                 list_add_tail(&work->list, &cm_id_priv->work_list);
2130         spin_unlock_irq(&cm_id_priv->lock);
2131
2132         if (ret)
2133                 cm_process_work(cm_id_priv, work);
2134         else
2135                 cm_deref_id(cm_id_priv);
2136         return 0;
2137
2138 error:
2139         cm_deref_id(cm_id_priv);
2140         return ret;
2141 }
2142
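/*
 * Handle the internally queued work generated when the consumer reports an
 * established connection; see cm_establish() for the lookup caveats.
 */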
2143 static int cm_establish_handler(struct cm_work *work)
2144 {
2145         struct cm_id_private *cm_id_priv;
2146         int ret;
2147
2148         /* See comment in cm_establish about lookup. */
2149         cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2150         if (!cm_id_priv)
2151                 return -EINVAL;
2152
2153         spin_lock_irq(&cm_id_priv->lock);
2154         if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2155                 spin_unlock_irq(&cm_id_priv->lock);
2156                 goto out;
2157         }
2158
2159         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2160         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2161         if (!ret)
2162                 list_add_tail(&work->list, &cm_id_priv->work_list);
2163         spin_unlock_irq(&cm_id_priv->lock);
2164
2165         if (ret)
2166                 cm_process_work(cm_id_priv, work);
2167         else
2168                 cm_deref_id(cm_id_priv);
2169         return 0;
2170 out:
2171         cm_deref_id(cm_id_priv);
2172         return -EINVAL;
2173 }
2174
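/*
 * Handle a received RTU: cancel the outstanding REP and move the passive
 * side into the established state.
 */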
2175 static int cm_rtu_handler(struct cm_work *work)
2176 {
2177         struct cm_id_private *cm_id_priv;
2178         struct cm_rtu_msg *rtu_msg;
2179         int ret;
2180
2181         rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2182         cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2183                                    rtu_msg->local_comm_id);
2184         if (!cm_id_priv)
2185                 return -EINVAL;
2186
2187         work->cm_event.private_data = &rtu_msg->private_data;
2188
2189         spin_lock_irq(&cm_id_priv->lock);
2190         if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2191             cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2192                 spin_unlock_irq(&cm_id_priv->lock);
2193                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2194                                 counter[CM_RTU_COUNTER]);
2195                 goto out;
2196         }
2197         cm_id_priv->id.state = IB_CM_ESTABLISHED;
2198
2199         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2200         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2201         if (!ret)
2202                 list_add_tail(&work->list, &cm_id_priv->work_list);
2203         spin_unlock_irq(&cm_id_priv->lock);
2204
2205         if (ret)
2206                 cm_process_work(cm_id_priv, work);
2207         else
2208                 cm_deref_id(cm_id_priv);
2209         return 0;
2210 out:
2211         cm_deref_id(cm_id_priv);
2212         return -EINVAL;
2213 }
2214
2215 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2216                           struct cm_id_private *cm_id_priv,
2217                           const void *private_data,
2218                           u8 private_data_len)
2219 {
2220         cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2221                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
2222         dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2223         dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2224         cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2225
2226         if (private_data && private_data_len)
2227                 memcpy(dreq_msg->private_data, private_data, private_data_len);
2228 }
2229
2230 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2231                     const void *private_data,
2232                     u8 private_data_len)
2233 {
2234         struct cm_id_private *cm_id_priv;
2235         struct ib_mad_send_buf *msg;
2236         unsigned long flags;
2237         int ret;
2238
2239         if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2240                 return -EINVAL;
2241
2242         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2243         spin_lock_irqsave(&cm_id_priv->lock, flags);
2244         if (cm_id->state != IB_CM_ESTABLISHED) {
2245                 ret = -EINVAL;
2246                 goto out;
2247         }
2248
2249         if (cm_id->lap_state == IB_CM_LAP_SENT ||
2250             cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2251                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2252
2253         ret = cm_alloc_msg(cm_id_priv, &msg);
2254         if (ret) {
2255                 cm_enter_timewait(cm_id_priv);
2256                 goto out;
2257         }
2258
2259         cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2260                        private_data, private_data_len);
2261         msg->timeout_ms = cm_id_priv->timeout_ms;
2262         msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2263
2264         ret = ib_post_send_mad(msg, NULL);
2265         if (ret) {
2266                 cm_enter_timewait(cm_id_priv);
2267                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2268                 cm_free_msg(msg);
2269                 return ret;
2270         }
2271
2272         cm_id->state = IB_CM_DREQ_SENT;
2273         cm_id_priv->msg = msg;
2274 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2275         return ret;
2276 }
2277 EXPORT_SYMBOL(ib_send_cm_dreq);
2278
2279 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2280                           struct cm_id_private *cm_id_priv,
2281                           const void *private_data,
2282                           u8 private_data_len)
2283 {
2284         cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2285         drep_msg->local_comm_id = cm_id_priv->id.local_id;
2286         drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2287
2288         if (private_data && private_data_len)
2289                 memcpy(drep_msg->private_data, private_data, private_data_len);
2290 }
2291
2292 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2293                     const void *private_data,
2294                     u8 private_data_len)
2295 {
2296         struct cm_id_private *cm_id_priv;
2297         struct ib_mad_send_buf *msg;
2298         unsigned long flags;
2299         void *data;
2300         int ret;
2301
2302         if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2303                 return -EINVAL;
2304
2305         data = cm_copy_private_data(private_data, private_data_len);
2306         if (IS_ERR(data))
2307                 return PTR_ERR(data);
2308
2309         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2310         spin_lock_irqsave(&cm_id_priv->lock, flags);
2311         if (cm_id->state != IB_CM_DREQ_RCVD) {
2312                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2313                 kfree(data);
2314                 return -EINVAL;
2315         }
2316
2317         cm_set_private_data(cm_id_priv, data, private_data_len);
2318         cm_enter_timewait(cm_id_priv);
2319
2320         ret = cm_alloc_msg(cm_id_priv, &msg);
2321         if (ret)
2322                 goto out;
2323
2324         cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2325                        private_data, private_data_len);
2326
2327         ret = ib_post_send_mad(msg, NULL);
2328         if (ret) {
2329                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2330                 cm_free_msg(msg);
2331                 return ret;
2332         }
2333
2334 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2335         return ret;
2336 }
2337 EXPORT_SYMBOL(ib_send_cm_drep);
2338
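/*
 * Reply to a DREQ with a DREP built directly from the received MAD, used
 * when no matching cm_id exists so the peer can finish tearing down the
 * stale connection.
 */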
2339 static int cm_issue_drep(struct cm_port *port,
2340                          struct ib_mad_recv_wc *mad_recv_wc)
2341 {
2342         struct ib_mad_send_buf *msg = NULL;
2343         struct cm_dreq_msg *dreq_msg;
2344         struct cm_drep_msg *drep_msg;
2345         int ret;
2346
2347         ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2348         if (ret)
2349                 return ret;
2350
2351         dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2352         drep_msg = (struct cm_drep_msg *) msg->mad;
2353
2354         cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2355         drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2356         drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2357
2358         ret = ib_post_send_mad(msg, NULL);
2359         if (ret)
2360                 cm_free_msg(msg);
2361
2362         return ret;
2363 }
2364
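/*
 * Handle a received DREQ: answer with a DREP for ids already in timewait,
 * otherwise cancel any message in flight and report the disconnect request
 * to the consumer.
 */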
2365 static int cm_dreq_handler(struct cm_work *work)
2366 {
2367         struct cm_id_private *cm_id_priv;
2368         struct cm_dreq_msg *dreq_msg;
2369         struct ib_mad_send_buf *msg = NULL;
2370         int ret;
2371
2372         dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2373         cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2374                                    dreq_msg->local_comm_id);
2375         if (!cm_id_priv) {
2376                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2377                                 counter[CM_DREQ_COUNTER]);
2378                 cm_issue_drep(work->port, work->mad_recv_wc);
2379                 return -EINVAL;
2380         }
2381
2382         work->cm_event.private_data = &dreq_msg->private_data;
2383
2384         spin_lock_irq(&cm_id_priv->lock);
2385         if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2386                 goto unlock;
2387
2388         switch (cm_id_priv->id.state) {
2389         case IB_CM_REP_SENT:
2390         case IB_CM_DREQ_SENT:
2391                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2392                 break;
2393         case IB_CM_ESTABLISHED:
2394                 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2395                     cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2396                         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2397                 break;
2398         case IB_CM_MRA_REP_RCVD:
2399                 break;
2400         case IB_CM_TIMEWAIT:
2401                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2402                                 counter[CM_DREQ_COUNTER]);
2403                 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2404                         goto unlock;
2405
2406                 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2407                                cm_id_priv->private_data,
2408                                cm_id_priv->private_data_len);
2409                 spin_unlock_irq(&cm_id_priv->lock);
2410
2411                 if (ib_post_send_mad(msg, NULL))
2412                         cm_free_msg(msg);
2413                 goto deref;
2414         case IB_CM_DREQ_RCVD:
2415                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2416                                 counter[CM_DREQ_COUNTER]);
2417                 goto unlock;
2418         default:
2419                 goto unlock;
2420         }
2421         cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2422         cm_id_priv->tid = dreq_msg->hdr.tid;
2423         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2424         if (!ret)
2425                 list_add_tail(&work->list, &cm_id_priv->work_list);
2426         spin_unlock_irq(&cm_id_priv->lock);
2427
2428         if (ret)
2429                 cm_process_work(cm_id_priv, work);
2430         else
2431                 cm_deref_id(cm_id_priv);
2432         return 0;
2433
2434 unlock: spin_unlock_irq(&cm_id_priv->lock);
2435 deref:  cm_deref_id(cm_id_priv);
2436         return -EINVAL;
2437 }
2438
2439 static int cm_drep_handler(struct cm_work *work)
2440 {
2441         struct cm_id_private *cm_id_priv;
2442         struct cm_drep_msg *drep_msg;
2443         int ret;
2444
2445         drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2446         cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2447                                    drep_msg->local_comm_id);
2448         if (!cm_id_priv)
2449                 return -EINVAL;
2450
2451         work->cm_event.private_data = &drep_msg->private_data;
2452
2453         spin_lock_irq(&cm_id_priv->lock);
2454         if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2455             cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2456                 spin_unlock_irq(&cm_id_priv->lock);
2457                 goto out;
2458         }
2459         cm_enter_timewait(cm_id_priv);
2460
2461         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2462         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2463         if (!ret)
2464                 list_add_tail(&work->list, &cm_id_priv->work_list);
2465         spin_unlock_irq(&cm_id_priv->lock);
2466
2467         if (ret)
2468                 cm_process_work(cm_id_priv, work);
2469         else
2470                 cm_deref_id(cm_id_priv);
2471         return 0;
2472 out:
2473         cm_deref_id(cm_id_priv);
2474         return -EINVAL;
2475 }
2476
2477 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2478                    enum ib_cm_rej_reason reason,
2479                    void *ari,
2480                    u8 ari_length,
2481                    const void *private_data,
2482                    u8 private_data_len)
2483 {
2484         struct cm_id_private *cm_id_priv;
2485         struct ib_mad_send_buf *msg;
2486         unsigned long flags;
2487         int ret;
2488
2489         if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2490             (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2491                 return -EINVAL;
2492
2493         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2494
2495         spin_lock_irqsave(&cm_id_priv->lock, flags);
2496         switch (cm_id->state) {
2497         case IB_CM_REQ_SENT:
2498         case IB_CM_MRA_REQ_RCVD:
2499         case IB_CM_REQ_RCVD:
2500         case IB_CM_MRA_REQ_SENT:
2501         case IB_CM_REP_RCVD:
2502         case IB_CM_MRA_REP_SENT:
2503                 ret = cm_alloc_msg(cm_id_priv, &msg);
2504                 if (!ret)
2505                         cm_format_rej((struct cm_rej_msg *) msg->mad,
2506                                       cm_id_priv, reason, ari, ari_length,
2507                                       private_data, private_data_len);
2508
2509                 cm_reset_to_idle(cm_id_priv);
2510                 break;
2511         case IB_CM_REP_SENT:
2512         case IB_CM_MRA_REP_RCVD:
2513                 ret = cm_alloc_msg(cm_id_priv, &msg);
2514                 if (!ret)
2515                         cm_format_rej((struct cm_rej_msg *) msg->mad,
2516                                       cm_id_priv, reason, ari, ari_length,
2517                                       private_data, private_data_len);
2518
2519                 cm_enter_timewait(cm_id_priv);
2520                 break;
2521         default:
2522                 ret = -EINVAL;
2523                 goto out;
2524         }
2525
2526         if (ret)
2527                 goto out;
2528
2529         ret = ib_post_send_mad(msg, NULL);
2530         if (ret)
2531                 cm_free_msg(msg);
2532
2533 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2534         return ret;
2535 }
2536 EXPORT_SYMBOL(ib_send_cm_rej);
2537
2538 static void cm_format_rej_event(struct cm_work *work)
2539 {
2540         struct cm_rej_msg *rej_msg;
2541         struct ib_cm_rej_event_param *param;
2542
2543         rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2544         param = &work->cm_event.param.rej_rcvd;
2545         param->ari = rej_msg->ari;
2546         param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2547         param->reason = __be16_to_cpu(rej_msg->reason);
2548         work->cm_event.private_data = &rej_msg->private_data;
2549 }
2550
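/*
 * Locate the cm_id targeted by a received REJ.  A timeout REJ is matched
 * through the timewait table using the identifier carried in its ARI;
 * other REJs are matched by communication ID.
 */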
2551 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2552 {
2553         struct cm_timewait_info *timewait_info;
2554         struct cm_id_private *cm_id_priv;
2555         __be32 remote_id;
2556
2557         remote_id = rej_msg->local_comm_id;
2558
2559         if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2560                 spin_lock_irq(&cm.lock);
2561                 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2562                                                   remote_id);
2563                 if (!timewait_info) {
2564                         spin_unlock_irq(&cm.lock);
2565                         return NULL;
2566                 }
2567                 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2568                                       (timewait_info->work.local_id ^
2569                                        cm.random_id_operand));
2570                 if (cm_id_priv) {
2571                         if (cm_id_priv->id.remote_id == remote_id)
2572                                 atomic_inc(&cm_id_priv->refcount);
2573                         else
2574                                 cm_id_priv = NULL;
2575                 }
2576                 spin_unlock_irq(&cm.lock);
2577         } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2578                 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2579         else
2580                 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2581
2582         return cm_id_priv;
2583 }
2584
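/*
 * Handle a received REJ: depending on the connection state, reset the id
 * to idle or move it into timewait, then report the rejection.
 */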
2585 static int cm_rej_handler(struct cm_work *work)
2586 {
2587         struct cm_id_private *cm_id_priv;
2588         struct cm_rej_msg *rej_msg;
2589         int ret;
2590
2591         rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2592         cm_id_priv = cm_acquire_rejected_id(rej_msg);
2593         if (!cm_id_priv)
2594                 return -EINVAL;
2595
2596         cm_format_rej_event(work);
2597
2598         spin_lock_irq(&cm_id_priv->lock);
2599         switch (cm_id_priv->id.state) {
2600         case IB_CM_REQ_SENT:
2601         case IB_CM_MRA_REQ_RCVD:
2602         case IB_CM_REP_SENT:
2603         case IB_CM_MRA_REP_RCVD:
2604                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2605                 /* fall through */
2606         case IB_CM_REQ_RCVD:
2607         case IB_CM_MRA_REQ_SENT:
2608                 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2609                         cm_enter_timewait(cm_id_priv);
2610                 else
2611                         cm_reset_to_idle(cm_id_priv);
2612                 break;
2613         case IB_CM_DREQ_SENT:
2614                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2615                 /* fall through */
2616         case IB_CM_REP_RCVD:
2617         case IB_CM_MRA_REP_SENT:
2618                 cm_enter_timewait(cm_id_priv);
2619                 break;
2620         case IB_CM_ESTABLISHED:
2621                 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2622                     cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2623                         if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2624                                 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2625                                               cm_id_priv->msg);
2626                         cm_enter_timewait(cm_id_priv);
2627                         break;
2628                 }
2629                 /* fall through */
2630         default:
2631                 spin_unlock_irq(&cm_id_priv->lock);
2632                 ret = -EINVAL;
2633                 goto out;
2634         }
2635
2636         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2637         if (!ret)
2638                 list_add_tail(&work->list, &cm_id_priv->work_list);
2639         spin_unlock_irq(&cm_id_priv->lock);
2640
2641         if (ret)
2642                 cm_process_work(cm_id_priv, work);
2643         else
2644                 cm_deref_id(cm_id_priv);
2645         return 0;
2646 out:
2647         cm_deref_id(cm_id_priv);
2648         return -EINVAL;
2649 }
2650
2651 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2652                    u8 service_timeout,
2653                    const void *private_data,
2654                    u8 private_data_len)
2655 {
2656         struct cm_id_private *cm_id_priv;
2657         struct ib_mad_send_buf *msg;
2658         enum ib_cm_state cm_state;
2659         enum ib_cm_lap_state lap_state;
2660         enum cm_msg_response msg_response;
2661         void *data;
2662         unsigned long flags;
2663         int ret;
2664
2665         if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2666                 return -EINVAL;
2667
2668         data = cm_copy_private_data(private_data, private_data_len);
2669         if (IS_ERR(data))
2670                 return PTR_ERR(data);
2671
2672         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2673
2674         spin_lock_irqsave(&cm_id_priv->lock, flags);
2675         switch (cm_id_priv->id.state) {
2676         case IB_CM_REQ_RCVD:
2677                 cm_state = IB_CM_MRA_REQ_SENT;
2678                 lap_state = cm_id->lap_state;
2679                 msg_response = CM_MSG_RESPONSE_REQ;
2680                 break;
2681         case IB_CM_REP_RCVD:
2682                 cm_state = IB_CM_MRA_REP_SENT;
2683                 lap_state = cm_id->lap_state;
2684                 msg_response = CM_MSG_RESPONSE_REP;
2685                 break;
2686         case IB_CM_ESTABLISHED:
2687                 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2688                         cm_state = cm_id->state;
2689                         lap_state = IB_CM_MRA_LAP_SENT;
2690                         msg_response = CM_MSG_RESPONSE_OTHER;
2691                         break;
2692                 }
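                /* fall through */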
2693         default:
2694                 ret = -EINVAL;
2695                 goto error1;
2696         }
2697
2698         if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2699                 ret = cm_alloc_msg(cm_id_priv, &msg);
2700                 if (ret)
2701                         goto error1;
2702
2703                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2704                               msg_response, service_timeout,
2705                               private_data, private_data_len);
2706                 ret = ib_post_send_mad(msg, NULL);
2707                 if (ret)
2708                         goto error2;
2709         }
2710
2711         cm_id->state = cm_state;
2712         cm_id->lap_state = lap_state;
2713         cm_id_priv->service_timeout = service_timeout;
2714         cm_set_private_data(cm_id_priv, data, private_data_len);
2715         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2716         return 0;
2717
2718 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2719         kfree(data);
2720         return ret;
2721
2722 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2723         kfree(data);
2724         cm_free_msg(msg);
2725         return ret;
2726 }
2727 EXPORT_SYMBOL(ib_send_cm_mra);
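     /*
      * Illustrative sketch only (not part of this file; the handler name is
      * made up): a ULP that needs extra time to respond to a REQ could send
      * an MRA from its IB_CM_REQ_RECEIVED callback, e.g.:
      *
      *	static int example_req_handler(struct ib_cm_id *cm_id,
      *				       struct ib_cm_event *event)
      *	{
      *		if (event->event != IB_CM_REQ_RECEIVED)
      *			return 0;
      *		return ib_send_cm_mra(cm_id, 20, NULL, 0);
      *	}
      *
      * where 20 is an arbitrary example service timeout code.
      */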
2728
2729 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2730 {
2731         switch (cm_mra_get_msg_mraed(mra_msg)) {
2732         case CM_MSG_RESPONSE_REQ:
2733                 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2734         case CM_MSG_RESPONSE_REP:
2735         case CM_MSG_RESPONSE_OTHER:
2736                 return cm_acquire_id(mra_msg->remote_comm_id,
2737                                      mra_msg->local_comm_id);
2738         default:
2739                 return NULL;
2740         }
2741 }
2742
2743 static int cm_mra_handler(struct cm_work *work)
2744 {
2745         struct cm_id_private *cm_id_priv;
2746         struct cm_mra_msg *mra_msg;
2747         int timeout, ret;
2748
2749         mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2750         cm_id_priv = cm_acquire_mraed_id(mra_msg);
2751         if (!cm_id_priv)
2752                 return -EINVAL;
2753
2754         work->cm_event.private_data = &mra_msg->private_data;
2755         work->cm_event.param.mra_rcvd.service_timeout =
2756                                         cm_mra_get_service_timeout(mra_msg);
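             /*
              * The new wait time combines the peer's advertised service
              * timeout with the local ack timeout, both converted to
              * milliseconds, and is applied to the outstanding MAD below via
              * ib_modify_mad().
              */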
2757         timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2758                   cm_convert_to_ms(cm_id_priv->av.timeout);
2759
2760         spin_lock_irq(&cm_id_priv->lock);
2761         switch (cm_id_priv->id.state) {
2762         case IB_CM_REQ_SENT:
2763                 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2764                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
2765                                   cm_id_priv->msg, timeout))
2766                         goto out;
2767                 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2768                 break;
2769         case IB_CM_REP_SENT:
2770                 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2771                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
2772                                   cm_id_priv->msg, timeout))
2773                         goto out;
2774                 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2775                 break;
2776         case IB_CM_ESTABLISHED:
2777                 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2778                     cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2779                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
2780                                   cm_id_priv->msg, timeout)) {
2781                         if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2782                                 atomic_long_inc(&work->port->
2783                                                 counter_group[CM_RECV_DUPLICATES].
2784                                                 counter[CM_MRA_COUNTER]);
2785                         goto out;
2786                 }
2787                 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2788                 break;
2789         case IB_CM_MRA_REQ_RCVD:
2790         case IB_CM_MRA_REP_RCVD:
2791                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2792                                 counter[CM_MRA_COUNTER]);
2793                 /* fall through */
2794         default:
2795                 goto out;
2796         }
2797
2798         cm_id_priv->msg->context[1] = (void *) (unsigned long)
2799                                       cm_id_priv->id.state;
2800         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2801         if (!ret)
2802                 list_add_tail(&work->list, &cm_id_priv->work_list);
2803         spin_unlock_irq(&cm_id_priv->lock);
2804
2805         if (ret)
2806                 cm_process_work(cm_id_priv, work);
2807         else
2808                 cm_deref_id(cm_id_priv);
2809         return 0;
2810 out:
2811         spin_unlock_irq(&cm_id_priv->lock);
2812         cm_deref_id(cm_id_priv);
2813         return -EINVAL;
2814 }
2815
2816 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2817                           struct cm_id_private *cm_id_priv,
2818                           struct ib_sa_path_rec *alternate_path,
2819                           const void *private_data,
2820                           u8 private_data_len)
2821 {
2822         cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2823                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2824         lap_msg->local_comm_id = cm_id_priv->id.local_id;
2825         lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2826         cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2827         /* todo: need remote CM response timeout */
2828         cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2829         lap_msg->alt_local_lid = alternate_path->slid;
2830         lap_msg->alt_remote_lid = alternate_path->dlid;
2831         lap_msg->alt_local_gid = alternate_path->sgid;
2832         lap_msg->alt_remote_gid = alternate_path->dgid;
2833         cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2834         cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2835         lap_msg->alt_hop_limit = alternate_path->hop_limit;
2836         cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2837         cm_lap_set_sl(lap_msg, alternate_path->sl);
2838         cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2839         cm_lap_set_local_ack_timeout(lap_msg,
2840                 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2841                                alternate_path->packet_life_time));
2842
2843         if (private_data && private_data_len)
2844                 memcpy(lap_msg->private_data, private_data, private_data_len);
2845 }
2846
2847 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2848                    struct ib_sa_path_rec *alternate_path,
2849                    const void *private_data,
2850                    u8 private_data_len)
2851 {
2852         struct cm_id_private *cm_id_priv;
2853         struct ib_mad_send_buf *msg;
2854         unsigned long flags;
2855         int ret;
2856
2857         if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2858                 return -EINVAL;
2859
2860         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2861         spin_lock_irqsave(&cm_id_priv->lock, flags);
2862         if (cm_id->state != IB_CM_ESTABLISHED ||
2863             (cm_id->lap_state != IB_CM_LAP_UNINIT &&
2864              cm_id->lap_state != IB_CM_LAP_IDLE)) {
2865                 ret = -EINVAL;
2866                 goto out;
2867         }
2868
2869         ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
2870                                  cm_id_priv);
2871         if (ret)
2872                 goto out;
2873         cm_id_priv->alt_av.timeout =
2874                         cm_ack_timeout(cm_id_priv->target_ack_delay,
2875                                        cm_id_priv->alt_av.timeout - 1);
2876
2877         ret = cm_alloc_msg(cm_id_priv, &msg);
2878         if (ret)
2879                 goto out;
2880
2881         cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2882                       alternate_path, private_data, private_data_len);
2883         msg->timeout_ms = cm_id_priv->timeout_ms;
2884         msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2885
2886         ret = ib_post_send_mad(msg, NULL);
2887         if (ret) {
2888                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2889                 cm_free_msg(msg);
2890                 return ret;
2891         }
2892
2893         cm_id->lap_state = IB_CM_LAP_SENT;
2894         cm_id_priv->msg = msg;
2895
2896 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2897         return ret;
2898 }
2899 EXPORT_SYMBOL(ib_send_cm_lap);
2900
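     /*
      * Build a path record from a received LAP.  The LAP fields are written
      * from the sender's point of view, so local and remote GIDs/LIDs are
      * swapped here.
      */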
2901 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
2902                                     struct ib_sa_path_rec *path,
2903                                     struct cm_lap_msg *lap_msg)
2904 {
2905         memset(path, 0, sizeof *path);
2906         path->dgid = lap_msg->alt_local_gid;
2907         path->sgid = lap_msg->alt_remote_gid;
2908         path->dlid = lap_msg->alt_local_lid;
2909         path->slid = lap_msg->alt_remote_lid;
2910         path->flow_label = cm_lap_get_flow_label(lap_msg);
2911         path->hop_limit = lap_msg->alt_hop_limit;
2912         path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2913         path->reversible = 1;
2914         path->pkey = cm_id_priv->pkey;
2915         path->sl = cm_lap_get_sl(lap_msg);
2916         path->mtu_selector = IB_SA_EQ;
2917         path->mtu = cm_id_priv->path_mtu;
2918         path->rate_selector = IB_SA_EQ;
2919         path->rate = cm_lap_get_packet_rate(lap_msg);
2920         path->packet_life_time_selector = IB_SA_EQ;
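             /*
              * The LAP carried a local ack timeout (roughly packet life time
              * plus one), so subtract one to recover the packet life time.
              */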
2921         path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2922         path->packet_life_time -= (path->packet_life_time > 0);
2923 }
2924
2925 static int cm_lap_handler(struct cm_work *work)
2926 {
2927         struct cm_id_private *cm_id_priv;
2928         struct cm_lap_msg *lap_msg;
2929         struct ib_cm_lap_event_param *param;
2930         struct ib_mad_send_buf *msg = NULL;
2931         int ret;
2932
2933         /* todo: verify LAP request and send reject APR if invalid. */
2934         lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2935         cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2936                                    lap_msg->local_comm_id);
2937         if (!cm_id_priv)
2938                 return -EINVAL;
2939
2940         param = &work->cm_event.param.lap_rcvd;
2941         param->alternate_path = &work->path[0];
2942         cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
2943         work->cm_event.private_data = &lap_msg->private_data;
2944
2945         spin_lock_irq(&cm_id_priv->lock);
2946         if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2947                 goto unlock;
2948
2949         switch (cm_id_priv->id.lap_state) {
2950         case IB_CM_LAP_UNINIT:
2951         case IB_CM_LAP_IDLE:
2952                 break;
2953         case IB_CM_MRA_LAP_SENT:
2954                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2955                                 counter[CM_LAP_COUNTER]);
2956                 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2957                         goto unlock;
2958
2959                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2960                               CM_MSG_RESPONSE_OTHER,
2961                               cm_id_priv->service_timeout,
2962                               cm_id_priv->private_data,
2963                               cm_id_priv->private_data_len);
2964                 spin_unlock_irq(&cm_id_priv->lock);
2965
2966                 if (ib_post_send_mad(msg, NULL))
2967                         cm_free_msg(msg);
2968                 goto deref;
2969         case IB_CM_LAP_RCVD:
2970                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2971                                 counter[CM_LAP_COUNTER]);
2972                 goto unlock;
2973         default:
2974                 goto unlock;
2975         }
2976
2977         cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2978         cm_id_priv->tid = lap_msg->hdr.tid;
2979         cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2980                                 work->mad_recv_wc->recv_buf.grh,
2981                                 &cm_id_priv->av);
2982         cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
2983                            cm_id_priv);
2984         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2985         if (!ret)
2986                 list_add_tail(&work->list, &cm_id_priv->work_list);
2987         spin_unlock_irq(&cm_id_priv->lock);
2988
2989         if (ret)
2990                 cm_process_work(cm_id_priv, work);
2991         else
2992                 cm_deref_id(cm_id_priv);
2993         return 0;
2994
2995 unlock: spin_unlock_irq(&cm_id_priv->lock);
2996 deref:  cm_deref_id(cm_id_priv);
2997         return -EINVAL;
2998 }
2999
3000 static void cm_format_apr(struct cm_apr_msg *apr_msg,
3001                           struct cm_id_private *cm_id_priv,
3002                           enum ib_cm_apr_status status,
3003                           void *info,
3004                           u8 info_length,
3005                           const void *private_data,
3006                           u8 private_data_len)
3007 {
3008         cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
3009         apr_msg->local_comm_id = cm_id_priv->id.local_id;
3010         apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
3011         apr_msg->ap_status = (u8) status;
3012
3013         if (info && info_length) {
3014                 apr_msg->info_length = info_length;
3015                 memcpy(apr_msg->info, info, info_length);
3016         }
3017
3018         if (private_data && private_data_len)
3019                 memcpy(apr_msg->private_data, private_data, private_data_len);
3020 }
3021
3022 int ib_send_cm_apr(struct ib_cm_id *cm_id,
3023                    enum ib_cm_apr_status status,
3024                    void *info,
3025                    u8 info_length,
3026                    const void *private_data,
3027                    u8 private_data_len)
3028 {
3029         struct cm_id_private *cm_id_priv;
3030         struct ib_mad_send_buf *msg;
3031         unsigned long flags;
3032         int ret;
3033
3034         if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
3035             (info && info_length > IB_CM_APR_INFO_LENGTH))
3036                 return -EINVAL;
3037
3038         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3039         spin_lock_irqsave(&cm_id_priv->lock, flags);
3040         if (cm_id->state != IB_CM_ESTABLISHED ||
3041             (cm_id->lap_state != IB_CM_LAP_RCVD &&
3042              cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
3043                 ret = -EINVAL;
3044                 goto out;
3045         }
3046
3047         ret = cm_alloc_msg(cm_id_priv, &msg);
3048         if (ret)
3049                 goto out;
3050
3051         cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
3052                       info, info_length, private_data, private_data_len);
3053         ret = ib_post_send_mad(msg, NULL);
3054         if (ret) {
3055                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3056                 cm_free_msg(msg);
3057                 return ret;
3058         }
3059
3060         cm_id->lap_state = IB_CM_LAP_IDLE;
3061 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3062         return ret;
3063 }
3064 EXPORT_SYMBOL(ib_send_cm_apr);
3065
3066 static int cm_apr_handler(struct cm_work *work)
3067 {
3068         struct cm_id_private *cm_id_priv;
3069         struct cm_apr_msg *apr_msg;
3070         int ret;
3071
3072         apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3073         cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
3074                                    apr_msg->local_comm_id);
3075         if (!cm_id_priv)
3076                 return -EINVAL; /* Unmatched reply. */
3077
3078         work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
3079         work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
3080         work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
3081         work->cm_event.private_data = &apr_msg->private_data;
3082
3083         spin_lock_irq(&cm_id_priv->lock);
3084         if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3085             (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3086              cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3087                 spin_unlock_irq(&cm_id_priv->lock);
3088                 goto out;
3089         }
3090         cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3091         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3092         cm_id_priv->msg = NULL;
3093
3094         ret = atomic_inc_and_test(&cm_id_priv->work_count);
3095         if (!ret)
3096                 list_add_tail(&work->list, &cm_id_priv->work_list);
3097         spin_unlock_irq(&cm_id_priv->lock);
3098
3099         if (ret)
3100                 cm_process_work(cm_id_priv, work);
3101         else
3102                 cm_deref_id(cm_id_priv);
3103         return 0;
3104 out:
3105         cm_deref_id(cm_id_priv);
3106         return -EINVAL;
3107 }
3108
3109 static int cm_timewait_handler(struct cm_work *work)
3110 {
3111         struct cm_timewait_info *timewait_info;
3112         struct cm_id_private *cm_id_priv;
3113         int ret;
3114
3115         timewait_info = (struct cm_timewait_info *)work;
3116         spin_lock_irq(&cm.lock);
3117         list_del(&timewait_info->list);
3118         spin_unlock_irq(&cm.lock);
3119
3120         cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3121                                    timewait_info->work.remote_id);
3122         if (!cm_id_priv)
3123                 return -EINVAL;
3124
3125         spin_lock_irq(&cm_id_priv->lock);
3126         if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3127             cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3128                 spin_unlock_irq(&cm_id_priv->lock);
3129                 goto out;
3130         }
3131         cm_id_priv->id.state = IB_CM_IDLE;
3132         ret = atomic_inc_and_test(&cm_id_priv->work_count);
3133         if (!ret)
3134                 list_add_tail(&work->list, &cm_id_priv->work_list);
3135         spin_unlock_irq(&cm_id_priv->lock);
3136
3137         if (ret)
3138                 cm_process_work(cm_id_priv, work);
3139         else
3140                 cm_deref_id(cm_id_priv);
3141         return 0;
3142 out:
3143         cm_deref_id(cm_id_priv);
3144         return -EINVAL;
3145 }
3146
3147 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3148                                struct cm_id_private *cm_id_priv,
3149                                struct ib_cm_sidr_req_param *param)
3150 {
3151         cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3152                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
3153         sidr_req_msg->request_id = cm_id_priv->id.local_id;
3154         sidr_req_msg->pkey = param->path->pkey;
3155         sidr_req_msg->service_id = param->service_id;
3156
3157         if (param->private_data && param->private_data_len)
3158                 memcpy(sidr_req_msg->private_data, param->private_data,
3159                        param->private_data_len);
3160 }
3161
3162 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3163                         struct ib_cm_sidr_req_param *param)
3164 {
3165         struct cm_id_private *cm_id_priv;
3166         struct ib_mad_send_buf *msg;
3167         unsigned long flags;
3168         int ret;
3169
3170         if (!param->path || (param->private_data &&
3171              param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3172                 return -EINVAL;
3173
3174         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3175         ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
3176         if (ret)
3177                 goto out;
3178
3179         cm_id->service_id = param->service_id;
3180         cm_id->service_mask = ~cpu_to_be64(0);
3181         cm_id_priv->timeout_ms = param->timeout_ms;
3182         cm_id_priv->max_cm_retries = param->max_cm_retries;
3183         ret = cm_alloc_msg(cm_id_priv, &msg);
3184         if (ret)
3185                 goto out;
3186
3187         cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3188                            param);
3189         msg->timeout_ms = cm_id_priv->timeout_ms;
3190         msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3191
3192         spin_lock_irqsave(&cm_id_priv->lock, flags);
3193         if (cm_id->state == IB_CM_IDLE)
3194                 ret = ib_post_send_mad(msg, NULL);
3195         else
3196                 ret = -EINVAL;
3197
3198         if (ret) {
3199                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3200                 cm_free_msg(msg);
3201                 goto out;
3202         }
3203         cm_id->state = IB_CM_SIDR_REQ_SENT;
3204         cm_id_priv->msg = msg;
3205         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3206 out:
3207         return ret;
3208 }
3209 EXPORT_SYMBOL(ib_send_cm_sidr_req);
3210
3211 static void cm_format_sidr_req_event(struct cm_work *work,
3212                                      struct ib_cm_id *listen_id)
3213 {
3214         struct cm_sidr_req_msg *sidr_req_msg;
3215         struct ib_cm_sidr_req_event_param *param;
3216
3217         sidr_req_msg = (struct cm_sidr_req_msg *)
3218                                 work->mad_recv_wc->recv_buf.mad;
3219         param = &work->cm_event.param.sidr_req_rcvd;
3220         param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3221         param->listen_id = listen_id;
3222         param->service_id = sidr_req_msg->service_id;
3223         param->bth_pkey = cm_get_bth_pkey(work);
3224         param->port = work->port->port_num;
3225         work->cm_event.private_data = &sidr_req_msg->private_data;
3226 }
3227
3228 static int cm_sidr_req_handler(struct cm_work *work)
3229 {
3230         struct ib_cm_id *cm_id;
3231         struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3232         struct cm_sidr_req_msg *sidr_req_msg;
3233         struct ib_wc *wc;
3234
3235         cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3236         if (IS_ERR(cm_id))
3237                 return PTR_ERR(cm_id);
3238         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3239
3240         /* Record SGID/SLID and request ID for lookup. */
3241         sidr_req_msg = (struct cm_sidr_req_msg *)
3242                                 work->mad_recv_wc->recv_buf.mad;
3243         wc = work->mad_recv_wc->wc;
3244         cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3245         cm_id_priv->av.dgid.global.interface_id = 0;
3246         cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3247                                 work->mad_recv_wc->recv_buf.grh,
3248                                 &cm_id_priv->av);
3249         cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3250         cm_id_priv->tid = sidr_req_msg->hdr.tid;
3251         atomic_inc(&cm_id_priv->work_count);
3252
3253         spin_lock_irq(&cm.lock);
3254         cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3255         if (cur_cm_id_priv) {
3256                 spin_unlock_irq(&cm.lock);
3257                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3258                                 counter[CM_SIDR_REQ_COUNTER]);
3259                 goto out; /* Duplicate message. */
3260         }
3261         cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3262         cur_cm_id_priv = cm_find_listen(cm_id->device,
3263                                         sidr_req_msg->service_id);
3264         if (!cur_cm_id_priv) {
3265                 spin_unlock_irq(&cm.lock);
3266                 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3267                 goto out; /* No match. */
3268         }
3269         atomic_inc(&cur_cm_id_priv->refcount);
3270         atomic_inc(&cm_id_priv->refcount);
3271         spin_unlock_irq(&cm.lock);
3272
3273         cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3274         cm_id_priv->id.context = cur_cm_id_priv->id.context;
3275         cm_id_priv->id.service_id = sidr_req_msg->service_id;
3276         cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3277
3278         cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3279         cm_process_work(cm_id_priv, work);
3280         cm_deref_id(cur_cm_id_priv);
3281         return 0;
3282 out:
3283         ib_destroy_cm_id(&cm_id_priv->id);
3284         return -EINVAL;
3285 }
3286
3287 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3288                                struct cm_id_private *cm_id_priv,
3289                                struct ib_cm_sidr_rep_param *param)
3290 {
3291         cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3292                           cm_id_priv->tid);
3293         sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3294         sidr_rep_msg->status = param->status;
3295         cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3296         sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3297         sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3298
3299         if (param->info && param->info_length)
3300                 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3301
3302         if (param->private_data && param->private_data_len)
3303                 memcpy(sidr_rep_msg->private_data, param->private_data,
3304                        param->private_data_len);
3305 }
3306
3307 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3308                         struct ib_cm_sidr_rep_param *param)
3309 {
3310         struct cm_id_private *cm_id_priv;
3311         struct ib_mad_send_buf *msg;
3312         unsigned long flags;
3313         int ret;
3314
3315         if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3316             (param->private_data &&
3317              param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3318                 return -EINVAL;
3319
3320         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3321         spin_lock_irqsave(&cm_id_priv->lock, flags);
3322         if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3323                 ret = -EINVAL;
3324                 goto error;
3325         }
3326
3327         ret = cm_alloc_msg(cm_id_priv, &msg);
3328         if (ret)
3329                 goto error;
3330
3331         cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3332                            param);
3333         ret = ib_post_send_mad(msg, NULL);
3334         if (ret) {
3335                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3336                 cm_free_msg(msg);
3337                 return ret;
3338         }
3339         cm_id->state = IB_CM_IDLE;
3340         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3341
3342         spin_lock_irqsave(&cm.lock, flags);
3343         if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3344                 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3345                 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3346         }
3347         spin_unlock_irqrestore(&cm.lock, flags);
3348         return 0;
3349
3350 error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3351         return ret;
3352 }
3353 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3354
3355 static void cm_format_sidr_rep_event(struct cm_work *work)
3356 {
3357         struct cm_sidr_rep_msg *sidr_rep_msg;
3358         struct ib_cm_sidr_rep_event_param *param;
3359
3360         sidr_rep_msg = (struct cm_sidr_rep_msg *)
3361                                 work->mad_recv_wc->recv_buf.mad;
3362         param = &work->cm_event.param.sidr_rep_rcvd;
3363         param->status = sidr_rep_msg->status;
3364         param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3365         param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3366         param->info = &sidr_rep_msg->info;
3367         param->info_len = sidr_rep_msg->info_length;
3368         work->cm_event.private_data = &sidr_rep_msg->private_data;
3369 }
3370
3371 static int cm_sidr_rep_handler(struct cm_work *work)
3372 {
3373         struct cm_sidr_rep_msg *sidr_rep_msg;
3374         struct cm_id_private *cm_id_priv;
3375
3376         sidr_rep_msg = (struct cm_sidr_rep_msg *)
3377                                 work->mad_recv_wc->recv_buf.mad;
3378         cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3379         if (!cm_id_priv)
3380                 return -EINVAL; /* Unmatched reply. */
3381
3382         spin_lock_irq(&cm_id_priv->lock);
3383         if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3384                 spin_unlock_irq(&cm_id_priv->lock);
3385                 goto out;
3386         }
3387         cm_id_priv->id.state = IB_CM_IDLE;
3388         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3389         spin_unlock_irq(&cm_id_priv->lock);
3390
3391         cm_format_sidr_rep_event(work);
3392         cm_process_work(cm_id_priv, work);
3393         return 0;
3394 out:
3395         cm_deref_id(cm_id_priv);
3396         return -EINVAL;
3397 }
3398
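     /*
      * msg->context[0] holds the owning cm_id_private and msg->context[1]
      * the cm_id state at the time the MAD was posted; both are checked
      * below so that completions for stale sends are simply discarded.
      */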
3399 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3400                                   enum ib_wc_status wc_status)
3401 {
3402         struct cm_id_private *cm_id_priv;
3403         struct ib_cm_event cm_event;
3404         enum ib_cm_state state;
3405         int ret;
3406
3407         memset(&cm_event, 0, sizeof cm_event);
3408         cm_id_priv = msg->context[0];
3409
3410         /* Discard old sends or ones without a response. */
3411         spin_lock_irq(&cm_id_priv->lock);
3412         state = (enum ib_cm_state) (unsigned long) msg->context[1];
3413         if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3414                 goto discard;
3415
3416         pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
3417                              state, ib_wc_status_msg(wc_status));
3418         switch (state) {
3419         case IB_CM_REQ_SENT:
3420         case IB_CM_MRA_REQ_RCVD:
3421                 cm_reset_to_idle(cm_id_priv);
3422                 cm_event.event = IB_CM_REQ_ERROR;
3423                 break;
3424         case IB_CM_REP_SENT:
3425         case IB_CM_MRA_REP_RCVD:
3426                 cm_reset_to_idle(cm_id_priv);
3427                 cm_event.event = IB_CM_REP_ERROR;
3428                 break;
3429         case IB_CM_DREQ_SENT:
3430                 cm_enter_timewait(cm_id_priv);
3431                 cm_event.event = IB_CM_DREQ_ERROR;
3432                 break;
3433         case IB_CM_SIDR_REQ_SENT:
3434                 cm_id_priv->id.state = IB_CM_IDLE;
3435                 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3436                 break;
3437         default:
3438                 goto discard;
3439         }
3440         spin_unlock_irq(&cm_id_priv->lock);
3441         cm_event.param.send_status = wc_status;
3442
3443         /* No other events can occur on the cm_id at this point. */
3444         ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3445         cm_free_msg(msg);
3446         if (ret)
3447                 ib_destroy_cm_id(&cm_id_priv->id);
3448         return;
3449 discard:
3450         spin_unlock_irq(&cm_id_priv->lock);
3451         cm_free_msg(msg);
3452 }
3453
3454 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3455                             struct ib_mad_send_wc *mad_send_wc)
3456 {
3457         struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3458         struct cm_port *port;
3459         u16 attr_index;
3460
3461         port = mad_agent->context;
3462         attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3463                                   msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3464
3465         /*
3466          * If the send was in response to a received message (context[0] is not
3467          * set to a cm_id), and is not a REJ, then it is a send that was
3468          * manually retried.
3469          */
3470         if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3471                 msg->retries = 1;
3472
3473         atomic_long_add(1 + msg->retries,
3474                         &port->counter_group[CM_XMIT].counter[attr_index]);
3475         if (msg->retries)
3476                 atomic_long_add(msg->retries,
3477                                 &port->counter_group[CM_XMIT_RETRIES].
3478                                 counter[attr_index]);
3479
3480         switch (mad_send_wc->status) {
3481         case IB_WC_SUCCESS:
3482         case IB_WC_WR_FLUSH_ERR:
3483                 cm_free_msg(msg);
3484                 break;
3485         default:
3486                 if (msg->context[0] && msg->context[1])
3487                         cm_process_send_error(msg, mad_send_wc->status);
3488                 else
3489                         cm_free_msg(msg);
3490                 break;
3491         }
3492 }
3493
3494 static void cm_work_handler(struct work_struct *_work)
3495 {
3496         struct cm_work *work = container_of(_work, struct cm_work, work.work);
3497         int ret;
3498
3499         switch (work->cm_event.event) {
3500         case IB_CM_REQ_RECEIVED:
3501                 ret = cm_req_handler(work);
3502                 break;
3503         case IB_CM_MRA_RECEIVED:
3504                 ret = cm_mra_handler(work);
3505                 break;
3506         case IB_CM_REJ_RECEIVED:
3507                 ret = cm_rej_handler(work);
3508                 break;
3509         case IB_CM_REP_RECEIVED:
3510                 ret = cm_rep_handler(work);
3511                 break;
3512         case IB_CM_RTU_RECEIVED:
3513                 ret = cm_rtu_handler(work);
3514                 break;
3515         case IB_CM_USER_ESTABLISHED:
3516                 ret = cm_establish_handler(work);
3517                 break;
3518         case IB_CM_DREQ_RECEIVED:
3519                 ret = cm_dreq_handler(work);
3520                 break;
3521         case IB_CM_DREP_RECEIVED:
3522                 ret = cm_drep_handler(work);
3523                 break;
3524         case IB_CM_SIDR_REQ_RECEIVED:
3525                 ret = cm_sidr_req_handler(work);
3526                 break;
3527         case IB_CM_SIDR_REP_RECEIVED:
3528                 ret = cm_sidr_rep_handler(work);
3529                 break;
3530         case IB_CM_LAP_RECEIVED:
3531                 ret = cm_lap_handler(work);
3532                 break;
3533         case IB_CM_APR_RECEIVED:
3534                 ret = cm_apr_handler(work);
3535                 break;
3536         case IB_CM_TIMEWAIT_EXIT:
3537                 ret = cm_timewait_handler(work);
3538                 break;
3539         default:
3540                 ret = -EINVAL;
3541                 break;
3542         }
3543         if (ret)
3544                 cm_free_work(work);
3545 }
3546
3547 static int cm_establish(struct ib_cm_id *cm_id)
3548 {
3549         struct cm_id_private *cm_id_priv;
3550         struct cm_work *work;
3551         unsigned long flags;
3552         int ret = 0;
3553         struct cm_device *cm_dev;
3554
3555         cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3556         if (!cm_dev)
3557                 return -ENODEV;
3558
3559         work = kmalloc(sizeof *work, GFP_ATOMIC);
3560         if (!work)
3561                 return -ENOMEM;
3562
3563         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3564         spin_lock_irqsave(&cm_id_priv->lock, flags);
3565         switch (cm_id->state) {
3567         case IB_CM_REP_SENT:
3568         case IB_CM_MRA_REP_RCVD:
3569                 cm_id->state = IB_CM_ESTABLISHED;
3570                 break;
3571         case IB_CM_ESTABLISHED:
3572                 ret = -EISCONN;
3573                 break;
3574         default:
3575                 ret = -EINVAL;
3576                 break;
3577         }
3578         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3579
3580         if (ret) {
3581                 kfree(work);
3582                 goto out;
3583         }
3584
3585         /*
3586          * The CM worker thread may try to destroy the cm_id before it
3587          * can execute this work item.  To prevent potential deadlock,
3588          * we need to find the cm_id once we're in the context of the
3589          * worker thread, rather than holding a reference on it.
3590          */
3591         INIT_DELAYED_WORK(&work->work, cm_work_handler);
3592         work->local_id = cm_id->local_id;
3593         work->remote_id = cm_id->remote_id;
3594         work->mad_recv_wc = NULL;
3595         work->cm_event.event = IB_CM_USER_ESTABLISHED;
3596
3597         /* Check if the device has started its remove_one */
3598         spin_lock_irqsave(&cm.lock, flags);
3599         if (!cm_dev->going_down) {
3600                 queue_delayed_work(cm.wq, &work->work, 0);
3601         } else {
3602                 kfree(work);
3603                 ret = -ENODEV;
3604         }
3605         spin_unlock_irqrestore(&cm.lock, flags);
3606
3607 out:
3608         return ret;
3609 }
3610
3611 static int cm_migrate(struct ib_cm_id *cm_id)
3612 {
3613         struct cm_id_private *cm_id_priv;
3614         struct cm_av tmp_av;
3615         unsigned long flags;
3616         int tmp_send_port_not_ready;
3617         int ret = 0;
3618
3619         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3620         spin_lock_irqsave(&cm_id_priv->lock, flags);
3621         if (cm_id->state == IB_CM_ESTABLISHED &&
3622             (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3623              cm_id->lap_state == IB_CM_LAP_IDLE)) {
3624                 cm_id->lap_state = IB_CM_LAP_IDLE;
3625                 /* Swap address vector */
3626                 tmp_av = cm_id_priv->av;
3627                 cm_id_priv->av = cm_id_priv->alt_av;
3628                 cm_id_priv->alt_av = tmp_av;
3629                 /* Swap port send ready state */
3630                 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3631                 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3632                 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3633         } else
3634                 ret = -EINVAL;
3635         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3636
3637         return ret;
3638 }
3639
3640 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3641 {
3642         int ret;
3643
3644         switch (event) {
3645         case IB_EVENT_COMM_EST:
3646                 ret = cm_establish(cm_id);
3647                 break;
3648         case IB_EVENT_PATH_MIG:
3649                 ret = cm_migrate(cm_id);
3650                 break;
3651         default:
3652                 ret = -EINVAL;
3653         }
3654         return ret;
3655 }
3656 EXPORT_SYMBOL(ib_cm_notify);
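     /*
      * Typical use (informational): a consumer calls
      * ib_cm_notify(cm_id, IB_EVENT_COMM_EST) when data arrives on a new
      * connection before the RTU has been processed, and
      * ib_cm_notify(cm_id, IB_EVENT_PATH_MIG) after a hardware path
      * migration, matching cm_establish() and cm_migrate() above.
      */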
3657
3658 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3659                             struct ib_mad_send_buf *send_buf,
3660                             struct ib_mad_recv_wc *mad_recv_wc)
3661 {
3662         struct cm_port *port = mad_agent->context;
3663         struct cm_work *work;
3664         enum ib_cm_event_type event;
3665         u16 attr_id;
3666         int paths = 0;
3667         int going_down = 0;
3668
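             /*
              * A REQ carries one path record, or two when an alternate path
              * (non-zero alt_local_lid) is present; a LAP carries one.  Room
              * for them is reserved in the work allocation below.
              */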
3669         switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3670         case CM_REQ_ATTR_ID:
3671                 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3672                                                     alt_local_lid != 0);
3673                 event = IB_CM_REQ_RECEIVED;
3674                 break;
3675         case CM_MRA_ATTR_ID:
3676                 event = IB_CM_MRA_RECEIVED;
3677                 break;
3678         case CM_REJ_ATTR_ID:
3679                 event = IB_CM_REJ_RECEIVED;
3680                 break;
3681         case CM_REP_ATTR_ID:
3682                 event = IB_CM_REP_RECEIVED;
3683                 break;
3684         case CM_RTU_ATTR_ID:
3685                 event = IB_CM_RTU_RECEIVED;
3686                 break;
3687         case CM_DREQ_ATTR_ID:
3688                 event = IB_CM_DREQ_RECEIVED;
3689                 break;
3690         case CM_DREP_ATTR_ID:
3691                 event = IB_CM_DREP_RECEIVED;
3692                 break;
3693         case CM_SIDR_REQ_ATTR_ID:
3694                 event = IB_CM_SIDR_REQ_RECEIVED;
3695                 break;
3696         case CM_SIDR_REP_ATTR_ID:
3697                 event = IB_CM_SIDR_REP_RECEIVED;
3698                 break;
3699         case CM_LAP_ATTR_ID:
3700                 paths = 1;
3701                 event = IB_CM_LAP_RECEIVED;
3702                 break;
3703         case CM_APR_ATTR_ID:
3704                 event = IB_CM_APR_RECEIVED;
3705                 break;
3706         default:
3707                 ib_free_recv_mad(mad_recv_wc);
3708                 return;
3709         }
3710
3711         attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3712         atomic_long_inc(&port->counter_group[CM_RECV].
3713                         counter[attr_id - CM_ATTR_ID_OFFSET]);
3714
3715         work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3716                        GFP_KERNEL);
3717         if (!work) {
3718                 ib_free_recv_mad(mad_recv_wc);
3719                 return;
3720         }
3721
3722         INIT_DELAYED_WORK(&work->work, cm_work_handler);
3723         work->cm_event.event = event;
3724         work->mad_recv_wc = mad_recv_wc;
3725         work->port = port;
3726
3727         /* Check if the device has started its remove_one */
3728         spin_lock_irq(&cm.lock);
3729         if (!port->cm_dev->going_down)
3730                 queue_delayed_work(cm.wq, &work->work, 0);
3731         else
3732                 going_down = 1;
3733         spin_unlock_irq(&cm.lock);
3734
3735         if (going_down) {
3736                 kfree(work);
3737                 ib_free_recv_mad(mad_recv_wc);
3738         }
3739 }
3740
3741 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3742                                 struct ib_qp_attr *qp_attr,
3743                                 int *qp_attr_mask)
3744 {
3745         unsigned long flags;
3746         int ret;
3747
3748         spin_lock_irqsave(&cm_id_priv->lock, flags);
3749         switch (cm_id_priv->id.state) {
3750         case IB_CM_REQ_SENT:
3751         case IB_CM_MRA_REQ_RCVD:
3752         case IB_CM_REQ_RCVD:
3753         case IB_CM_MRA_REQ_SENT:
3754         case IB_CM_REP_RCVD:
3755         case IB_CM_MRA_REP_SENT:
3756         case IB_CM_REP_SENT:
3757         case IB_CM_MRA_REP_RCVD:
3758         case IB_CM_ESTABLISHED:
3759                 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3760                                 IB_QP_PKEY_INDEX | IB_QP_PORT;
3761                 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3762                 if (cm_id_priv->responder_resources)
3763                         qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3764                                                     IB_ACCESS_REMOTE_ATOMIC;
3765                 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3766                 qp_attr->port_num = cm_id_priv->av.port->port_num;
3767                 ret = 0;
3768                 break;
3769         default:
3770                 ret = -EINVAL;
3771                 break;
3772         }
3773         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3774         return ret;
3775 }
3776
3777 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3778                                struct ib_qp_attr *qp_attr,
3779                                int *qp_attr_mask)
3780 {
3781         unsigned long flags;
3782         int ret;
3783
3784         spin_lock_irqsave(&cm_id_priv->lock, flags);
3785         switch (cm_id_priv->id.state) {
3786         case IB_CM_REQ_RCVD:
3787         case IB_CM_MRA_REQ_SENT:
3788         case IB_CM_REP_RCVD:
3789         case IB_CM_MRA_REP_SENT:
3790         case IB_CM_REP_SENT:
3791         case IB_CM_MRA_REP_RCVD:
3792         case IB_CM_ESTABLISHED:
3793                 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3794                                 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3795                 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3796                 qp_attr->path_mtu = cm_id_priv->path_mtu;
3797                 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3798                 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3799                 if (cm_id_priv->qp_type == IB_QPT_RC ||
3800                     cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
3801                         *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3802                                          IB_QP_MIN_RNR_TIMER;
3803                         qp_attr->max_dest_rd_atomic =
3804                                         cm_id_priv->responder_resources;
3805                         qp_attr->min_rnr_timer = 0;
3806                 }
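                     /*
                      * A resolved alternate AH (non-zero DLID) means an
                      * alternate path was loaded, so program it as well.
                      */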
3807                 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
3808                         *qp_attr_mask |= IB_QP_ALT_PATH;
3809                         qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3810                         qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3811                         qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3812                         qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3813                 }
3814                 ret = 0;
3815                 break;
3816         default:
3817                 ret = -EINVAL;
3818                 break;
3819         }
3820         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3821         return ret;
3822 }
3823
3824 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3825                                struct ib_qp_attr *qp_attr,
3826                                int *qp_attr_mask)
3827 {
3828         unsigned long flags;
3829         int ret;
3830
3831         spin_lock_irqsave(&cm_id_priv->lock, flags);
3832         switch (cm_id_priv->id.state) {
3833         /* Allow transition to RTS before sending REP */
3834         case IB_CM_REQ_RCVD:
3835         case IB_CM_MRA_REQ_SENT:
3836
3837         case IB_CM_REP_RCVD:
3838         case IB_CM_MRA_REP_SENT:
3839         case IB_CM_REP_SENT:
3840         case IB_CM_MRA_REP_RCVD:
3841         case IB_CM_ESTABLISHED:
3842                 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3843                         *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3844                         qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3845                         switch (cm_id_priv->qp_type) {
3846                         case IB_QPT_RC:
3847                         case IB_QPT_XRC_INI:
3848                                 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
3849                                                  IB_QP_MAX_QP_RD_ATOMIC;
3850                                 qp_attr->retry_cnt = cm_id_priv->retry_count;
3851                                 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3852                                 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3853                                 /* fall through */
3854                         case IB_QPT_XRC_TGT:
3855                                 *qp_attr_mask |= IB_QP_TIMEOUT;
3856                                 qp_attr->timeout = cm_id_priv->av.timeout;
3857                                 break;
3858                         default:
3859                                 break;
3860                         }
3861                         if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
3862                                 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3863                                 qp_attr->path_mig_state = IB_MIG_REARM;
3864                         }
3865                 } else {
3866                         *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
3867                         qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3868                         qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3869                         qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3870                         qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3871                         qp_attr->path_mig_state = IB_MIG_REARM;
3872                 }
3873                 ret = 0;
3874                 break;
3875         default:
3876                 ret = -EINVAL;
3877                 break;
3878         }
3879         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3880         return ret;
3881 }
3882
3883 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3884                        struct ib_qp_attr *qp_attr,
3885                        int *qp_attr_mask)
3886 {
3887         struct cm_id_private *cm_id_priv;
3888         int ret;
3889
3890         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3891         switch (qp_attr->qp_state) {
3892         case IB_QPS_INIT:
3893                 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3894                 break;
3895         case IB_QPS_RTR:
3896                 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3897                 break;
3898         case IB_QPS_RTS:
3899                 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3900                 break;
3901         default:
3902                 ret = -EINVAL;
3903                 break;
3904         }
3905         return ret;
3906 }
3907 EXPORT_SYMBOL(ib_cm_init_qp_attr);
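     /*
      * Illustrative sketch only (not part of this file; qp and cm_id are
      * assumed to exist): a consumer usually walks its QP through INIT,
      * RTR and RTS by asking the CM for the attributes of each step and
      * then applying them, e.g.:
      *
      *	static int example_modify_to_init(struct ib_qp *qp,
      *					  struct ib_cm_id *cm_id)
      *	{
      *		struct ib_qp_attr qp_attr = {};
      *		int qp_attr_mask, ret;
      *
      *		qp_attr.qp_state = IB_QPS_INIT;
      *		ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
      *		if (ret)
      *			return ret;
      *		return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
      *	}
      *
      * and then repeats the same two calls with IB_QPS_RTR and IB_QPS_RTS.
      */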
3908
3909 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
3910                                char *buf)
3911 {
3912         struct cm_counter_group *group;
3913         struct cm_counter_attribute *cm_attr;
3914
3915         group = container_of(obj, struct cm_counter_group, obj);
3916         cm_attr = container_of(attr, struct cm_counter_attribute, attr);
3917
3918         return sprintf(buf, "%ld\n",
3919                        atomic_long_read(&group->counter[cm_attr->index]));
3920 }
3921
3922 static const struct sysfs_ops cm_counter_ops = {
3923         .show = cm_show_counter
3924 };
3925
3926 static struct kobj_type cm_counter_obj_type = {
3927         .sysfs_ops = &cm_counter_ops,
3928         .default_attrs = cm_counter_default_attrs
3929 };
3930
3931 static void cm_release_port_obj(struct kobject *obj)
3932 {
3933         struct cm_port *cm_port;
3934
3935         cm_port = container_of(obj, struct cm_port, port_obj);
3936         kfree(cm_port);
3937 }
3938
3939 static struct kobj_type cm_port_obj_type = {
3940         .release = cm_release_port_obj
3941 };
3942
3943 static char *cm_devnode(struct device *dev, umode_t *mode)
3944 {
3945         if (mode)
3946                 *mode = 0666;
3947         return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
3948 }
3949
3950 struct class cm_class = {
3951         .owner   = THIS_MODULE,
3952         .name    = "infiniband_cm",
3953         .devnode = cm_devnode,
3954 };
3955 EXPORT_SYMBOL(cm_class);
3956
3957 static int cm_create_port_fs(struct cm_port *port)
3958 {
3959         int i, ret;
3960
3961         ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
3962                                    &port->cm_dev->device->kobj,
3963                                    "%d", port->port_num);
3964         if (ret) {
3965                 kfree(port);
3966                 return ret;
3967         }
3968
3969         for (i = 0; i < CM_COUNTER_GROUPS; i++) {
3970                 ret = kobject_init_and_add(&port->counter_group[i].obj,
3971                                            &cm_counter_obj_type,
3972                                            &port->port_obj,
3973                                            "%s", counter_group_names[i]);
3974                 if (ret)
3975                         goto error;
3976         }
3977
3978         return 0;
3979
3980 error:
3981         while (i--)
3982                 kobject_put(&port->counter_group[i].obj);
3983         kobject_put(&port->port_obj);
3984         return ret;
3985
3986 }
3987
3988 static void cm_remove_port_fs(struct cm_port *port)
3989 {
3990         int i;
3991
3992         for (i = 0; i < CM_COUNTER_GROUPS; i++)
3993                 kobject_put(&port->counter_group[i].obj);
3994
3995         kobject_put(&port->port_obj);
3996 }
3997
3998 static void cm_add_one(struct ib_device *ib_device)
3999 {
4000         struct cm_device *cm_dev;
4001         struct cm_port *port;
4002         struct ib_mad_reg_req reg_req = {
4003                 .mgmt_class = IB_MGMT_CLASS_CM,
4004                 .mgmt_class_version = IB_CM_CLASS_VERSION,
4005         };
4006         struct ib_port_modify port_modify = {
4007                 .set_port_cap_mask = IB_PORT_CM_SUP
4008         };
4009         unsigned long flags;
4010         int ret;
4011         int count = 0;
4012         u8 i;
4013
4014         cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
4015                          ib_device->phys_port_cnt, GFP_KERNEL);
4016         if (!cm_dev)
4017                 return;
4018
4019         cm_dev->ib_device = ib_device;
4020         cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
4021         cm_dev->going_down = 0;
4022         cm_dev->device = device_create(&cm_class, &ib_device->dev,
4023                                        MKDEV(0, 0), NULL,
4024                                        "%s", ib_device->name);
4025         if (IS_ERR(cm_dev->device)) {
4026                 kfree(cm_dev);
4027                 return;
4028         }
4029
4030         set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4031         for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4032                 if (!rdma_cap_ib_cm(ib_device, i))
4033                         continue;
4034
4035                 port = kzalloc(sizeof *port, GFP_KERNEL);
4036                 if (!port)
4037                         goto error1;
4038
4039                 cm_dev->port[i-1] = port;
4040                 port->cm_dev = cm_dev;
4041                 port->port_num = i;
4042
4043                 INIT_LIST_HEAD(&port->cm_priv_prim_list);
4044                 INIT_LIST_HEAD(&port->cm_priv_altr_list);
4045
4046                 ret = cm_create_port_fs(port);
4047                 if (ret)
4048                         goto error1;
4049
4050                 port->mad_agent = ib_register_mad_agent(ib_device, i,
4051                                                         IB_QPT_GSI,
4052                                                         &reg_req,
4053                                                         0,
4054                                                         cm_send_handler,
4055                                                         cm_recv_handler,
4056                                                         port,
4057                                                         0);
4058                 if (IS_ERR(port->mad_agent))
4059                         goto error2;
4060
4061                 ret = ib_modify_port(ib_device, i, 0, &port_modify);
4062                 if (ret)
4063                         goto error3;
4064
4065                 count++;
4066         }
4067
4068         if (!count)
4069                 goto free;
4070
4071         ib_set_client_data(ib_device, &cm_client, cm_dev);
4072
4073         write_lock_irqsave(&cm.device_lock, flags);
4074         list_add_tail(&cm_dev->list, &cm.device_list);
4075         write_unlock_irqrestore(&cm.device_lock, flags);
4076         return;
4077
4078 error3:
4079         ib_unregister_mad_agent(port->mad_agent);
4080 error2:
4081         cm_remove_port_fs(port);
4082 error1:
4083         port_modify.set_port_cap_mask = 0;
4084         port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
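             /*
              * Roll back the ports that were fully initialized before the
              * failure, in reverse order.
              */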
4085         while (--i) {
4086                 if (!rdma_cap_ib_cm(ib_device, i))
4087                         continue;
4088
4089                 port = cm_dev->port[i-1];
4090                 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4091                 ib_unregister_mad_agent(port->mad_agent);
4092                 cm_remove_port_fs(port);
4093         }
4094 free:
4095         device_unregister(cm_dev->device);
4096         kfree(cm_dev);
4097 }
4098
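/*
 * Device removal callback.  'client_data' is the cm_device that cm_add_one()
 * stored with ib_set_client_data().  Unlink the device, set going_down so the
 * receive path stops queueing new work, then tear down each port's MAD agent.
 */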
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
        struct cm_device *cm_dev = client_data;
        struct cm_port *port;
        struct cm_id_private *cm_id_priv;
        struct ib_mad_agent *cur_mad_agent;
        struct ib_port_modify port_modify = {
                .clr_port_cap_mask = IB_PORT_CM_SUP
        };
        unsigned long flags;
        int i;

        if (!cm_dev)
                return;

        write_lock_irqsave(&cm.device_lock, flags);
        list_del(&cm_dev->list);
        write_unlock_irqrestore(&cm.device_lock, flags);

        spin_lock_irq(&cm.lock);
        cm_dev->going_down = 1;
        spin_unlock_irq(&cm.lock);

        for (i = 1; i <= ib_device->phys_port_cnt; i++) {
                if (!rdma_cap_ib_cm(ib_device, i))
                        continue;

                port = cm_dev->port[i-1];
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
                /* Mark all the cm_ids bound to this port as no longer valid */
                spin_lock_irq(&cm.lock);
                list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
                        cm_id_priv->altr_send_port_not_ready = 1;
                list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
                        cm_id_priv->prim_send_port_not_ready = 1;
                spin_unlock_irq(&cm.lock);
                /*
                 * Flush the workqueue only after going_down has been set:
                 * this guarantees that the receive handler will not queue
                 * any new work, so it is then safe to unregister the MAD
                 * agent below.
                 */
                flush_workqueue(cm.wq);
                spin_lock_irq(&cm.state_lock);
                cur_mad_agent = port->mad_agent;
                port->mad_agent = NULL;
                spin_unlock_irq(&cm.state_lock);
                ib_unregister_mad_agent(cur_mad_agent);
                cm_remove_port_fs(port);
        }

        device_unregister(cm_dev->device);
        kfree(cm_dev);
}

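/*
 * Module initialization: set up the global cm state (locks, lookup tables,
 * idr) before registering with the IB core, so that cm_add_one() can run
 * for existing devices as soon as ib_register_client() returns.
 */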
static int __init ib_cm_init(void)
{
        int ret;

        memset(&cm, 0, sizeof cm);
        INIT_LIST_HEAD(&cm.device_list);
        rwlock_init(&cm.device_lock);
        spin_lock_init(&cm.lock);
        spin_lock_init(&cm.state_lock);
        cm.listen_service_table = RB_ROOT;
        cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
        cm.remote_id_table = RB_ROOT;
        cm.remote_qp_table = RB_ROOT;
        cm.remote_sidr_table = RB_ROOT;
        idr_init(&cm.local_id_table);
        get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
        INIT_LIST_HEAD(&cm.timewait_list);

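        /*
         * The sysfs class, the workqueue and the IB client registration are
         * set up in that order and unwound in reverse on failure via the
         * error3/error2/error1 labels below.
         */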
        ret = class_register(&cm_class);
        if (ret) {
                ret = -ENOMEM;
                goto error1;
        }

        cm.wq = create_workqueue("ib_cm");
        if (!cm.wq) {
                ret = -ENOMEM;
                goto error2;
        }

        ret = ib_register_client(&cm_client);
        if (ret)
                goto error3;

        return 0;
error3:
        destroy_workqueue(cm.wq);
error2:
        class_unregister(&cm_class);
error1:
        idr_destroy(&cm.local_id_table);
        return ret;
}

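/*
 * Module exit: cancel any pending timewait work and unregister from the IB
 * core (which invokes cm_remove_one() for every remaining device), then free
 * the leftover timewait entries and the local id table.
 */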
static void __exit ib_cm_cleanup(void)
{
        struct cm_timewait_info *timewait_info, *tmp;

        spin_lock_irq(&cm.lock);
        list_for_each_entry(timewait_info, &cm.timewait_list, list)
                cancel_delayed_work(&timewait_info->work.work);
        spin_unlock_irq(&cm.lock);

        ib_unregister_client(&cm_client);
        destroy_workqueue(cm.wq);

        list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
                list_del(&timewait_info->list);
                kfree(timewait_info);
        }

        class_unregister(&cm_class);
        idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
