/* multicast.c, revision 324685 */
/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define	LINUXKPI_PARAM_PREFIX ibcore_

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device);

static struct ib_client mcast_client = {
	.name   = "ib_multicast",
	.add    = mcast_add_one,
	.remove = mcast_remove_one
};
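
/*
 * ib_core invokes mcast_add_one() for every IB device present when this
 * client is registered and for any device added later; mcast_remove_one()
 * runs when a device or the client is unregistered.
 */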

static struct ib_sa_client	sa_client;
static struct workqueue_struct	*mcast_wq;
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
	struct mcast_device	*dev;
	spinlock_t		lock;
	struct rb_root		table;
	atomic_t		refcount;
	struct completion	comp;
	u8			port_num;
};

struct mcast_device {
	struct ib_device	*device;
	struct ib_event_handler	event_handler;
	int			start_port;
	int			end_port;
	struct mcast_port	port[0];
};

enum mcast_state {
	MCAST_JOINING,
	MCAST_MEMBER,
	MCAST_ERROR,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_BUSY,
	MCAST_GROUP_ERROR,
	MCAST_PKEY_EVENT
};

enum {
	MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_member;

struct mcast_group {
	struct ib_sa_mcmember_rec rec;
	struct rb_node		node;
	struct mcast_port	*port;
	spinlock_t		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	struct list_head	active_list;
	struct mcast_member	*last_join;
	int			members[3];
	atomic_t		refcount;
	enum mcast_group_state	state;
	struct ib_sa_query	*query;
	int			query_id;
	u16			pkey_index;
	u8			leave_state;
	int			retries;
};

struct mcast_member {
	struct ib_sa_multicast	multicast;
	struct ib_sa_client	*client;
	struct mcast_group	*group;
	struct list_head	list;
	enum mcast_state	state;
	atomic_t		refcount;
	struct completion	comp;
};

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context);

static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}
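
/*
 * Note: allow_duplicates is set only when inserting a group created by a
 * join against the zero MGID (mgid0), for which the SA assigns the actual
 * MGID; see acquire_group() and join_handler().
 */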

static void deref_port(struct mcast_port *port)
{
	if (atomic_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (atomic_dec_and_test(&member->refcount))
		complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add_tail(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has three types of members: full member, non-member,
 * and send-only member.  We need to track the number of members of each
 * type based on their join state.  Adjust the count of members that
 * belong to the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}
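
/*
 * Worked example (illustrative): adjust_membership(group, 0x5, 1)
 * increments members[0] (full member, bit 0) and members[2] (send-only
 * member, bit 2), and leaves members[1] (non-member, bit 1) untouched.
 */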

/*
 * If a multicast group has zero members left for a particular join state,
 * but the group is still joined with the SA for that state, we need to
 * leave that join state.  Determine which join states we still belong to
 * that no longer have any active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}
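
/*
 * Worked example (illustrative): if the group's join_state is 0x3
 * (full + non-member) and the last full member leaves, so that
 * members[] = {0, n, 0}, leave_state comes out as 0x5 (bits 0 and 2),
 * and 0x5 & 0x3 = 0x1: only the full-member state is left at the SA.
 */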

static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (ib_sa_check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
				 IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
				 src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (ib_sa_check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
				 IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
				 src->rate, dst->rate))
		return -EINVAL;
	if (ib_sa_check_selector(comp_mask,
				 IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
				 IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
				 dst->packet_life_time_selector,
				 src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}
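
/*
 * Example (illustrative): a caller joining an existing group with
 * IB_SA_MCMEMBER_REC_QKEY set in comp_mask must request the qkey the
 * group already uses; otherwise cmp_rec() returns -EINVAL and the join
 * is failed locally, without a query to the SA.
 */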

static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}
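
/*
 * ib_sa_mcmember_rec_query() returns a non-negative query id on success;
 * send_join() above and send_leave() below both save it in
 * group->query_id and collapse the result to 0.
 */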
320
321static int send_leave(struct mcast_group *group, u8 leave_state)
322{
323	struct mcast_port *port = group->port;
324	struct ib_sa_mcmember_rec rec;
325	int ret;
326
327	rec = group->rec;
328	rec.join_state = leave_state;
329	group->leave_state = leave_state;
330
331	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
332				       port->port_num, IB_SA_METHOD_DELETE, &rec,
333				       IB_SA_MCMEMBER_REC_MGID     |
334				       IB_SA_MCMEMBER_REC_PORT_GID |
335				       IB_SA_MCMEMBER_REC_JOIN_STATE,
336				       3000, GFP_KERNEL, leave_handler,
337				       group, &group->query);
338	if (ret >= 0) {
339		group->query_id = ret;
340		ret = 0;
341	}
342	return ret;
343}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}

static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret = 0;
	u16 pkey_index;

	if (group->state == MCAST_PKEY_EVENT)
		ret = ib_find_pkey(group->port->dev->device,
				   group->port->port_num,
				   be16_to_cpu(group->rec.pkey), &pkey_index);

	spin_lock_irq(&group->lock);
	if (group->state == MCAST_PKEY_EVENT && !ret &&
	    group->pkey_index == pkey_index)
		goto out;

	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
out:
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}
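
/*
 * Group state machine, run from the workqueue: process queued join
 * requests, push group-wide errors (port or pkey events) out to members,
 * and issue an SA leave once a joined state has no members left.  The
 * group reference taken when the work was queued is dropped via
 * release_group() when the group returns to MCAST_IDLE.
 */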
static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {

		if (group->state != MCAST_BUSY) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		atomic_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}

/*
 * Fail a join request if it is still active, i.e. at the head of the
 * pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;
	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

	if (status)
		process_join_error(group, status);
	else {
		ib_find_pkey(group->port->dev->device, group->port->port_num,
			     be16_to_cpu(rec->pkey), &pkey_index);

		spin_lock_irq(&group->port->lock);
		group->rec = *rec;
		if (group->state == MCAST_BUSY &&
		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
			group->pkey_index = pkey_index;
		if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
			rb_erase(&group->node, &group->port->table);
			mcast_insert(group->port, group, 1);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	if (status && (group->retries > 0) &&
	    !send_leave(group, group->leave_state))
		group->retries--;
	else
		mcast_work_handler(&group->work);
}

static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u8 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the multicast structure in their callback.  They
	 * could then free the multicast structure before we can return from
	 * this routine.  So we save the pointer to return before queuing
	 * any callback.
	 */
	multicast = &member->multicast;
	queue_join(member);
	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);
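
/*
 * Minimal usage sketch (illustrative; my_sa_client, my_join_done, and
 * my_ctx are hypothetical caller-side names).  A nonzero return from the
 * callback makes the core free the handle itself:
 *
 *	mc = ib_sa_join_multicast(&my_sa_client, device, port_num, &rec,
 *				  IB_SA_MCMEMBER_REC_MGID |
 *				  IB_SA_MCMEMBER_REC_PORT_GID |
 *				  IB_SA_MCMEMBER_REC_PKEY |
 *				  IB_SA_MCMEMBER_REC_JOIN_STATE,
 *				  GFP_KERNEL, my_join_done, my_ctx);
 *	if (IS_ERR(mc))
 *		return PTR_ERR(mc);
 *
 * On success, my_join_done() eventually runs with the SA-assigned record
 * in mc->rec (MLID, MGID, etc.) and may fire before ib_sa_join_multicast()
 * returns; otherwise the handle is released with ib_sa_free_multicast().
 */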

void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	u8 p;

	ret = ib_find_cached_gid(device, &rec->port_gid, &p, &gid_index);
	if (ret)
		return ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->mlid);
	ah_attr->sl = rec->sl;
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	ah_attr->ah_flags = IB_AH_GRH;
	ah_attr->grh.dgid = rec->mgid;

	ah_attr->grh.sgid_index = (u8) gid_index;
	ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
	ah_attr->grh.hop_limit = rec->hop_limit;
	ah_attr->grh.traffic_class = rec->traffic_class;

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
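
/*
 * Typical consumer flow (sketch; pd and the error handling are the
 * caller's, and ib_create_ah() is assumed from the verbs API): after a
 * successful join, build an address handle for sends to the group:
 *
 *	struct ib_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (!ib_init_ah_from_mcmember(device, port_num, &multicast->rec,
 *				      &ah_attr))
 *		ah = ib_create_ah(pd, &ah_attr);
 */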

static void mcast_groups_event(struct mcast_port *port,
			       enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
	if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
	    IB_LINK_LAYER_INFINIBAND)
		return;

	index = event->element.port_num - dev->start_port;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
		break;
	case IB_EVENT_PKEY_CHANGE:
		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
		break;
	default:
		break;
	}
}

static void mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;
	int count = 0;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		dev->start_port = dev->end_port = 0;
	else {
		dev->start_port = 1;
		dev->end_port = device->phys_port_cnt;
	}

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_port_get_link_layer(device, dev->start_port + i) !=
		    IB_LINK_LAYER_INFINIBAND)
			continue;
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
		++count;
	}

	if (!count) {
		kfree(dev);
		return;
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_port_get_link_layer(device, dev->start_port + i) ==
		    IB_LINK_LAYER_INFINIBAND) {
			port = &dev->port[i];
			deref_port(port);
			wait_for_completion(&port->comp);
		}
	}

	kfree(dev);
}

int mcast_init(void)
{
	int ret;

	mcast_wq = create_singlethread_workqueue("ib_mcast");
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}