alias_GUID.c revision 271127
/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/***********************************************************/
/* This file supports the handling of the Alias GUID feature. */
/***********************************************************/
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_pack.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
#include <linux/delay.h>
#include "mlx4_ib.h"

/*
 * The driver keeps the current state of all GUIDs as they are in the HW.
 * Whenever an SMP MAD GUIDInfo record is received, the data is cached.
 */

struct mlx4_alias_guid_work_context {
	u8 port;
	struct mlx4_ib_dev     *dev;
	struct ib_sa_query     *sa_query;
	struct completion	done;
	int			query_id;
	struct list_head	list;
	int			block_num;
};

struct mlx4_next_alias_guid_work {
	u8 port;
	u8 block_num;
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
};


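/*
 * Cache the GUIDs carried in a GUIDInfo record: on the master, copy every
 * GUID in p_data whose index bit is set in guid_indexes into the per-port
 * guid_cache entry of the corresponding slave.
 */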
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
					 u8 port_num, u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	int port_index = port_num - 1;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, (long long)guid_indexes);

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* The location of the specific index starts from bit number 4
		 * until bit num 11 */
		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
			if (slave_id >= dev->dev->num_slaves) {
				pr_debug("The last slave: %d\n", slave_id);
				return;
			}

			/* cache the guid: */
			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
			       &p_data[i * GUID_REC_SIZE],
			       GUID_REC_SIZE);
		} else
			pr_debug("Guid number: %d in block: %d"
				 " was not updated\n", i, block_num);
	}
}

static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
{
	if (index >= NUM_ALIAS_GUID_PER_PORT) {
		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
		return (__force __be64) -1;
	}
	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
}


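/*
 * GUID index i within a record maps to SA component-mask bit 4 + i; the
 * lower mask bits cover the non-GID fields of the GUIDInfo record (LID,
 * block number), see set_guid_rec().
 */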
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
{
	return IB_SA_COMP_MASK(4 + index);
}

/*
 * Whenever a new GUID is set/unset (guid table change), create an event and
 * notify the relevant slave (the master should also be notified).
 * If the GUID value is not the same as we have in the cache, the slave will
 * not be updated; in that case it waits for the smp_snoop or the port
 * management event to call this function and update the slave.
 * block_number - the index of the block (16 blocks available)
 * port_number - 1 or 2
 */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num, u8 port_num,
					  u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	enum slave_port_state new_state;
	enum slave_port_state prev_state;
	__be64 tmp_cur_ag, form_cache_ag;
	enum slave_port_gen_event gen_event;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, (long long)guid_indexes);

	/* calculate the slaves and notify them */
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* the location of the specific index runs from bits 4..11 */
		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
			continue;

		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
		if (slave_id >= dev->dev->num_slaves)
			return;
		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
		form_cache_ag = get_cached_alias_guid(dev, port_num,
					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
		/*
		 * Check whether the guid is the same as in the cache.
		 * If it is different, wait for the snoop_smp or the port mgmt
		 * change event to update the slave on its port state change.
		 */
		if (tmp_cur_ag != form_cache_ag)
			continue;
		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);

		/* 2 cases: valid GUID, and invalid GUID */

		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /* valid GUID */
			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
								  &gen_event);
			pr_debug("slave: %d, port: %d prev_port_state: %d,"
				 " new_port_state: %d, gen_event: %d\n",
				 slave_id, port_num, prev_state, new_state, gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
			}
		} else { /* request to invalidate GUID */
			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
						      &gen_event);
			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
				 slave_id, port_num);
			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
		}
	}
}

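/*
 * Completion handler for the SA GuidInfoRecord query issued by
 * set_guid_rec(): update the per-block record state from the SM's reply,
 * notify the slaves and requeue the alias GUID work.
 */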
static void aliasguid_query_handler(int status,
				    struct ib_sa_guidinfo_rec *guid_rec,
				    void *context)
{
	struct mlx4_ib_dev *dev;
	struct mlx4_alias_guid_work_context *cb_ctx = context;
	u8 port_index;
	int i;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags, flags1;

	if (!context)
		return;

	dev = cb_ctx->dev;
	port_index = cb_ctx->port - 1;
	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[cb_ctx->block_num];

	if (status) {
		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
		pr_debug("(port: %d) failed: status = %d\n",
			 cb_ctx->port, status);
		goto out;
	}

	if (guid_rec->block_num != cb_ctx->block_num) {
		pr_err("block num mismatch: %d != %d\n",
		       cb_ctx->block_num, guid_rec->block_num);
		goto out;
	}

	pr_debug("lid/port: %d/%d, block_num: %d\n",
		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
		 guid_rec->block_num);

	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[guid_rec->block_num];

	rec->status = MLX4_GUID_INFO_STATUS_SET;
	rec->method = MLX4_GUID_INFO_RECORD_SET;

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		__be64 tmp_cur_ag;
		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
		/* Check whether the SM declined to assign one of the records.
		 * If it did, and the record was not a sysadmin request,
		 * ask the SM for a new GUID (instead of the driver request).
		 */
		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
				     "block_num: %d was declined by SM, "
				     "ownership by %d (0 = driver, 1=sysAdmin,"
				     " 2=None)\n", __func__, i,
				     guid_rec->block_num, rec->ownership);
			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
				/* if it is driver assign, ask for a new GUID from the SM */
				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
					MLX4_NOT_SET_GUID;

				/* Mark the record as not assigned, and let it
				 * be sent again in the next work schedule. */
				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
				rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
			}
		} else {
			/* Properly assigned record: save the GUID we just got
			 * from the SM in the admin GUID so that it is persistent;
			 * the next request to the SM will ask for the same GUID.
			 */
			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
				/* the sysadmin assignment failed. */
				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
					     " admin guid after SysAdmin "
					     "configuration. "
					     "Record num %d in block_num:%d "
					     "was declined by SM, "
					     "new val(0x%llx) was kept\n",
					     __func__, i,
					     guid_rec->block_num,
					     (long long)be64_to_cpu(*(__be64 *) &
							 rec->all_recs[i * GUID_REC_SIZE]));
			} else {
				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
				       GUID_REC_SIZE);
			}
		}
	}
	/*
	 * The function is called here to cover the case where the SM does not
	 * send an SMP; the driver notifies the slave from the SA response
	 * instead.
	 */
	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
					     cb_ctx->port,
					     guid_rec->guid_info_list);
out:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down)
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
				   &dev->sriov.alias_guid.ports_guid[port_index].
				   alias_guid_work, 0);
	if (cb_ctx->sa_query) {
		list_del(&cb_ctx->list);
		kfree(cb_ctx);
	} else
		complete(&cb_ctx->done);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

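/*
 * Mark a single GUIDInfo record as IDLE and rebuild its comp mask so that
 * the next run of alias_guid_work() re-requests it from the SM.
 */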
static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
{
	int i;
	u64 cur_admin_val;
	ib_sa_comp_mask comp_mask = 0;

	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
		= MLX4_GUID_INFO_STATUS_IDLE;
	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
		= MLX4_GUID_INFO_RECORD_SET;

	/* calculate the comp_mask for that record. */
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		cur_admin_val =
			*(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
			all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
		/*
		 * Check the admin value: if it is marked for deletion (~00LL),
		 * or it is the first GUID of the first record (the HW GUID),
		 * or the record is not owned by the sysadmin and the SM does
		 * not need to assign GUIDs, then don't put it up for assignment.
		 */
		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
		    (!index && !i) ||
		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
		    ports_guid[port - 1].all_rec_per_port[index].ownership)
			continue;
		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
	}
	dev->sriov.alias_guid.ports_guid[port - 1].
		all_rec_per_port[index].guid_indexes = comp_mask;
}

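/*
 * Build an SA GuidInfoRecord SET for one block and send it to the SM.
 * If the port is not active or the query cannot be issued, invalidate the
 * record and reschedule the work.
 */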
static int set_guid_rec(struct ib_device *ibdev,
			u8 port, int index,
			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
	int err;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_guidinfo_rec guid_info_rec;
	ib_sa_comp_mask comp_mask;
	struct ib_port_attr attr;
	struct mlx4_alias_guid_work_context *callback_context;
	unsigned long resched_delay, flags, flags1;
	struct list_head *head =
		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
	if (err) {
		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
			 err, port);
		return err;
	}
	/* check that the port was configured by the SM; otherwise there is no need to send */
	if (attr.state != IB_PORT_ACTIVE) {
		pr_debug("port %d not active...rescheduling\n", port);
		resched_delay = 5 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}

	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
	if (!callback_context) {
		err = -ENOMEM;
		resched_delay = HZ * 5;
		goto new_schedule;
	}
	callback_context->port = port;
	callback_context->dev = dev;
	callback_context->block_num = index;

	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

	guid_info_rec.lid = cpu_to_be16(attr.lid);
	guid_info_rec.block_num = index;

	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
		rec_det->guid_indexes;

	init_completion(&callback_context->done);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	list_add_tail(&callback_context->list, head);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

	callback_context->query_id =
		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
					  ibdev, port, &guid_info_rec,
					  comp_mask, rec_det->method, 1000,
					  GFP_KERNEL, aliasguid_query_handler,
					  callback_context,
					  &callback_context->sa_query);
	if (callback_context->query_id < 0) {
		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
			 "%d. will reschedule to the next 1 sec.\n",
			 callback_context->query_id);
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
		list_del(&callback_context->list);
		kfree(callback_context);
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
		resched_delay = 1 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}
	err = 0;
	goto out;

new_schedule:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	invalidate_guid_record(dev, port, index);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   resched_delay);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
	return err;
}

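/*
 * Invalidate every GUIDInfo record of a port and kick the alias GUID work
 * so that the whole table is renegotiated with the SM.
 */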
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
	int i;
	unsigned long flags, flags1;

	pr_debug("port %d\n", port);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
		invalidate_guid_record(dev, port, i);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
		/*
		 * Make sure no work waits in the queue.  If the work is
		 * already queued (not on the timer) the cancel will fail;
		 * that is not a problem because we just want the work started.
		 */
		cancel_delayed_work(&dev->sriov.alias_guid.
				      ports_guid[port - 1].alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

/* The function returns the next record that was
 * not configured (or failed to be configured). */
static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
				     struct mlx4_next_alias_guid_work *rec)
{
	int j;
	unsigned long flags;

	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
		    MLX4_GUID_INFO_STATUS_IDLE) {
			memcpy(&rec->rec_det,
			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
			rec->port = port;
			rec->block_num = j;
			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
				MLX4_GUID_INFO_STATUS_PENDING;
			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
			return 0;
		}
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	}
	return -ENOENT;
}

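/* Store an administratively provided record (indexes, GUIDs and status)
 * in the port's alias GUID table. */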
static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
					     int rec_index,
					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
		rec_det->guid_indexes;
	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
		rec_det->status;
}

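/* Zero all records of a port and request SM assignment for every GUID
 * except GID0 of block 0 (the HW GUID). */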
static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
{
	int j;
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;

	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
			IB_SA_GUIDINFO_REC_GID7;
		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
		set_administratively_guid_record(dev, port, j, &rec_det);
	}
}

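/*
 * Delayed-work handler: pick the next IDLE record of the port and push it
 * to the SM via set_guid_rec(); requeueing is driven by the query handler.
 */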
static void alias_guid_work(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	int ret = 0;
	struct mlx4_next_alias_guid_work *rec;
	struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
		container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
			     alias_guid_work);
	struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
	struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
						struct mlx4_ib_sriov,
						alias_guid);
	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);

	rec = kzalloc(sizeof *rec, GFP_KERNEL);
	if (!rec) {
		pr_err("alias_guid_work: No Memory\n");
		return;
	}

	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
	if (ret) {
		pr_debug("No more records to update.\n");
		goto out;
	}

	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
		     &rec->rec_det);

out:
	kfree(rec);
}


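/* Start the alias GUID work for a port (master only, and only while the
 * device is not going down). */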
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
	unsigned long flags, flags1;

	if (!mlx4_is_master(dev->dev))
		return;
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

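/*
 * Tear down the alias GUID service: cancel pending work, cancel and wait
 * for outstanding SA queries, destroy the per-port workqueues and release
 * the SA client.
 */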
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
	int i;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct mlx4_alias_guid_work_context *cb_ctx;
	struct mlx4_sriov_alias_guid_port_rec_det *det;
	struct ib_sa_query *sa_query;
	unsigned long flags;

	for (i = 0; i < dev->num_ports; i++) {
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
		det = &sriov->alias_guid.ports_guid[i];
		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		while (!list_empty(&det->cb_list)) {
			cb_ctx = list_entry(det->cb_list.next,
					    struct mlx4_alias_guid_work_context,
					    list);
			sa_query = cb_ctx->sa_query;
			cb_ctx->sa_query = NULL;
			list_del(&cb_ctx->list);
			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
			wait_for_completion(&cb_ctx->done);
			kfree(cb_ctx);
			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		}
		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
	}
	for (i = 0; i < dev->num_ports; i++) {
		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
	}
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
}

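/*
 * Initialize the alias GUID service on the master: register the SA client,
 * set up per-port record state and ownership, and create a single-threaded
 * workqueue per port for the alias GUID work.
 */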
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
	char alias_wq_name[15];
	int ret = 0;
	int i, j, k;
	union ib_gid gid;

	if (!mlx4_is_master(dev->dev))
		return 0;
	dev->sriov.alias_guid.sa_client =
		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
	if (!dev->sriov.alias_guid.sa_client)
		return -ENOMEM;

	ib_sa_register_client(dev->sriov.alias_guid.sa_client);

	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

	for (i = 1; i <= dev->num_ports; ++i) {
		if (dev->ib_dev.query_gid(&dev->ib_dev, i, 0, &gid)) {
			ret = -EFAULT;
			goto err_unregister;
		}
	}

	for (i = 0; i < dev->num_ports; i++) {
		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
		/* Check whether the SM doesn't need to assign the GUIDs */
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
			if (mlx4_ib_sm_guid_assign) {
				dev->sriov.alias_guid.ports_guid[i].
					all_rec_per_port[j].
					ownership = MLX4_GUID_DRIVER_ASSIGN;
				continue;
			}
			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
					ownership = MLX4_GUID_NONE_ASSIGN;
			/* mark each value as deleted until the sysadmin
			 * provides a valid value */
			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
			}
		}
		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
		/* prepare the records; set them to be allocated by the SM */
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
			invalidate_guid_record(dev, i + 1, j);

		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
		dev->sriov.alias_guid.ports_guid[i].port  = i;
		if (mlx4_ib_sm_guid_assign)
			set_all_slaves_guids(dev, i);

		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
		dev->sriov.alias_guid.ports_guid[i].wq =
			create_singlethread_workqueue(alias_wq_name);
		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
			ret = -ENOMEM;
			goto err_thread;
		}
		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
			  alias_guid_work);
	}
	return 0;

err_thread:
	for (--i; i >= 0; i--) {
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
	}

err_unregister:
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
	dev->sriov.alias_guid.sa_client = NULL;
	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
	return ret;
}