1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c)  2003-2014 QLogic Corporation
5 */
6#include "qla_def.h"
7#include "qla_target.h"
8
9#include <linux/kthread.h>
10#include <linux/vmalloc.h>
11#include <linux/slab.h>
12#include <linux/delay.h>
13
14static int qla24xx_vport_disable(struct fc_vport *, bool);
15
16/* SYSFS attributes --------------------------------------------------------- */
17
18static ssize_t
19qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
20			   struct bin_attribute *bin_attr,
21			   char *buf, loff_t off, size_t count)
22{
23	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
24	    struct device, kobj)));
25	struct qla_hw_data *ha = vha->hw;
26	int rval = 0;
27
28	if (!(ha->fw_dump_reading || ha->mctp_dump_reading ||
29	      ha->mpi_fw_dump_reading))
30		return 0;
31
32	mutex_lock(&ha->optrom_mutex);
33	if (IS_P3P_TYPE(ha)) {
34		if (off < ha->md_template_size) {
35			rval = memory_read_from_buffer(buf, count,
36			    &off, ha->md_tmplt_hdr, ha->md_template_size);
37		} else {
38			off -= ha->md_template_size;
39			rval = memory_read_from_buffer(buf, count,
40			    &off, ha->md_dump, ha->md_dump_size);
41		}
42	} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
43		rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44		    MCTP_DUMP_SIZE);
45	} else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) {
46		rval = memory_read_from_buffer(buf, count, &off,
47					       ha->mpi_fw_dump,
48					       ha->mpi_fw_dump_len);
49	} else if (ha->fw_dump_reading) {
50		rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
51					ha->fw_dump_len);
52	} else {
53		rval = 0;
54	}
55	mutex_unlock(&ha->optrom_mutex);
56	return rval;
57}
58
59static ssize_t
60qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
61			    struct bin_attribute *bin_attr,
62			    char *buf, loff_t off, size_t count)
63{
64	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
65	    struct device, kobj)));
66	struct qla_hw_data *ha = vha->hw;
67	int reading;
68
69	if (off != 0)
70		return (0);
71
72	reading = simple_strtol(buf, NULL, 10);
73	switch (reading) {
74	case 0:
75		if (!ha->fw_dump_reading)
76			break;
77
78		ql_log(ql_log_info, vha, 0x705d,
79		    "Firmware dump cleared on (%ld).\n", vha->host_no);
80
81		if (IS_P3P_TYPE(ha)) {
82			qla82xx_md_free(vha);
83			qla82xx_md_prep(vha);
84		}
85		ha->fw_dump_reading = 0;
86		ha->fw_dumped = false;
87		break;
88	case 1:
89		if (ha->fw_dumped && !ha->fw_dump_reading) {
90			ha->fw_dump_reading = 1;
91
92			ql_log(ql_log_info, vha, 0x705e,
93			    "Raw firmware dump ready for read on (%ld).\n",
94			    vha->host_no);
95		}
96		break;
97	case 2:
98		qla2x00_alloc_fw_dump(vha);
99		break;
100	case 3:
101		if (IS_QLA82XX(ha)) {
102			qla82xx_idc_lock(ha);
103			qla82xx_set_reset_owner(vha);
104			qla82xx_idc_unlock(ha);
105		} else if (IS_QLA8044(ha)) {
106			qla8044_idc_lock(ha);
107			qla82xx_set_reset_owner(vha);
108			qla8044_idc_unlock(ha);
109		} else {
110			qla2x00_system_error(vha);
111		}
112		break;
113	case 4:
114		if (IS_P3P_TYPE(ha)) {
115			if (ha->md_tmplt_hdr)
116				ql_dbg(ql_dbg_user, vha, 0x705b,
117				    "MiniDump supported with this firmware.\n");
118			else
119				ql_dbg(ql_dbg_user, vha, 0x709d,
120				    "MiniDump not supported with this firmware.\n");
121		}
122		break;
123	case 5:
124		if (IS_P3P_TYPE(ha))
125			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
126		break;
127	case 6:
128		if (!ha->mctp_dump_reading)
129			break;
130		ql_log(ql_log_info, vha, 0x70c1,
131		    "MCTP dump cleared on (%ld).\n", vha->host_no);
132		ha->mctp_dump_reading = 0;
133		ha->mctp_dumped = 0;
134		break;
135	case 7:
136		if (ha->mctp_dumped && !ha->mctp_dump_reading) {
137			ha->mctp_dump_reading = 1;
138			ql_log(ql_log_info, vha, 0x70c2,
139			    "Raw mctp dump ready for read on (%ld).\n",
140			    vha->host_no);
141		}
142		break;
143	case 8:
144		if (!ha->mpi_fw_dump_reading)
145			break;
146		ql_log(ql_log_info, vha, 0x70e7,
147		       "MPI firmware dump cleared on (%ld).\n", vha->host_no);
148		ha->mpi_fw_dump_reading = 0;
149		ha->mpi_fw_dumped = 0;
150		break;
151	case 9:
152		if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) {
153			ha->mpi_fw_dump_reading = 1;
154			ql_log(ql_log_info, vha, 0x70e8,
155			       "Raw MPI firmware dump ready for read on (%ld).\n",
156			       vha->host_no);
157		}
158		break;
159	case 10:
160		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
161			ql_log(ql_log_info, vha, 0x70e9,
162			       "Issuing MPI firmware dump on host#%ld.\n",
163			       vha->host_no);
164			ha->isp_ops->mpi_fw_dump(vha, 0);
165		}
166		break;
167	}
168	return count;
169}
170
171static struct bin_attribute sysfs_fw_dump_attr = {
172	.attr = {
173		.name = "fw_dump",
174		.mode = S_IRUSR | S_IWUSR,
175	},
176	.size = 0,
177	.read = qla2x00_sysfs_read_fw_dump,
178	.write = qla2x00_sysfs_write_fw_dump,
179};
180
181static ssize_t
182qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
183			 struct bin_attribute *bin_attr,
184			 char *buf, loff_t off, size_t count)
185{
186	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
187	    struct device, kobj)));
188	struct qla_hw_data *ha = vha->hw;
189	uint32_t faddr;
190	struct active_regions active_regions = { };
191
192	if (!capable(CAP_SYS_ADMIN))
193		return 0;
194
195	mutex_lock(&ha->optrom_mutex);
196	if (qla2x00_chip_is_down(vha)) {
197		mutex_unlock(&ha->optrom_mutex);
198		return -EAGAIN;
199	}
200
201	if (!IS_NOCACHE_VPD_TYPE(ha)) {
202		mutex_unlock(&ha->optrom_mutex);
203		goto skip;
204	}
205
206	faddr = ha->flt_region_nvram;
207	if (IS_QLA28XX(ha)) {
208		qla28xx_get_aux_images(vha, &active_regions);
209		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
210			faddr = ha->flt_region_nvram_sec;
211	}
212	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
213
214	mutex_unlock(&ha->optrom_mutex);
215
216skip:
217	return memory_read_from_buffer(buf, count, &off, ha->nvram,
218					ha->nvram_size);
219}
220
221static ssize_t
222qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
223			  struct bin_attribute *bin_attr,
224			  char *buf, loff_t off, size_t count)
225{
226	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
227	    struct device, kobj)));
228	struct qla_hw_data *ha = vha->hw;
229	uint16_t	cnt;
230
231	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
232	    !ha->isp_ops->write_nvram)
233		return -EINVAL;
234
235	/* Checksum NVRAM. */
236	if (IS_FWI2_CAPABLE(ha)) {
237		__le32 *iter = (__force __le32 *)buf;
238		uint32_t chksum;
239
240		chksum = 0;
241		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
242			chksum += le32_to_cpu(*iter);
243		chksum = ~chksum + 1;
244		*iter = cpu_to_le32(chksum);
245	} else {
246		uint8_t *iter;
247		uint8_t chksum;
248
249		iter = (uint8_t *)buf;
250		chksum = 0;
251		for (cnt = 0; cnt < count - 1; cnt++)
252			chksum += *iter++;
253		chksum = ~chksum + 1;
254		*iter = chksum;
255	}
256
257	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
258		ql_log(ql_log_warn, vha, 0x705f,
259		    "HBA not online, failing NVRAM update.\n");
260		return -EAGAIN;
261	}
262
263	mutex_lock(&ha->optrom_mutex);
264	if (qla2x00_chip_is_down(vha)) {
265		mutex_unlock(&ha->optrom_mutex);
266		return -EAGAIN;
267	}
268
269	/* Write NVRAM. */
270	ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
271	ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
272	    count);
273	mutex_unlock(&ha->optrom_mutex);
274
275	ql_dbg(ql_dbg_user, vha, 0x7060,
276	    "Setting ISP_ABORT_NEEDED\n");
277	/* NVRAM settings take effect immediately. */
278	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
279	qla2xxx_wake_dpc(vha);
280	qla2x00_wait_for_chip_reset(vha);
281
282	return count;
283}
284
285static struct bin_attribute sysfs_nvram_attr = {
286	.attr = {
287		.name = "nvram",
288		.mode = S_IRUSR | S_IWUSR,
289	},
290	.size = 512,
291	.read = qla2x00_sysfs_read_nvram,
292	.write = qla2x00_sysfs_write_nvram,
293};
294
295static ssize_t
296qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
297			  struct bin_attribute *bin_attr,
298			  char *buf, loff_t off, size_t count)
299{
300	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
301	    struct device, kobj)));
302	struct qla_hw_data *ha = vha->hw;
303	ssize_t rval = 0;
304
305	mutex_lock(&ha->optrom_mutex);
306
307	if (ha->optrom_state != QLA_SREADING)
308		goto out;
309
310	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
311	    ha->optrom_region_size);
312
313out:
314	mutex_unlock(&ha->optrom_mutex);
315
316	return rval;
317}
318
319static ssize_t
320qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
321			   struct bin_attribute *bin_attr,
322			   char *buf, loff_t off, size_t count)
323{
324	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
325	    struct device, kobj)));
326	struct qla_hw_data *ha = vha->hw;
327
328	mutex_lock(&ha->optrom_mutex);
329
330	if (ha->optrom_state != QLA_SWRITING) {
331		mutex_unlock(&ha->optrom_mutex);
332		return -EINVAL;
333	}
334	if (off > ha->optrom_region_size) {
335		mutex_unlock(&ha->optrom_mutex);
336		return -ERANGE;
337	}
338	if (off + count > ha->optrom_region_size)
339		count = ha->optrom_region_size - off;
340
341	memcpy(&ha->optrom_buffer[off], buf, count);
342	mutex_unlock(&ha->optrom_mutex);
343
344	return count;
345}
346
347static struct bin_attribute sysfs_optrom_attr = {
348	.attr = {
349		.name = "optrom",
350		.mode = S_IRUSR | S_IWUSR,
351	},
352	.size = 0,
353	.read = qla2x00_sysfs_read_optrom,
354	.write = qla2x00_sysfs_write_optrom,
355};
356
357static ssize_t
358qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
359			       struct bin_attribute *bin_attr,
360			       char *buf, loff_t off, size_t count)
361{
362	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
363	    struct device, kobj)));
364	struct qla_hw_data *ha = vha->hw;
365	uint32_t start = 0;
366	uint32_t size = ha->optrom_size;
367	int val, valid;
368	ssize_t rval = count;
369
370	if (off)
371		return -EINVAL;
372
373	if (unlikely(pci_channel_offline(ha->pdev)))
374		return -EAGAIN;
375
376	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
377		return -EINVAL;
378	if (start > ha->optrom_size)
379		return -EINVAL;
380	if (size > ha->optrom_size - start)
381		size = ha->optrom_size - start;
382
383	mutex_lock(&ha->optrom_mutex);
384	if (qla2x00_chip_is_down(vha)) {
385		mutex_unlock(&ha->optrom_mutex);
386		return -EAGAIN;
387	}
388	switch (val) {
389	case 0:
390		if (ha->optrom_state != QLA_SREADING &&
391		    ha->optrom_state != QLA_SWRITING) {
392			rval =  -EINVAL;
393			goto out;
394		}
395		ha->optrom_state = QLA_SWAITING;
396
397		ql_dbg(ql_dbg_user, vha, 0x7061,
398		    "Freeing flash region allocation -- 0x%x bytes.\n",
399		    ha->optrom_region_size);
400
401		vfree(ha->optrom_buffer);
402		ha->optrom_buffer = NULL;
403		break;
404	case 1:
405		if (ha->optrom_state != QLA_SWAITING) {
406			rval = -EINVAL;
407			goto out;
408		}
409
410		ha->optrom_region_start = start;
411		ha->optrom_region_size = size;
412
413		ha->optrom_state = QLA_SREADING;
414		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
415		if (ha->optrom_buffer == NULL) {
416			ql_log(ql_log_warn, vha, 0x7062,
417			    "Unable to allocate memory for optrom retrieval "
418			    "(%x).\n", ha->optrom_region_size);
419
420			ha->optrom_state = QLA_SWAITING;
421			rval = -ENOMEM;
422			goto out;
423		}
424
425		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
426			ql_log(ql_log_warn, vha, 0x7063,
427			    "HBA not online, failing NVRAM update.\n");
428			rval = -EAGAIN;
429			goto out;
430		}
431
432		ql_dbg(ql_dbg_user, vha, 0x7064,
433		    "Reading flash region -- 0x%x/0x%x.\n",
434		    ha->optrom_region_start, ha->optrom_region_size);
435
436		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
437		    ha->optrom_region_start, ha->optrom_region_size);
438		break;
439	case 2:
440		if (ha->optrom_state != QLA_SWAITING) {
441			rval = -EINVAL;
442			goto out;
443		}
444
445		/*
446		 * We need to be more restrictive on which FLASH regions are
447		 * allowed to be updated via user-space.  Regions accessible
448		 * via this method include:
449		 *
450		 * ISP21xx/ISP22xx/ISP23xx type boards:
451		 *
452		 * 	0x000000 -> 0x020000 -- Boot code.
453		 *
454		 * ISP2322/ISP24xx type boards:
455		 *
456		 * 	0x000000 -> 0x07ffff -- Boot code.
457		 * 	0x080000 -> 0x0fffff -- Firmware.
458		 *
459		 * ISP25xx type boards:
460		 *
461		 * 	0x000000 -> 0x07ffff -- Boot code.
462		 * 	0x080000 -> 0x0fffff -- Firmware.
463		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
464		 *
465		 * > ISP25xx type boards:
466		 *
467		 *      None -- should go through BSG.
468		 */
469		valid = 0;
470		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
471			valid = 1;
472		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
473			valid = 1;
474		if (!valid) {
475			ql_log(ql_log_warn, vha, 0x7065,
476			    "Invalid start region 0x%x/0x%x.\n", start, size);
477			rval = -EINVAL;
478			goto out;
479		}
480
481		ha->optrom_region_start = start;
482		ha->optrom_region_size = size;
483
484		ha->optrom_state = QLA_SWRITING;
485		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
486		if (ha->optrom_buffer == NULL) {
487			ql_log(ql_log_warn, vha, 0x7066,
488			    "Unable to allocate memory for optrom update "
489			    "(%x)\n", ha->optrom_region_size);
490
491			ha->optrom_state = QLA_SWAITING;
492			rval = -ENOMEM;
493			goto out;
494		}
495
496		ql_dbg(ql_dbg_user, vha, 0x7067,
497		    "Staging flash region write -- 0x%x/0x%x.\n",
498		    ha->optrom_region_start, ha->optrom_region_size);
499
500		break;
501	case 3:
502		if (ha->optrom_state != QLA_SWRITING) {
503			rval = -EINVAL;
504			goto out;
505		}
506
507		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
508			ql_log(ql_log_warn, vha, 0x7068,
509			    "HBA not online, failing flash update.\n");
510			rval = -EAGAIN;
511			goto out;
512		}
513
514		ql_dbg(ql_dbg_user, vha, 0x7069,
515		    "Writing flash region -- 0x%x/0x%x.\n",
516		    ha->optrom_region_start, ha->optrom_region_size);
517
518		rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
519		    ha->optrom_region_start, ha->optrom_region_size);
520		if (rval)
521			rval = -EIO;
522		break;
523	default:
524		rval = -EINVAL;
525	}
526
527out:
528	mutex_unlock(&ha->optrom_mutex);
529	return rval;
530}
531
532static struct bin_attribute sysfs_optrom_ctl_attr = {
533	.attr = {
534		.name = "optrom_ctl",
535		.mode = S_IWUSR,
536	},
537	.size = 0,
538	.write = qla2x00_sysfs_write_optrom_ctl,
539};
540
541static ssize_t
542qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
543		       struct bin_attribute *bin_attr,
544		       char *buf, loff_t off, size_t count)
545{
546	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
547	    struct device, kobj)));
548	struct qla_hw_data *ha = vha->hw;
549	uint32_t faddr;
550	struct active_regions active_regions = { };
551
552	if (unlikely(pci_channel_offline(ha->pdev)))
553		return -EAGAIN;
554
555	if (!capable(CAP_SYS_ADMIN))
556		return -EINVAL;
557
558	if (!IS_NOCACHE_VPD_TYPE(ha))
559		goto skip;
560
561	faddr = ha->flt_region_vpd << 2;
562
563	if (IS_QLA28XX(ha)) {
564		qla28xx_get_aux_images(vha, &active_regions);
565		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
566			faddr = ha->flt_region_vpd_sec << 2;
567
568		ql_dbg(ql_dbg_init, vha, 0x7070,
569		    "Loading %s nvram image.\n",
570		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
571		    "primary" : "secondary");
572	}
573
574	mutex_lock(&ha->optrom_mutex);
575	if (qla2x00_chip_is_down(vha)) {
576		mutex_unlock(&ha->optrom_mutex);
577		return -EAGAIN;
578	}
579
580	ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
581	mutex_unlock(&ha->optrom_mutex);
582
583	ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
584skip:
585	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
586}
587
588static ssize_t
589qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
590			struct bin_attribute *bin_attr,
591			char *buf, loff_t off, size_t count)
592{
593	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
594	    struct device, kobj)));
595	struct qla_hw_data *ha = vha->hw;
596	uint8_t *tmp_data;
597
598	if (unlikely(pci_channel_offline(ha->pdev)))
599		return 0;
600
601	if (qla2x00_chip_is_down(vha))
602		return 0;
603
604	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
605	    !ha->isp_ops->write_nvram)
606		return 0;
607
608	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
609		ql_log(ql_log_warn, vha, 0x706a,
610		    "HBA not online, failing VPD update.\n");
611		return -EAGAIN;
612	}
613
614	mutex_lock(&ha->optrom_mutex);
615	if (qla2x00_chip_is_down(vha)) {
616		mutex_unlock(&ha->optrom_mutex);
617		return -EAGAIN;
618	}
619
620	/* Write NVRAM. */
621	ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
622	ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
623
624	/* Update flash version information for 4Gb & above. */
625	if (!IS_FWI2_CAPABLE(ha)) {
626		mutex_unlock(&ha->optrom_mutex);
627		return -EINVAL;
628	}
629
630	tmp_data = vmalloc(256);
631	if (!tmp_data) {
632		mutex_unlock(&ha->optrom_mutex);
633		ql_log(ql_log_warn, vha, 0x706b,
634		    "Unable to allocate memory for VPD information update.\n");
635		return -ENOMEM;
636	}
637	ha->isp_ops->get_flash_version(vha, tmp_data);
638	vfree(tmp_data);
639
640	mutex_unlock(&ha->optrom_mutex);
641
642	return count;
643}
644
645static struct bin_attribute sysfs_vpd_attr = {
646	.attr = {
647		.name = "vpd",
648		.mode = S_IRUSR | S_IWUSR,
649	},
650	.size = 0,
651	.read = qla2x00_sysfs_read_vpd,
652	.write = qla2x00_sysfs_write_vpd,
653};
654
655static ssize_t
656qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
657		       struct bin_attribute *bin_attr,
658		       char *buf, loff_t off, size_t count)
659{
660	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
661	    struct device, kobj)));
662	int rval;
663
664	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
665		return 0;
666
667	mutex_lock(&vha->hw->optrom_mutex);
668	if (qla2x00_chip_is_down(vha)) {
669		mutex_unlock(&vha->hw->optrom_mutex);
670		return 0;
671	}
672
673	rval = qla2x00_read_sfp_dev(vha, buf, count);
674	mutex_unlock(&vha->hw->optrom_mutex);
675
676	if (rval)
677		return -EIO;
678
679	return count;
680}
681
682static struct bin_attribute sysfs_sfp_attr = {
683	.attr = {
684		.name = "sfp",
685		.mode = S_IRUSR | S_IWUSR,
686	},
687	.size = SFP_DEV_SIZE,
688	.read = qla2x00_sysfs_read_sfp,
689};
690
691static ssize_t
692qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
693			struct bin_attribute *bin_attr,
694			char *buf, loff_t off, size_t count)
695{
696	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
697	    struct device, kobj)));
698	struct qla_hw_data *ha = vha->hw;
699	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
700	int type;
701	uint32_t idc_control;
702	uint8_t *tmp_data = NULL;
703
704	if (off != 0)
705		return -EINVAL;
706
707	type = simple_strtol(buf, NULL, 10);
708	switch (type) {
709	case 0x2025c:
710		ql_log(ql_log_info, vha, 0x706e,
711		    "Issuing ISP reset.\n");
712
713		if (vha->hw->flags.port_isolated) {
714			ql_log(ql_log_info, vha, 0x706e,
715			       "Port is isolated, returning.\n");
716			return -EINVAL;
717		}
718
719		scsi_block_requests(vha->host);
720		if (IS_QLA82XX(ha)) {
721			ha->flags.isp82xx_no_md_cap = 1;
722			qla82xx_idc_lock(ha);
723			qla82xx_set_reset_owner(vha);
724			qla82xx_idc_unlock(ha);
725		} else if (IS_QLA8044(ha)) {
726			qla8044_idc_lock(ha);
727			idc_control = qla8044_rd_reg(ha,
728			    QLA8044_IDC_DRV_CTRL);
729			qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
730			    (idc_control | GRACEFUL_RESET_BIT1));
731			qla82xx_set_reset_owner(vha);
732			qla8044_idc_unlock(ha);
733		} else {
734			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
735			qla2xxx_wake_dpc(vha);
736		}
737		qla2x00_wait_for_chip_reset(vha);
738		scsi_unblock_requests(vha->host);
739		break;
740	case 0x2025d:
741		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
742		    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
743			return -EPERM;
744
745		ql_log(ql_log_info, vha, 0x706f,
746		    "Issuing MPI reset.\n");
747
748		if (IS_QLA83XX(ha)) {
749			uint32_t idc_control;
750
751			qla83xx_idc_lock(vha, 0);
752			__qla83xx_get_idc_control(vha, &idc_control);
753			idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
754			__qla83xx_set_idc_control(vha, idc_control);
755			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
756			    QLA8XXX_DEV_NEED_RESET);
757			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
758			qla83xx_idc_unlock(vha, 0);
759			break;
760		} else {
761			/* Make sure FC side is not in reset */
762			WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
763				     QLA_SUCCESS);
764
765			/* Issue MPI reset */
766			scsi_block_requests(vha->host);
767			if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
768				ql_log(ql_log_warn, vha, 0x7070,
769				    "MPI reset failed.\n");
770			scsi_unblock_requests(vha->host);
771			break;
772		}
773		break;
774	case 0x2025e:
775		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
776			ql_log(ql_log_info, vha, 0x7071,
777			    "FCoE ctx reset not supported.\n");
778			return -EPERM;
779		}
780
781		ql_log(ql_log_info, vha, 0x7072,
782		    "Issuing FCoE ctx reset.\n");
783		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
784		qla2xxx_wake_dpc(vha);
785		qla2x00_wait_for_fcoe_ctx_reset(vha);
786		break;
787	case 0x2025f:
788		if (!IS_QLA8031(ha))
789			return -EPERM;
790		ql_log(ql_log_info, vha, 0x70bc,
791		    "Disabling Reset by IDC control\n");
792		qla83xx_idc_lock(vha, 0);
793		__qla83xx_get_idc_control(vha, &idc_control);
794		idc_control |= QLA83XX_IDC_RESET_DISABLED;
795		__qla83xx_set_idc_control(vha, idc_control);
796		qla83xx_idc_unlock(vha, 0);
797		break;
798	case 0x20260:
799		if (!IS_QLA8031(ha))
800			return -EPERM;
801		ql_log(ql_log_info, vha, 0x70bd,
802		    "Enabling Reset by IDC control\n");
803		qla83xx_idc_lock(vha, 0);
804		__qla83xx_get_idc_control(vha, &idc_control);
805		idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
806		__qla83xx_set_idc_control(vha, idc_control);
807		qla83xx_idc_unlock(vha, 0);
808		break;
809	case 0x20261:
810		ql_dbg(ql_dbg_user, vha, 0x70e0,
811		    "Updating cache versions without reset ");
812
813		tmp_data = vmalloc(256);
814		if (!tmp_data) {
815			ql_log(ql_log_warn, vha, 0x70e1,
816			    "Unable to allocate memory for VPD information update.\n");
817			return -ENOMEM;
818		}
819		ha->isp_ops->get_flash_version(vha, tmp_data);
820		vfree(tmp_data);
821		break;
822	}
823	return count;
824}
825
826static struct bin_attribute sysfs_reset_attr = {
827	.attr = {
828		.name = "reset",
829		.mode = S_IWUSR,
830	},
831	.size = 0,
832	.write = qla2x00_sysfs_write_reset,
833};
834
835static ssize_t
836qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
837			struct bin_attribute *bin_attr,
838			char *buf, loff_t off, size_t count)
839{
840	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
841	    struct device, kobj)));
842	int type;
843	port_id_t did;
844
845	if (!capable(CAP_SYS_ADMIN))
846		return 0;
847
848	if (unlikely(pci_channel_offline(vha->hw->pdev)))
849		return 0;
850
851	if (qla2x00_chip_is_down(vha))
852		return 0;
853
854	type = simple_strtol(buf, NULL, 10);
855
856	did.b.domain = (type & 0x00ff0000) >> 16;
857	did.b.area = (type & 0x0000ff00) >> 8;
858	did.b.al_pa = (type & 0x000000ff);
859
860	ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
861	    did.b.domain, did.b.area, did.b.al_pa);
862
863	ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
864
865	qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
866	return count;
867}
868
869static struct bin_attribute sysfs_issue_logo_attr = {
870	.attr = {
871		.name = "issue_logo",
872		.mode = S_IWUSR,
873	},
874	.size = 0,
875	.write = qla2x00_issue_logo,
876};
877
878static ssize_t
879qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
880		       struct bin_attribute *bin_attr,
881		       char *buf, loff_t off, size_t count)
882{
883	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
884	    struct device, kobj)));
885	struct qla_hw_data *ha = vha->hw;
886	int rval;
887	uint16_t actual_size;
888
889	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
890		return 0;
891
892	if (unlikely(pci_channel_offline(ha->pdev)))
893		return 0;
894	mutex_lock(&vha->hw->optrom_mutex);
895	if (qla2x00_chip_is_down(vha)) {
896		mutex_unlock(&vha->hw->optrom_mutex);
897		return 0;
898	}
899
900	if (ha->xgmac_data)
901		goto do_read;
902
903	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
904	    &ha->xgmac_data_dma, GFP_KERNEL);
905	if (!ha->xgmac_data) {
906		mutex_unlock(&vha->hw->optrom_mutex);
907		ql_log(ql_log_warn, vha, 0x7076,
908		    "Unable to allocate memory for XGMAC read-data.\n");
909		return 0;
910	}
911
912do_read:
913	actual_size = 0;
914	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
915
916	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
917	    XGMAC_DATA_SIZE, &actual_size);
918
919	mutex_unlock(&vha->hw->optrom_mutex);
920	if (rval != QLA_SUCCESS) {
921		ql_log(ql_log_warn, vha, 0x7077,
922		    "Unable to read XGMAC data (%x).\n", rval);
923		count = 0;
924	}
925
926	count = actual_size > count ? count : actual_size;
927	memcpy(buf, ha->xgmac_data, count);
928
929	return count;
930}
931
932static struct bin_attribute sysfs_xgmac_stats_attr = {
933	.attr = {
934		.name = "xgmac_stats",
935		.mode = S_IRUSR,
936	},
937	.size = 0,
938	.read = qla2x00_sysfs_read_xgmac_stats,
939};
940
941static ssize_t
942qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
943		       struct bin_attribute *bin_attr,
944		       char *buf, loff_t off, size_t count)
945{
946	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
947	    struct device, kobj)));
948	struct qla_hw_data *ha = vha->hw;
949	int rval;
950
951	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
952		return 0;
953
954	mutex_lock(&vha->hw->optrom_mutex);
955	if (ha->dcbx_tlv)
956		goto do_read;
957	if (qla2x00_chip_is_down(vha)) {
958		mutex_unlock(&vha->hw->optrom_mutex);
959		return 0;
960	}
961
962	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
963	    &ha->dcbx_tlv_dma, GFP_KERNEL);
964	if (!ha->dcbx_tlv) {
965		mutex_unlock(&vha->hw->optrom_mutex);
966		ql_log(ql_log_warn, vha, 0x7078,
967		    "Unable to allocate memory for DCBX TLV read-data.\n");
968		return -ENOMEM;
969	}
970
971do_read:
972	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
973
974	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
975	    DCBX_TLV_DATA_SIZE);
976
977	mutex_unlock(&vha->hw->optrom_mutex);
978
979	if (rval != QLA_SUCCESS) {
980		ql_log(ql_log_warn, vha, 0x7079,
981		    "Unable to read DCBX TLV (%x).\n", rval);
982		return -EIO;
983	}
984
985	memcpy(buf, ha->dcbx_tlv, count);
986
987	return count;
988}
989
990static struct bin_attribute sysfs_dcbx_tlv_attr = {
991	.attr = {
992		.name = "dcbx_tlv",
993		.mode = S_IRUSR,
994	},
995	.size = 0,
996	.read = qla2x00_sysfs_read_dcbx_tlv,
997};
998
999static struct sysfs_entry {
1000	char *name;
1001	struct bin_attribute *attr;
1002	int type;
1003} bin_file_entries[] = {
1004	{ "fw_dump", &sysfs_fw_dump_attr, },
1005	{ "nvram", &sysfs_nvram_attr, },
1006	{ "optrom", &sysfs_optrom_attr, },
1007	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
1008	{ "vpd", &sysfs_vpd_attr, 1 },
1009	{ "sfp", &sysfs_sfp_attr, 1 },
1010	{ "reset", &sysfs_reset_attr, },
1011	{ "issue_logo", &sysfs_issue_logo_attr, },
1012	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
1013	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
1014	{ NULL },
1015};
1016
1017void
1018qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
1019{
1020	struct Scsi_Host *host = vha->host;
1021	struct sysfs_entry *iter;
1022	int ret;
1023
1024	for (iter = bin_file_entries; iter->name; iter++) {
1025		if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
1026			continue;
1027		if (iter->type == 2 && !IS_QLA25XX(vha->hw))
1028			continue;
1029		if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
1030			continue;
1031
1032		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
1033		    iter->attr);
1034		if (ret)
1035			ql_log(ql_log_warn, vha, 0x00f3,
1036			    "Unable to create sysfs %s binary attribute (%d).\n",
1037			    iter->name, ret);
1038		else
1039			ql_dbg(ql_dbg_init, vha, 0x00f4,
1040			    "Successfully created sysfs %s binary attribute.\n",
1041			    iter->name);
1042	}
1043}
1044
1045void
1046qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
1047{
1048	struct Scsi_Host *host = vha->host;
1049	struct sysfs_entry *iter;
1050	struct qla_hw_data *ha = vha->hw;
1051
1052	for (iter = bin_file_entries; iter->name; iter++) {
1053		if (iter->type && !IS_FWI2_CAPABLE(ha))
1054			continue;
1055		if (iter->type == 2 && !IS_QLA25XX(ha))
1056			continue;
1057		if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
1058			continue;
1059
1060		sysfs_remove_bin_file(&host->shost_gendev.kobj,
1061		    iter->attr);
1062	}
1063
1064	if (stop_beacon && ha->beacon_blink_led == 1)
1065		ha->isp_ops->beacon_off(vha);
1066}
1067
1068/* Scsi_Host attributes. */
1069
1070static ssize_t
1071qla2x00_fw_version_show(struct device *dev,
1072			struct device_attribute *attr, char *buf)
1073{
1074	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1075	struct qla_hw_data *ha = vha->hw;
1076	char fw_str[128];
1077
1078	return scnprintf(buf, PAGE_SIZE, "%s\n",
1079	    ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1080}
1081
1082static ssize_t
1083qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1084			char *buf)
1085{
1086	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1087	struct qla_hw_data *ha = vha->hw;
1088	uint32_t sn;
1089
1090	if (IS_QLAFX00(vha->hw)) {
1091		return scnprintf(buf, PAGE_SIZE, "%s\n",
1092		    vha->hw->mr.serial_num);
1093	} else if (IS_FWI2_CAPABLE(ha)) {
1094		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1095		return strlen(strcat(buf, "\n"));
1096	}
1097
1098	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1099	return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
1100	    sn % 100000);
1101}
1102
1103static ssize_t
1104qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1105		      char *buf)
1106{
1107	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1108
1109	return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1110}
1111
1112static ssize_t
1113qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1114		    char *buf)
1115{
1116	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1117	struct qla_hw_data *ha = vha->hw;
1118
1119	if (IS_QLAFX00(vha->hw))
1120		return scnprintf(buf, PAGE_SIZE, "%s\n",
1121		    vha->hw->mr.hw_version);
1122
1123	return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1124	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
1125	    ha->product_id[3]);
1126}
1127
1128static ssize_t
1129qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1130			char *buf)
1131{
1132	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1133
1134	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1135}
1136
1137static ssize_t
1138qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1139			char *buf)
1140{
1141	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1142
1143	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
1144}
1145
1146static ssize_t
1147qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1148		      char *buf)
1149{
1150	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1151	char pci_info[30];
1152
1153	return scnprintf(buf, PAGE_SIZE, "%s\n",
1154			 vha->hw->isp_ops->pci_info_str(vha, pci_info,
1155							sizeof(pci_info)));
1156}
1157
1158static ssize_t
1159qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1160			char *buf)
1161{
1162	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1163	struct qla_hw_data *ha = vha->hw;
1164	int len = 0;
1165
1166	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1167	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
1168	    vha->device_flags & DFLG_NO_CABLE)
1169		len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1170	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1171	    qla2x00_chip_is_down(vha))
1172		len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1173	else {
1174		len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1175
1176		switch (ha->current_topology) {
1177		case ISP_CFG_NL:
1178			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1179			break;
1180		case ISP_CFG_FL:
1181			len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1182			break;
1183		case ISP_CFG_N:
1184			len += scnprintf(buf + len, PAGE_SIZE-len,
1185			    "N_Port to N_Port\n");
1186			break;
1187		case ISP_CFG_F:
1188			len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1189			break;
1190		default:
1191			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1192			break;
1193		}
1194	}
1195	return len;
1196}
1197
1198static ssize_t
1199qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1200		 char *buf)
1201{
1202	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1203	int len = 0;
1204
1205	switch (vha->hw->zio_mode) {
1206	case QLA_ZIO_MODE_6:
1207		len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1208		break;
1209	case QLA_ZIO_DISABLED:
1210		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1211		break;
1212	}
1213	return len;
1214}
1215
1216static ssize_t
1217qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1218		  const char *buf, size_t count)
1219{
1220	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1221	struct qla_hw_data *ha = vha->hw;
1222	int val = 0;
1223	uint16_t zio_mode;
1224
1225	if (!IS_ZIO_SUPPORTED(ha))
1226		return -ENOTSUPP;
1227
1228	if (sscanf(buf, "%d", &val) != 1)
1229		return -EINVAL;
1230
1231	if (val)
1232		zio_mode = QLA_ZIO_MODE_6;
1233	else
1234		zio_mode = QLA_ZIO_DISABLED;
1235
1236	/* Update per-hba values and queue a reset. */
1237	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1238		ha->zio_mode = zio_mode;
1239		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1240	}
1241	return strlen(buf);
1242}
1243
1244static ssize_t
1245qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1246		       char *buf)
1247{
1248	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1249
1250	return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1251}
1252
1253static ssize_t
1254qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1255			const char *buf, size_t count)
1256{
1257	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1258	int val = 0;
1259	uint16_t zio_timer;
1260
1261	if (sscanf(buf, "%d", &val) != 1)
1262		return -EINVAL;
1263	if (val > 25500 || val < 100)
1264		return -ERANGE;
1265
1266	zio_timer = (uint16_t)(val / 100);
1267	vha->hw->zio_timer = zio_timer;
1268
1269	return strlen(buf);
1270}
1271
1272static ssize_t
1273qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1274		       char *buf)
1275{
1276	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1277
1278	return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1279	    vha->hw->last_zio_threshold);
1280}
1281
1282static ssize_t
1283qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1284    const char *buf, size_t count)
1285{
1286	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1287	int val = 0;
1288
1289	if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1290		return -EINVAL;
1291	if (sscanf(buf, "%d", &val) != 1)
1292		return -EINVAL;
1293	if (val < 0 || val > 256)
1294		return -ERANGE;
1295
1296	atomic_set(&vha->hw->zio_threshold, val);
1297	return strlen(buf);
1298}
1299
1300static ssize_t
1301qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1302		    char *buf)
1303{
1304	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1305	int len = 0;
1306
1307	if (vha->hw->beacon_blink_led)
1308		len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1309	else
1310		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1311	return len;
1312}
1313
1314static ssize_t
1315qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1316		     const char *buf, size_t count)
1317{
1318	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1319	struct qla_hw_data *ha = vha->hw;
1320	int val = 0;
1321	int rval;
1322
1323	if (IS_QLA2100(ha) || IS_QLA2200(ha))
1324		return -EPERM;
1325
1326	if (sscanf(buf, "%d", &val) != 1)
1327		return -EINVAL;
1328
1329	mutex_lock(&vha->hw->optrom_mutex);
1330	if (qla2x00_chip_is_down(vha)) {
1331		mutex_unlock(&vha->hw->optrom_mutex);
1332		ql_log(ql_log_warn, vha, 0x707a,
1333		    "Abort ISP active -- ignoring beacon request.\n");
1334		return -EBUSY;
1335	}
1336
1337	if (val)
1338		rval = ha->isp_ops->beacon_on(vha);
1339	else
1340		rval = ha->isp_ops->beacon_off(vha);
1341
1342	if (rval != QLA_SUCCESS)
1343		count = 0;
1344
1345	mutex_unlock(&vha->hw->optrom_mutex);
1346
1347	return count;
1348}
1349
1350static ssize_t
1351qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr,
1352	char *buf)
1353{
1354	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1355	struct qla_hw_data *ha = vha->hw;
1356	uint16_t led[3] = { 0 };
1357
1358	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1359		return -EPERM;
1360
1361	if (ql26xx_led_config(vha, 0, led))
1362		return scnprintf(buf, PAGE_SIZE, "\n");
1363
1364	return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n",
1365	    led[0], led[1], led[2]);
1366}
1367
1368static ssize_t
1369qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr,
1370	const char *buf, size_t count)
1371{
1372	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1373	struct qla_hw_data *ha = vha->hw;
1374	uint16_t options = BIT_0;
1375	uint16_t led[3] = { 0 };
1376	uint16_t word[4];
1377	int n;
1378
1379	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1380		return -EPERM;
1381
1382	n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3);
1383	if (n == 4) {
1384		if (word[0] == 3) {
1385			options |= BIT_3|BIT_2|BIT_1;
1386			led[0] = word[1];
1387			led[1] = word[2];
1388			led[2] = word[3];
1389			goto write;
1390		}
1391		return -EINVAL;
1392	}
1393
1394	if (n == 2) {
1395		/* check led index */
1396		if (word[0] == 0) {
1397			options |= BIT_2;
1398			led[0] = word[1];
1399			goto write;
1400		}
1401		if (word[0] == 1) {
1402			options |= BIT_3;
1403			led[1] = word[1];
1404			goto write;
1405		}
1406		if (word[0] == 2) {
1407			options |= BIT_1;
1408			led[2] = word[1];
1409			goto write;
1410		}
1411		return -EINVAL;
1412	}
1413
1414	return -EINVAL;
1415
1416write:
1417	if (ql26xx_led_config(vha, options, led))
1418		return -EFAULT;
1419
1420	return count;
1421}
1422
1423static ssize_t
1424qla2x00_optrom_bios_version_show(struct device *dev,
1425				 struct device_attribute *attr, char *buf)
1426{
1427	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1428	struct qla_hw_data *ha = vha->hw;
1429
1430	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1431	    ha->bios_revision[0]);
1432}
1433
1434static ssize_t
1435qla2x00_optrom_efi_version_show(struct device *dev,
1436				struct device_attribute *attr, char *buf)
1437{
1438	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1439	struct qla_hw_data *ha = vha->hw;
1440
1441	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1442	    ha->efi_revision[0]);
1443}
1444
1445static ssize_t
1446qla2x00_optrom_fcode_version_show(struct device *dev,
1447				  struct device_attribute *attr, char *buf)
1448{
1449	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1450	struct qla_hw_data *ha = vha->hw;
1451
1452	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1453	    ha->fcode_revision[0]);
1454}
1455
1456static ssize_t
1457qla2x00_optrom_fw_version_show(struct device *dev,
1458			       struct device_attribute *attr, char *buf)
1459{
1460	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1461	struct qla_hw_data *ha = vha->hw;
1462
1463	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1464	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1465	    ha->fw_revision[3]);
1466}
1467
1468static ssize_t
1469qla2x00_optrom_gold_fw_version_show(struct device *dev,
1470    struct device_attribute *attr, char *buf)
1471{
1472	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1473	struct qla_hw_data *ha = vha->hw;
1474
1475	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1476	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1477		return scnprintf(buf, PAGE_SIZE, "\n");
1478
1479	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1480	    ha->gold_fw_version[0], ha->gold_fw_version[1],
1481	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
1482}
1483
1484static ssize_t
1485qla2x00_total_isp_aborts_show(struct device *dev,
1486			      struct device_attribute *attr, char *buf)
1487{
1488	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1489
1490	return scnprintf(buf, PAGE_SIZE, "%d\n",
1491	    vha->qla_stats.total_isp_aborts);
1492}
1493
1494static ssize_t
1495qla24xx_84xx_fw_version_show(struct device *dev,
1496	struct device_attribute *attr, char *buf)
1497{
1498	int rval = QLA_SUCCESS;
1499	uint16_t status[2] = { 0 };
1500	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1501	struct qla_hw_data *ha = vha->hw;
1502
1503	if (!IS_QLA84XX(ha))
1504		return scnprintf(buf, PAGE_SIZE, "\n");
1505
1506	if (!ha->cs84xx->op_fw_version) {
1507		rval = qla84xx_verify_chip(vha, status);
1508
1509		if (!rval && !status[0])
1510			return scnprintf(buf, PAGE_SIZE, "%u\n",
1511			    (uint32_t)ha->cs84xx->op_fw_version);
1512	}
1513
1514	return scnprintf(buf, PAGE_SIZE, "\n");
1515}
1516
1517static ssize_t
1518qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
1519    char *buf)
1520{
1521	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1522	struct qla_hw_data *ha = vha->hw;
1523
1524	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1525		return scnprintf(buf, PAGE_SIZE, "\n");
1526
1527	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1528	    ha->serdes_version[0], ha->serdes_version[1],
1529	    ha->serdes_version[2]);
1530}
1531
1532static ssize_t
1533qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1534    char *buf)
1535{
1536	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1537	struct qla_hw_data *ha = vha->hw;
1538
1539	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
1540	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1541		return scnprintf(buf, PAGE_SIZE, "\n");
1542
1543	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1544	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1545	    ha->mpi_capabilities);
1546}
1547
1548static ssize_t
1549qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1550    char *buf)
1551{
1552	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1553	struct qla_hw_data *ha = vha->hw;
1554
1555	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1556		return scnprintf(buf, PAGE_SIZE, "\n");
1557
1558	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1559	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1560}
1561
1562static ssize_t
1563qla2x00_flash_block_size_show(struct device *dev,
1564			      struct device_attribute *attr, char *buf)
1565{
1566	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1567	struct qla_hw_data *ha = vha->hw;
1568
1569	return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1570}
1571
1572static ssize_t
1573qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1574    char *buf)
1575{
1576	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1577
1578	if (!IS_CNA_CAPABLE(vha->hw))
1579		return scnprintf(buf, PAGE_SIZE, "\n");
1580
1581	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1582}
1583
1584static ssize_t
1585qla2x00_vn_port_mac_address_show(struct device *dev,
1586    struct device_attribute *attr, char *buf)
1587{
1588	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1589
1590	if (!IS_CNA_CAPABLE(vha->hw))
1591		return scnprintf(buf, PAGE_SIZE, "\n");
1592
1593	return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1594}
1595
1596static ssize_t
1597qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1598    char *buf)
1599{
1600	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1601
1602	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1603}
1604
1605static ssize_t
1606qla2x00_thermal_temp_show(struct device *dev,
1607	struct device_attribute *attr, char *buf)
1608{
1609	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1610	uint16_t temp = 0;
1611	int rc;
1612
1613	mutex_lock(&vha->hw->optrom_mutex);
1614	if (qla2x00_chip_is_down(vha)) {
1615		mutex_unlock(&vha->hw->optrom_mutex);
1616		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1617		goto done;
1618	}
1619
1620	if (vha->hw->flags.eeh_busy) {
1621		mutex_unlock(&vha->hw->optrom_mutex);
1622		ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1623		goto done;
1624	}
1625
1626	rc = qla2x00_get_thermal_temp(vha, &temp);
1627	mutex_unlock(&vha->hw->optrom_mutex);
1628	if (rc == QLA_SUCCESS)
1629		return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1630
1631done:
1632	return scnprintf(buf, PAGE_SIZE, "\n");
1633}
1634
1635static ssize_t
1636qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1637    char *buf)
1638{
1639	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1640	int rval = QLA_FUNCTION_FAILED;
1641	uint16_t state[6];
1642	uint32_t pstate;
1643
1644	if (IS_QLAFX00(vha->hw)) {
1645		pstate = qlafx00_fw_state_show(dev, attr, buf);
1646		return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1647	}
1648
1649	mutex_lock(&vha->hw->optrom_mutex);
1650	if (qla2x00_chip_is_down(vha)) {
1651		mutex_unlock(&vha->hw->optrom_mutex);
1652		ql_log(ql_log_warn, vha, 0x707c,
1653		    "ISP reset active.\n");
1654		goto out;
1655	} else if (vha->hw->flags.eeh_busy) {
1656		mutex_unlock(&vha->hw->optrom_mutex);
1657		goto out;
1658	}
1659
1660	rval = qla2x00_get_firmware_state(vha, state);
1661	mutex_unlock(&vha->hw->optrom_mutex);
1662out:
1663	if (rval != QLA_SUCCESS) {
1664		memset(state, -1, sizeof(state));
1665		rval = qla2x00_get_firmware_state(vha, state);
1666	}
1667
1668	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1669	    state[0], state[1], state[2], state[3], state[4], state[5]);
1670}
1671
1672static ssize_t
1673qla2x00_diag_requests_show(struct device *dev,
1674	struct device_attribute *attr, char *buf)
1675{
1676	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1677
1678	if (!IS_BIDI_CAPABLE(vha->hw))
1679		return scnprintf(buf, PAGE_SIZE, "\n");
1680
1681	return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1682}
1683
1684static ssize_t
1685qla2x00_diag_megabytes_show(struct device *dev,
1686	struct device_attribute *attr, char *buf)
1687{
1688	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1689
1690	if (!IS_BIDI_CAPABLE(vha->hw))
1691		return scnprintf(buf, PAGE_SIZE, "\n");
1692
1693	return scnprintf(buf, PAGE_SIZE, "%llu\n",
1694	    vha->bidi_stats.transfer_bytes >> 20);
1695}
1696
1697static ssize_t
1698qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1699	char *buf)
1700{
1701	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1702	struct qla_hw_data *ha = vha->hw;
1703	uint32_t size;
1704
1705	if (!ha->fw_dumped)
1706		size = 0;
1707	else if (IS_P3P_TYPE(ha))
1708		size = ha->md_template_size + ha->md_dump_size;
1709	else
1710		size = ha->fw_dump_len;
1711
1712	return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1713}
1714
1715static ssize_t
1716qla2x00_allow_cna_fw_dump_show(struct device *dev,
1717	struct device_attribute *attr, char *buf)
1718{
1719	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1720
1721	if (!IS_P3P_TYPE(vha->hw))
1722		return scnprintf(buf, PAGE_SIZE, "\n");
1723	else
1724		return scnprintf(buf, PAGE_SIZE, "%s\n",
1725		    vha->hw->allow_cna_fw_dump ? "true" : "false");
1726}
1727
1728static ssize_t
1729qla2x00_allow_cna_fw_dump_store(struct device *dev,
1730	struct device_attribute *attr, const char *buf, size_t count)
1731{
1732	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1733	int val = 0;
1734
1735	if (!IS_P3P_TYPE(vha->hw))
1736		return -EINVAL;
1737
1738	if (sscanf(buf, "%d", &val) != 1)
1739		return -EINVAL;
1740
1741	vha->hw->allow_cna_fw_dump = val != 0;
1742
1743	return strlen(buf);
1744}
1745
1746static ssize_t
1747qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
1748	char *buf)
1749{
1750	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1751	struct qla_hw_data *ha = vha->hw;
1752
1753	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1754		return scnprintf(buf, PAGE_SIZE, "\n");
1755
1756	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1757	    ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
1758}
1759
1760static ssize_t
1761qla2x00_min_supported_speed_show(struct device *dev,
1762    struct device_attribute *attr, char *buf)
1763{
1764	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1765	struct qla_hw_data *ha = vha->hw;
1766
1767	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1768		return scnprintf(buf, PAGE_SIZE, "\n");
1769
1770	return scnprintf(buf, PAGE_SIZE, "%s\n",
1771	    ha->min_supported_speed == 6 ? "64Gps" :
1772	    ha->min_supported_speed == 5 ? "32Gps" :
1773	    ha->min_supported_speed == 4 ? "16Gps" :
1774	    ha->min_supported_speed == 3 ? "8Gps" :
1775	    ha->min_supported_speed == 2 ? "4Gps" :
1776	    ha->min_supported_speed != 0 ? "unknown" : "");
1777}
1778
1779static ssize_t
1780qla2x00_max_supported_speed_show(struct device *dev,
1781    struct device_attribute *attr, char *buf)
1782{
1783	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1784	struct qla_hw_data *ha = vha->hw;
1785
1786	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1787		return scnprintf(buf, PAGE_SIZE, "\n");
1788
1789	return scnprintf(buf, PAGE_SIZE, "%s\n",
1790	    ha->max_supported_speed  == 2 ? "64Gps" :
1791	    ha->max_supported_speed  == 1 ? "32Gps" :
1792	    ha->max_supported_speed  == 0 ? "16Gps" : "unknown");
1793}
1794
1795static ssize_t
1796qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1797    const char *buf, size_t count)
1798{
1799	struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1800	ulong type, speed;
1801	int oldspeed, rval;
1802	int mode = QLA_SET_DATA_RATE_LR;
1803	struct qla_hw_data *ha = vha->hw;
1804
1805	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
1806		ql_log(ql_log_warn, vha, 0x70d8,
1807		    "Speed setting not supported \n");
1808		return -EINVAL;
1809	}
1810
1811	rval = kstrtol(buf, 10, &type);
1812	if (rval)
1813		return rval;
1814	speed = type;
1815	if (type == 40 || type == 80 || type == 160 ||
1816	    type == 320) {
1817		ql_dbg(ql_dbg_user, vha, 0x70d9,
1818		    "Setting will be affected after a loss of sync\n");
1819		type = type/10;
1820		mode = QLA_SET_DATA_RATE_NOLR;
1821	}
1822
1823	oldspeed = ha->set_data_rate;
1824
1825	switch (type) {
1826	case 0:
1827		ha->set_data_rate = PORT_SPEED_AUTO;
1828		break;
1829	case 4:
1830		ha->set_data_rate = PORT_SPEED_4GB;
1831		break;
1832	case 8:
1833		ha->set_data_rate = PORT_SPEED_8GB;
1834		break;
1835	case 16:
1836		ha->set_data_rate = PORT_SPEED_16GB;
1837		break;
1838	case 32:
1839		ha->set_data_rate = PORT_SPEED_32GB;
1840		break;
1841	default:
1842		ql_log(ql_log_warn, vha, 0x1199,
1843		    "Unrecognized speed setting:%lx. Setting Autoneg\n",
1844		    speed);
1845		ha->set_data_rate = PORT_SPEED_AUTO;
1846	}
1847
1848	if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
1849		return -EINVAL;
1850
1851	ql_log(ql_log_info, vha, 0x70da,
1852	    "Setting speed to %lx Gbps \n", type);
1853
1854	rval = qla2x00_set_data_rate(vha, mode);
1855	if (rval != QLA_SUCCESS)
1856		return -EIO;
1857
1858	return strlen(buf);
1859}
1860
1861static const struct {
1862	u16 rate;
1863	char *str;
1864} port_speed_str[] = {
1865	{ PORT_SPEED_4GB, "4" },
1866	{ PORT_SPEED_8GB, "8" },
1867	{ PORT_SPEED_16GB, "16" },
1868	{ PORT_SPEED_32GB, "32" },
1869	{ PORT_SPEED_64GB, "64" },
1870	{ PORT_SPEED_10GB, "10" },
1871};
1872
1873static ssize_t
1874qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
1875    char *buf)
1876{
1877	struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1878	struct qla_hw_data *ha = vha->hw;
1879	ssize_t rval;
1880	u16 i;
1881	char *speed = "Unknown";
1882
1883	rval = qla2x00_get_data_rate(vha);
1884	if (rval != QLA_SUCCESS) {
1885		ql_log(ql_log_warn, vha, 0x70db,
1886		    "Unable to get port speed rval:%zd\n", rval);
1887		return -EINVAL;
1888	}
1889
1890	for (i = 0; i < ARRAY_SIZE(port_speed_str); i++) {
1891		if (port_speed_str[i].rate != ha->link_data_rate)
1892			continue;
1893		speed = port_speed_str[i].str;
1894		break;
1895	}
1896
1897	return scnprintf(buf, PAGE_SIZE, "%s\n", speed);
1898}
1899
1900static ssize_t
1901qla2x00_mpi_pause_store(struct device *dev,
1902	struct device_attribute *attr, const char *buf, size_t count)
1903{
1904	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1905	int rval = 0;
1906
1907	if (sscanf(buf, "%d", &rval) != 1)
1908		return -EINVAL;
1909
1910	ql_log(ql_log_warn, vha, 0x7089, "Pausing MPI...\n");
1911
1912	rval = qla83xx_wr_reg(vha, 0x002012d4, 0x30000001);
1913
1914	if (rval != QLA_SUCCESS) {
1915		ql_log(ql_log_warn, vha, 0x708a, "Unable to pause MPI.\n");
1916		count = 0;
1917	}
1918
1919	return count;
1920}
1921
1922static DEVICE_ATTR(mpi_pause, S_IWUSR, NULL, qla2x00_mpi_pause_store);
1923
1924/* ----- */
1925
1926static ssize_t
1927qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1928{
1929	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1930	int len = 0;
1931
1932	len += scnprintf(buf + len, PAGE_SIZE-len,
1933	    "Supported options: enabled | disabled | dual | exclusive\n");
1934
1935	/* --- */
1936	len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1937
1938	switch (vha->qlini_mode) {
1939	case QLA2XXX_INI_MODE_EXCLUSIVE:
1940		len += scnprintf(buf + len, PAGE_SIZE-len,
1941		    QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1942		break;
1943	case QLA2XXX_INI_MODE_DISABLED:
1944		len += scnprintf(buf + len, PAGE_SIZE-len,
1945		    QLA2XXX_INI_MODE_STR_DISABLED);
1946		break;
1947	case QLA2XXX_INI_MODE_ENABLED:
1948		len += scnprintf(buf + len, PAGE_SIZE-len,
1949		    QLA2XXX_INI_MODE_STR_ENABLED);
1950		break;
1951	case QLA2XXX_INI_MODE_DUAL:
1952		len += scnprintf(buf + len, PAGE_SIZE-len,
1953		    QLA2XXX_INI_MODE_STR_DUAL);
1954		break;
1955	}
1956	len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
1957
1958	return len;
1959}
1960
1961static char *mode_to_str[] = {
1962	"exclusive",
1963	"disabled",
1964	"enabled",
1965	"dual",
1966};
1967
1968#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
1969static void qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1970{
1971	enum {
1972		NO_ACTION,
1973		MODE_CHANGE_ACCEPT,
1974		MODE_CHANGE_NO_ACTION,
1975		TARGET_STILL_ACTIVE,
1976	};
1977	int action = NO_ACTION;
1978	int set_mode = 0;
1979	u8  eo_toggle = 0;	/* exchange offload flipped */
1980
1981	switch (vha->qlini_mode) {
1982	case QLA2XXX_INI_MODE_DISABLED:
1983		switch (op) {
1984		case QLA2XXX_INI_MODE_DISABLED:
1985			if (qla_tgt_mode_enabled(vha)) {
1986				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1987				    vha->hw->flags.exchoffld_enabled)
1988					eo_toggle = 1;
1989				if (((vha->ql2xexchoffld !=
1990				    vha->u_ql2xexchoffld) &&
1991				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1992				    eo_toggle) {
1993					/*
1994					 * The number of exchange to be offload
1995					 * was tweaked or offload option was
1996					 * flipped
1997					 */
1998					action = MODE_CHANGE_ACCEPT;
1999				} else {
2000					action = MODE_CHANGE_NO_ACTION;
2001				}
2002			} else {
2003				action = MODE_CHANGE_NO_ACTION;
2004			}
2005			break;
2006		case QLA2XXX_INI_MODE_EXCLUSIVE:
2007			if (qla_tgt_mode_enabled(vha)) {
2008				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
2009				    vha->hw->flags.exchoffld_enabled)
2010					eo_toggle = 1;
2011				if (((vha->ql2xexchoffld !=
2012				    vha->u_ql2xexchoffld) &&
2013				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
2014				    eo_toggle) {
2015					/*
2016					 * The number of exchange to be offload
2017					 * was tweaked or offload option was
2018					 * flipped
2019					 */
2020					action = MODE_CHANGE_ACCEPT;
2021				} else {
2022					action = MODE_CHANGE_NO_ACTION;
2023				}
2024			} else {
2025				action = MODE_CHANGE_ACCEPT;
2026			}
2027			break;
2028		case QLA2XXX_INI_MODE_DUAL:
2029			action = MODE_CHANGE_ACCEPT;
2030			/* active_mode is target only, reset it to dual */
2031			if (qla_tgt_mode_enabled(vha)) {
2032				set_mode = 1;
2033				action = MODE_CHANGE_ACCEPT;
2034			} else {
2035				action = MODE_CHANGE_NO_ACTION;
2036			}
2037			break;
2038
2039		case QLA2XXX_INI_MODE_ENABLED:
2040			if (qla_tgt_mode_enabled(vha))
2041				action = TARGET_STILL_ACTIVE;
2042			else {
2043				action = MODE_CHANGE_ACCEPT;
2044				set_mode = 1;
2045			}
2046			break;
2047		}
2048		break;
2049
2050	case QLA2XXX_INI_MODE_EXCLUSIVE:
2051		switch (op) {
2052		case QLA2XXX_INI_MODE_EXCLUSIVE:
2053			if (qla_tgt_mode_enabled(vha)) {
2054				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
2055				    vha->hw->flags.exchoffld_enabled)
2056					eo_toggle = 1;
2057				if (((vha->ql2xexchoffld !=
2058				    vha->u_ql2xexchoffld) &&
2059				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
2060				    eo_toggle)
2061					/*
2062					 * The number of exchange to be offload
2063					 * was tweaked or offload option was
2064					 * flipped
2065					 */
2066					action = MODE_CHANGE_ACCEPT;
2067				else
2068					action = NO_ACTION;
2069			} else
2070				action = NO_ACTION;
2071
2072			break;
2073
2074		case QLA2XXX_INI_MODE_DISABLED:
2075			if (qla_tgt_mode_enabled(vha)) {
2076				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
2077				    vha->hw->flags.exchoffld_enabled)
2078					eo_toggle = 1;
2079				if (((vha->ql2xexchoffld !=
2080				      vha->u_ql2xexchoffld) &&
2081				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
2082				    eo_toggle)
2083					action = MODE_CHANGE_ACCEPT;
2084				else
2085					action = MODE_CHANGE_NO_ACTION;
2086			} else
2087				action = MODE_CHANGE_NO_ACTION;
2088			break;
2089
2090		case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
2091			if (qla_tgt_mode_enabled(vha)) {
2092				action = MODE_CHANGE_ACCEPT;
2093				set_mode = 1;
2094			} else
2095				action = MODE_CHANGE_ACCEPT;
2096			break;
2097
2098		case QLA2XXX_INI_MODE_ENABLED:
2099			if (qla_tgt_mode_enabled(vha))
2100				action = TARGET_STILL_ACTIVE;
2101			else {
2102				if (vha->hw->flags.fw_started)
2103					action = MODE_CHANGE_NO_ACTION;
2104				else
2105					action = MODE_CHANGE_ACCEPT;
2106			}
2107			break;
2108		}
2109		break;
2110
2111	case QLA2XXX_INI_MODE_ENABLED:
2112		switch (op) {
2113		case QLA2XXX_INI_MODE_ENABLED:
2114			if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
2115			    vha->hw->flags.exchoffld_enabled)
2116				eo_toggle = 1;
2117			if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
2118				NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
2119			    eo_toggle)
2120				action = MODE_CHANGE_ACCEPT;
2121			else
2122				action = NO_ACTION;
2123			break;
2124		case QLA2XXX_INI_MODE_DUAL:
2125		case QLA2XXX_INI_MODE_DISABLED:
2126			action = MODE_CHANGE_ACCEPT;
2127			break;
2128		default:
2129			action = MODE_CHANGE_NO_ACTION;
2130			break;
2131		}
2132		break;
2133
2134	case QLA2XXX_INI_MODE_DUAL:
2135		switch (op) {
2136		case QLA2XXX_INI_MODE_DUAL:
2137			if (qla_tgt_mode_enabled(vha) ||
2138			    qla_dual_mode_enabled(vha)) {
2139				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2140					vha->u_ql2xiniexchg) !=
2141				    vha->hw->flags.exchoffld_enabled)
2142					eo_toggle = 1;
2143
2144				if ((((vha->ql2xexchoffld +
2145				       vha->ql2xiniexchg) !=
2146				    (vha->u_ql2xiniexchg +
2147				     vha->u_ql2xexchoffld)) &&
2148				    NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2149					vha->u_ql2xexchoffld)) || eo_toggle)
2150					action = MODE_CHANGE_ACCEPT;
2151				else
2152					action = NO_ACTION;
2153			} else {
2154				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2155					vha->u_ql2xiniexchg) !=
2156				    vha->hw->flags.exchoffld_enabled)
2157					eo_toggle = 1;
2158
2159				if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
2160				    != (vha->u_ql2xiniexchg +
2161					vha->u_ql2xexchoffld)) &&
2162				    NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2163					vha->u_ql2xexchoffld)) || eo_toggle)
2164					action = MODE_CHANGE_NO_ACTION;
2165				else
2166					action = NO_ACTION;
2167			}
2168			break;
2169
2170		case QLA2XXX_INI_MODE_DISABLED:
2171			if (qla_tgt_mode_enabled(vha) ||
2172			    qla_dual_mode_enabled(vha)) {
2173				/* turning off initiator mode */
2174				set_mode = 1;
2175				action = MODE_CHANGE_ACCEPT;
2176			} else {
2177				action = MODE_CHANGE_NO_ACTION;
2178			}
2179			break;
2180
2181		case QLA2XXX_INI_MODE_EXCLUSIVE:
2182			if (qla_tgt_mode_enabled(vha) ||
2183			    qla_dual_mode_enabled(vha)) {
2184				set_mode = 1;
2185				action = MODE_CHANGE_ACCEPT;
2186			} else {
2187				action = MODE_CHANGE_ACCEPT;
2188			}
2189			break;
2190
2191		case QLA2XXX_INI_MODE_ENABLED:
2192			if (qla_tgt_mode_enabled(vha) ||
2193			    qla_dual_mode_enabled(vha)) {
2194				action = TARGET_STILL_ACTIVE;
2195			} else {
2196				action = MODE_CHANGE_ACCEPT;
2197			}
2198		}
2199		break;
2200	}
2201
2202	switch (action) {
2203	case MODE_CHANGE_ACCEPT:
2204		ql_log(ql_log_warn, vha, 0xffff,
2205		    "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2206		    mode_to_str[vha->qlini_mode], mode_to_str[op],
2207		    vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2208		    vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2209
2210		vha->qlini_mode = op;
2211		vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2212		vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2213		if (set_mode)
2214			qlt_set_mode(vha);
2215		vha->flags.online = 1;
2216		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2217		break;
2218
2219	case MODE_CHANGE_NO_ACTION:
2220		ql_log(ql_log_warn, vha, 0xffff,
2221		    "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2222		    mode_to_str[vha->qlini_mode], mode_to_str[op],
2223		    vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2224		    vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2225		vha->qlini_mode = op;
2226		vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2227		vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2228		break;
2229
2230	case TARGET_STILL_ACTIVE:
2231		ql_log(ql_log_warn, vha, 0xffff,
2232		    "Target Mode is active. Unable to change Mode.\n");
2233		break;
2234
2235	case NO_ACTION:
2236	default:
2237		ql_log(ql_log_warn, vha, 0xffff,
2238		    "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
2239		    vha->qlini_mode, op,
2240		    vha->ql2xexchoffld, vha->u_ql2xexchoffld);
2241		break;
2242	}
2243}
2244
2245static ssize_t
2246qlini_mode_store(struct device *dev, struct device_attribute *attr,
2247    const char *buf, size_t count)
2248{
2249	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2250	int ini;
2251
2252	if (!buf)
2253		return -EINVAL;
2254
2255	if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
2256		strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
2257		ini = QLA2XXX_INI_MODE_EXCLUSIVE;
2258	else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
2259		strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
2260		ini = QLA2XXX_INI_MODE_DISABLED;
2261	else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
2262		  strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
2263		ini = QLA2XXX_INI_MODE_ENABLED;
2264	else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
2265		strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
2266		ini = QLA2XXX_INI_MODE_DUAL;
2267	else
2268		return -EINVAL;
2269
2270	qla_set_ini_mode(vha, ini);
2271	return strlen(buf);
2272}
2273
2274static ssize_t
2275ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
2276    char *buf)
2277{
2278	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2279	int len = 0;
2280
2281	len += scnprintf(buf + len, PAGE_SIZE-len,
2282		"target exchange: new %d : current: %d\n\n",
2283		vha->u_ql2xexchoffld, vha->ql2xexchoffld);
2284
2285	len += scnprintf(buf + len, PAGE_SIZE-len,
2286	    "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2287	    vha->host_no);
2288
2289	return len;
2290}
2291
2292static ssize_t
2293ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2294    const char *buf, size_t count)
2295{
2296	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2297	int val = 0;
2298
2299	if (sscanf(buf, "%d", &val) != 1)
2300		return -EINVAL;
2301
2302	if (val > FW_MAX_EXCHANGES_CNT)
2303		val = FW_MAX_EXCHANGES_CNT;
2304	else if (val < 0)
2305		val = 0;
2306
2307	vha->u_ql2xexchoffld = val;
2308	return strlen(buf);
2309}
2310
2311static ssize_t
2312ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2313    char *buf)
2314{
2315	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2316	int len = 0;
2317
2318	len += scnprintf(buf + len, PAGE_SIZE-len,
2319		"target exchange: new %d : current: %d\n\n",
2320		vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2321
2322	len += scnprintf(buf + len, PAGE_SIZE-len,
2323	    "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2324	    vha->host_no);
2325
2326	return len;
2327}
2328
2329static ssize_t
2330ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2331    const char *buf, size_t count)
2332{
2333	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2334	int val = 0;
2335
2336	if (sscanf(buf, "%d", &val) != 1)
2337		return -EINVAL;
2338
2339	if (val > FW_MAX_EXCHANGES_CNT)
2340		val = FW_MAX_EXCHANGES_CNT;
2341	else if (val < 0)
2342		val = 0;
2343
2344	vha->u_ql2xiniexchg = val;
2345	return strlen(buf);
2346}
2347
2348static ssize_t
2349qla2x00_dif_bundle_statistics_show(struct device *dev,
2350    struct device_attribute *attr, char *buf)
2351{
2352	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2353	struct qla_hw_data *ha = vha->hw;
2354
2355	return scnprintf(buf, PAGE_SIZE,
2356	    "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
2357	    ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
2358	    ha->dif_bundle_writes, ha->dif_bundle_kallocs,
2359	    ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
2360}
2361
2362static ssize_t
2363qla2x00_fw_attr_show(struct device *dev,
2364    struct device_attribute *attr, char *buf)
2365{
2366	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2367	struct qla_hw_data *ha = vha->hw;
2368
2369	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2370		return scnprintf(buf, PAGE_SIZE, "\n");
2371
2372	return scnprintf(buf, PAGE_SIZE, "%llx\n",
2373	    (uint64_t)ha->fw_attributes_ext[1] << 48 |
2374	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
2375	    (uint64_t)ha->fw_attributes_h << 16 |
2376	    (uint64_t)ha->fw_attributes);
2377}
2378
2379static ssize_t
2380qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
2381    char *buf)
2382{
2383	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2384
2385	return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
2386}
2387
2388static ssize_t
2389qla2x00_dport_diagnostics_show(struct device *dev,
2390	struct device_attribute *attr, char *buf)
2391{
2392	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2393
2394	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2395	    !IS_QLA28XX(vha->hw))
2396		return scnprintf(buf, PAGE_SIZE, "\n");
2397
2398	if (!*vha->dport_data)
2399		return scnprintf(buf, PAGE_SIZE, "\n");
2400
2401	return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
2402	    vha->dport_data[0], vha->dport_data[1],
2403	    vha->dport_data[2], vha->dport_data[3]);
2404}
2405static DEVICE_ATTR(dport_diagnostics, 0444,
2406	   qla2x00_dport_diagnostics_show, NULL);
2407
2408static DEVICE_STRING_ATTR_RO(driver_version, S_IRUGO, qla2x00_version_str);
2409static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
2410static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
2411static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
2412static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
2413static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
2414static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
2415static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
2416static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
2417static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
2418static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
2419		   qla2x00_zio_timer_store);
2420static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
2421		   qla2x00_beacon_store);
2422static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show,
2423		   qla2x00_beacon_config_store);
2424static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
2425		   qla2x00_optrom_bios_version_show, NULL);
2426static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
2427		   qla2x00_optrom_efi_version_show, NULL);
2428static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
2429		   qla2x00_optrom_fcode_version_show, NULL);
2430static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
2431		   NULL);
2432static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
2433    qla2x00_optrom_gold_fw_version_show, NULL);
2434static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
2435		   NULL);
2436static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
2437		   NULL);
2438static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
2439static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
2440static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
2441static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
2442		   NULL);
2443static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
2444static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
2445		   qla2x00_vn_port_mac_address_show, NULL);
2446static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
2447static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
2448static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
2449static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
2450static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
2451static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
2452static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
2453		   qla2x00_allow_cna_fw_dump_show,
2454		   qla2x00_allow_cna_fw_dump_store);
2455static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
2456static DEVICE_ATTR(min_supported_speed, 0444,
2457		   qla2x00_min_supported_speed_show, NULL);
2458static DEVICE_ATTR(max_supported_speed, 0444,
2459		   qla2x00_max_supported_speed_show, NULL);
2460static DEVICE_ATTR(zio_threshold, 0644,
2461    qla_zio_threshold_show,
2462    qla_zio_threshold_store);
2463static DEVICE_ATTR_RW(qlini_mode);
2464static DEVICE_ATTR_RW(ql2xexchoffld);
2465static DEVICE_ATTR_RW(ql2xiniexchg);
2466static DEVICE_ATTR(dif_bundle_statistics, 0444,
2467    qla2x00_dif_bundle_statistics_show, NULL);
2468static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
2469    qla2x00_port_speed_store);
2470static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
2471static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
2472
2473static struct attribute *qla2x00_host_attrs[] = {
2474	&dev_attr_driver_version.attr.attr,
2475	&dev_attr_fw_version.attr,
2476	&dev_attr_serial_num.attr,
2477	&dev_attr_isp_name.attr,
2478	&dev_attr_isp_id.attr,
2479	&dev_attr_model_name.attr,
2480	&dev_attr_model_desc.attr,
2481	&dev_attr_pci_info.attr,
2482	&dev_attr_link_state.attr,
2483	&dev_attr_zio.attr,
2484	&dev_attr_zio_timer.attr,
2485	&dev_attr_beacon.attr,
2486	&dev_attr_beacon_config.attr,
2487	&dev_attr_optrom_bios_version.attr,
2488	&dev_attr_optrom_efi_version.attr,
2489	&dev_attr_optrom_fcode_version.attr,
2490	&dev_attr_optrom_fw_version.attr,
2491	&dev_attr_84xx_fw_version.attr,
2492	&dev_attr_total_isp_aborts.attr,
2493	&dev_attr_serdes_version.attr,
2494	&dev_attr_mpi_version.attr,
2495	&dev_attr_phy_version.attr,
2496	&dev_attr_flash_block_size.attr,
2497	&dev_attr_vlan_id.attr,
2498	&dev_attr_vn_port_mac_address.attr,
2499	&dev_attr_fabric_param.attr,
2500	&dev_attr_fw_state.attr,
2501	&dev_attr_optrom_gold_fw_version.attr,
2502	&dev_attr_thermal_temp.attr,
2503	&dev_attr_diag_requests.attr,
2504	&dev_attr_diag_megabytes.attr,
2505	&dev_attr_fw_dump_size.attr,
2506	&dev_attr_allow_cna_fw_dump.attr,
2507	&dev_attr_pep_version.attr,
2508	&dev_attr_min_supported_speed.attr,
2509	&dev_attr_max_supported_speed.attr,
2510	&dev_attr_zio_threshold.attr,
2511	&dev_attr_dif_bundle_statistics.attr,
2512	&dev_attr_port_speed.attr,
2513	&dev_attr_port_no.attr,
2514	&dev_attr_fw_attr.attr,
2515	&dev_attr_dport_diagnostics.attr,
2516	&dev_attr_mpi_pause.attr,
2517	&dev_attr_qlini_mode.attr,
2518	&dev_attr_ql2xiniexchg.attr,
2519	&dev_attr_ql2xexchoffld.attr,
2520	NULL,
2521};
2522
2523static umode_t qla_host_attr_is_visible(struct kobject *kobj,
2524					struct attribute *attr, int i)
2525{
2526	if (ql2x_ini_mode != QLA2XXX_INI_MODE_DUAL &&
2527	    (attr == &dev_attr_qlini_mode.attr ||
2528	     attr == &dev_attr_ql2xiniexchg.attr ||
2529	     attr == &dev_attr_ql2xexchoffld.attr))
2530		return 0;
2531	return attr->mode;
2532}
2533
2534static const struct attribute_group qla2x00_host_attr_group = {
2535	.is_visible = qla_host_attr_is_visible,
2536	.attrs = qla2x00_host_attrs
2537};
2538
2539const struct attribute_group *qla2x00_host_groups[] = {
2540	&qla2x00_host_attr_group,
2541	NULL
2542};
2543
2544/* Host attributes. */
2545
2546static void
2547qla2x00_get_host_port_id(struct Scsi_Host *shost)
2548{
2549	scsi_qla_host_t *vha = shost_priv(shost);
2550
2551	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
2552	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
2553}
2554
2555static void
2556qla2x00_get_host_speed(struct Scsi_Host *shost)
2557{
2558	scsi_qla_host_t *vha = shost_priv(shost);
2559	u32 speed;
2560
2561	if (IS_QLAFX00(vha->hw)) {
2562		qlafx00_get_host_speed(shost);
2563		return;
2564	}
2565
2566	switch (vha->hw->link_data_rate) {
2567	case PORT_SPEED_1GB:
2568		speed = FC_PORTSPEED_1GBIT;
2569		break;
2570	case PORT_SPEED_2GB:
2571		speed = FC_PORTSPEED_2GBIT;
2572		break;
2573	case PORT_SPEED_4GB:
2574		speed = FC_PORTSPEED_4GBIT;
2575		break;
2576	case PORT_SPEED_8GB:
2577		speed = FC_PORTSPEED_8GBIT;
2578		break;
2579	case PORT_SPEED_10GB:
2580		speed = FC_PORTSPEED_10GBIT;
2581		break;
2582	case PORT_SPEED_16GB:
2583		speed = FC_PORTSPEED_16GBIT;
2584		break;
2585	case PORT_SPEED_32GB:
2586		speed = FC_PORTSPEED_32GBIT;
2587		break;
2588	case PORT_SPEED_64GB:
2589		speed = FC_PORTSPEED_64GBIT;
2590		break;
2591	default:
2592		speed = FC_PORTSPEED_UNKNOWN;
2593		break;
2594	}
2595
2596	fc_host_speed(shost) = speed;
2597}
2598
2599static void
2600qla2x00_get_host_port_type(struct Scsi_Host *shost)
2601{
2602	scsi_qla_host_t *vha = shost_priv(shost);
2603	uint32_t port_type;
2604
2605	if (vha->vp_idx) {
2606		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2607		return;
2608	}
2609	switch (vha->hw->current_topology) {
2610	case ISP_CFG_NL:
2611		port_type = FC_PORTTYPE_LPORT;
2612		break;
2613	case ISP_CFG_FL:
2614		port_type = FC_PORTTYPE_NLPORT;
2615		break;
2616	case ISP_CFG_N:
2617		port_type = FC_PORTTYPE_PTP;
2618		break;
2619	case ISP_CFG_F:
2620		port_type = FC_PORTTYPE_NPORT;
2621		break;
2622	default:
2623		port_type = FC_PORTTYPE_UNKNOWN;
2624		break;
2625	}
2626
2627	fc_host_port_type(shost) = port_type;
2628}
2629
2630static void
2631qla2x00_get_starget_node_name(struct scsi_target *starget)
2632{
2633	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2634	scsi_qla_host_t *vha = shost_priv(host);
2635	fc_port_t *fcport;
2636	u64 node_name = 0;
2637
2638	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2639		if (fcport->rport &&
2640		    starget->id == fcport->rport->scsi_target_id) {
2641			node_name = wwn_to_u64(fcport->node_name);
2642			break;
2643		}
2644	}
2645
2646	fc_starget_node_name(starget) = node_name;
2647}
2648
2649static void
2650qla2x00_get_starget_port_name(struct scsi_target *starget)
2651{
2652	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2653	scsi_qla_host_t *vha = shost_priv(host);
2654	fc_port_t *fcport;
2655	u64 port_name = 0;
2656
2657	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2658		if (fcport->rport &&
2659		    starget->id == fcport->rport->scsi_target_id) {
2660			port_name = wwn_to_u64(fcport->port_name);
2661			break;
2662		}
2663	}
2664
2665	fc_starget_port_name(starget) = port_name;
2666}
2667
2668static void
2669qla2x00_get_starget_port_id(struct scsi_target *starget)
2670{
2671	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2672	scsi_qla_host_t *vha = shost_priv(host);
2673	fc_port_t *fcport;
2674	uint32_t port_id = ~0U;
2675
2676	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2677		if (fcport->rport &&
2678		    starget->id == fcport->rport->scsi_target_id) {
2679			port_id = fcport->d_id.b.domain << 16 |
2680			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2681			break;
2682		}
2683	}
2684
2685	fc_starget_port_id(starget) = port_id;
2686}
2687
2688static inline void
2689qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2690{
2691	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2692
2693	rport->dev_loss_tmo = timeout ? timeout : 1;
2694
2695	if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port)
2696		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
2697					       rport->dev_loss_tmo);
2698}
2699
2700static void
2701qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
2702{
2703	struct Scsi_Host *host = rport_to_shost(rport);
2704	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2705	unsigned long flags;
2706
2707	if (!fcport)
2708		return;
2709
2710	ql_dbg(ql_dbg_async, fcport->vha, 0x5101,
2711	       DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d",
2712				rport->port_state));
2713
2714	/*
2715	 * Now that the rport has been deleted, set the fcport state to
2716	 * FCS_DEVICE_DEAD, if the fcport is still lost.
2717	 */
2718	if (fcport->scan_state != QLA_FCPORT_FOUND)
2719		qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
2720
2721	/*
2722	 * Transport has effectively 'deleted' the rport, clear
2723	 * all local references.
2724	 */
2725	spin_lock_irqsave(host->host_lock, flags);
2726	/* Confirm port has not reappeared before clearing pointers. */
2727	if (rport->port_state != FC_PORTSTATE_ONLINE) {
2728		fcport->rport = NULL;
2729		*((fc_port_t **)rport->dd_data) = NULL;
2730	}
2731	spin_unlock_irqrestore(host->host_lock, flags);
2732
2733	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2734		return;
2735
2736	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2737		/* Will wait for wind down of adapter */
2738		ql_dbg(ql_dbg_aer, fcport->vha, 0x900c,
2739		    "%s pci offline detected (id %06x)\n", __func__,
2740		    fcport->d_id.b24);
2741		qla_pci_set_eeh_busy(fcport->vha);
2742		qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
2743		    0, WAIT_TARGET);
2744		return;
2745	}
2746}
2747
2748static void
2749qla2x00_terminate_rport_io(struct fc_rport *rport)
2750{
2751	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2752	scsi_qla_host_t *vha;
2753
2754	if (!fcport)
2755		return;
2756
2757	if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
2758		return;
2759
2760	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2761		return;
2762	vha = fcport->vha;
2763
2764	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2765		/* Will wait for wind down of adapter */
2766		ql_dbg(ql_dbg_aer, fcport->vha, 0x900b,
2767		    "%s pci offline detected (id %06x)\n", __func__,
2768		    fcport->d_id.b24);
2769		qla_pci_set_eeh_busy(vha);
2770		qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
2771			0, WAIT_TARGET);
2772		return;
2773	}
2774	/*
2775	 * At this point all fcport's software-states are cleared.  Perform any
2776	 * final cleanup of firmware resources (PCBs and XCBs).
2777	 *
2778	 * Attempt to cleanup only lost devices.
2779	 */
2780	if (fcport->loop_id != FC_NO_LOOP_ID) {
2781		if (IS_FWI2_CAPABLE(fcport->vha->hw) &&
2782		    fcport->scan_state != QLA_FCPORT_FOUND) {
2783			if (fcport->loop_id != FC_NO_LOOP_ID)
2784				fcport->logout_on_delete = 1;
2785
2786			if (!EDIF_NEGOTIATION_PENDING(fcport)) {
2787				ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
2788				       "%s %d schedule session deletion\n", __func__,
2789				       __LINE__);
2790				qlt_schedule_sess_for_deletion(fcport);
2791			}
2792		} else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) {
2793			qla2x00_port_logout(fcport->vha, fcport);
2794		}
2795	}
2796
2797	/* check for any straggling io left behind */
2798	if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) {
2799		ql_log(ql_log_warn, vha, 0x300b,
2800		       "IO not return.  Resetting. \n");
2801		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2802		qla2xxx_wake_dpc(vha);
2803		qla2x00_wait_for_chip_reset(vha);
2804	}
2805}
2806
2807static int
2808qla2x00_issue_lip(struct Scsi_Host *shost)
2809{
2810	scsi_qla_host_t *vha = shost_priv(shost);
2811
2812	if (IS_QLAFX00(vha->hw))
2813		return 0;
2814
2815	if (vha->hw->flags.port_isolated)
2816		return 0;
2817
2818	qla2x00_loop_reset(vha);
2819	return 0;
2820}
2821
2822static struct fc_host_statistics *
2823qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2824{
2825	scsi_qla_host_t *vha = shost_priv(shost);
2826	struct qla_hw_data *ha = vha->hw;
2827	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2828	int rval;
2829	struct link_statistics *stats;
2830	dma_addr_t stats_dma;
2831	struct fc_host_statistics *p = &vha->fc_host_stat;
2832	struct qla_qpair *qpair;
2833	int i;
2834	u64 ib = 0, ob = 0, ir = 0, or = 0;
2835
2836	memset(p, -1, sizeof(*p));
2837
2838	if (IS_QLAFX00(vha->hw))
2839		goto done;
2840
2841	if (test_bit(UNLOADING, &vha->dpc_flags))
2842		goto done;
2843
2844	if (unlikely(pci_channel_offline(ha->pdev)))
2845		goto done;
2846
2847	if (qla2x00_chip_is_down(vha))
2848		goto done;
2849
2850	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2851				   GFP_KERNEL);
2852	if (!stats) {
2853		ql_log(ql_log_warn, vha, 0x707d,
2854		    "Failed to allocate memory for stats.\n");
2855		goto done;
2856	}
2857
2858	rval = QLA_FUNCTION_FAILED;
2859	if (IS_FWI2_CAPABLE(ha)) {
2860		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
2861	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
2862	    !ha->dpc_active) {
2863		/* Must be in a 'READY' state for statistics retrieval. */
2864		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
2865						stats, stats_dma);
2866	}
2867
2868	if (rval != QLA_SUCCESS)
2869		goto done_free;
2870
2871	/* --- */
2872	for (i = 0; i < vha->hw->max_qpairs; i++) {
2873		qpair = vha->hw->queue_pair_map[i];
2874		if (!qpair)
2875			continue;
2876		ir += qpair->counters.input_requests;
2877		or += qpair->counters.output_requests;
2878		ib += qpair->counters.input_bytes;
2879		ob += qpair->counters.output_bytes;
2880	}
2881	ir += ha->base_qpair->counters.input_requests;
2882	or += ha->base_qpair->counters.output_requests;
2883	ib += ha->base_qpair->counters.input_bytes;
2884	ob += ha->base_qpair->counters.output_bytes;
2885
2886	ir += vha->qla_stats.input_requests;
2887	or += vha->qla_stats.output_requests;
2888	ib += vha->qla_stats.input_bytes;
2889	ob += vha->qla_stats.output_bytes;
2890	/* --- */
2891
2892	p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
2893	p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
2894	p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
2895	p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt);
2896	p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt);
2897	p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt);
2898	if (IS_FWI2_CAPABLE(ha)) {
2899		p->lip_count = le32_to_cpu(stats->lip_cnt);
2900		p->tx_frames = le32_to_cpu(stats->tx_frames);
2901		p->rx_frames = le32_to_cpu(stats->rx_frames);
2902		p->dumped_frames = le32_to_cpu(stats->discarded_frames);
2903		p->nos_count = le32_to_cpu(stats->nos_rcvd);
2904		p->error_frames =
2905		    le32_to_cpu(stats->dropped_frames) +
2906		    le32_to_cpu(stats->discarded_frames);
2907		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2908			p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
2909			p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
2910		} else {
2911			p->rx_words = ib >> 2;
2912			p->tx_words = ob >> 2;
2913		}
2914	}
2915
2916	p->fcp_control_requests = vha->qla_stats.control_requests;
2917	p->fcp_input_requests = ir;
2918	p->fcp_output_requests = or;
2919	p->fcp_input_megabytes  = ib >> 20;
2920	p->fcp_output_megabytes = ob >> 20;
2921	p->seconds_since_last_reset =
2922	    get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
2923	do_div(p->seconds_since_last_reset, HZ);
2924
2925done_free:
2926	dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
2927	    stats, stats_dma);
2928done:
2929	return p;
2930}
2931
2932static void
2933qla2x00_reset_host_stats(struct Scsi_Host *shost)
2934{
2935	scsi_qla_host_t *vha = shost_priv(shost);
2936	struct qla_hw_data *ha = vha->hw;
2937	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2938	struct link_statistics *stats;
2939	dma_addr_t stats_dma;
2940	int i;
2941	struct qla_qpair *qpair;
2942
2943	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2944	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2945	for (i = 0; i < vha->hw->max_qpairs; i++) {
2946		qpair = vha->hw->queue_pair_map[i];
2947		if (!qpair)
2948			continue;
2949		memset(&qpair->counters, 0, sizeof(qpair->counters));
2950	}
2951	memset(&ha->base_qpair->counters, 0, sizeof(qpair->counters));
2952
2953	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2954
2955	if (IS_FWI2_CAPABLE(ha)) {
2956		int rval;
2957
2958		stats = dma_alloc_coherent(&ha->pdev->dev,
2959		    sizeof(*stats), &stats_dma, GFP_KERNEL);
2960		if (!stats) {
2961			ql_log(ql_log_warn, vha, 0x70d7,
2962			    "Failed to allocate memory for stats.\n");
2963			return;
2964		}
2965
2966		/* reset firmware statistics */
2967		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
2968		if (rval != QLA_SUCCESS)
2969			ql_log(ql_log_warn, vha, 0x70de,
2970			       "Resetting ISP statistics failed: rval = %d\n",
2971			       rval);
2972
2973		dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2974		    stats, stats_dma);
2975	}
2976}
2977
2978static void
2979qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
2980{
2981	scsi_qla_host_t *vha = shost_priv(shost);
2982
2983	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
2984	    sizeof(fc_host_symbolic_name(shost)));
2985}
2986
2987static void
2988qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
2989{
2990	scsi_qla_host_t *vha = shost_priv(shost);
2991
2992	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
2993}
2994
2995static void
2996qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
2997{
2998	scsi_qla_host_t *vha = shost_priv(shost);
2999	static const uint8_t node_name[WWN_SIZE] = {
3000		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
3001	};
3002	u64 fabric_name = wwn_to_u64(node_name);
3003
3004	if (vha->device_flags & SWITCH_FOUND)
3005		fabric_name = wwn_to_u64(vha->fabric_node_name);
3006
3007	fc_host_fabric_name(shost) = fabric_name;
3008}
3009
3010static void
3011qla2x00_get_host_port_state(struct Scsi_Host *shost)
3012{
3013	scsi_qla_host_t *vha = shost_priv(shost);
3014	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
3015
3016	if (!base_vha->flags.online) {
3017		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
3018		return;
3019	}
3020
3021	switch (atomic_read(&base_vha->loop_state)) {
3022	case LOOP_UPDATE:
3023		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
3024		break;
3025	case LOOP_DOWN:
3026		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
3027			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
3028		else
3029			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
3030		break;
3031	case LOOP_DEAD:
3032		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
3033		break;
3034	case LOOP_READY:
3035		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
3036		break;
3037	default:
3038		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
3039		break;
3040	}
3041}
3042
3043static int
3044qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
3045{
3046	int	ret = 0;
3047	uint8_t	qos = 0;
3048	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
3049	scsi_qla_host_t *vha = NULL;
3050	struct qla_hw_data *ha = base_vha->hw;
3051	int	cnt;
3052	struct req_que *req = ha->req_q_map[0];
3053	struct qla_qpair *qpair;
3054
3055	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
3056	if (ret) {
3057		ql_log(ql_log_warn, vha, 0x707e,
3058		    "Vport sanity check failed, status %x\n", ret);
3059		return (ret);
3060	}
3061
3062	vha = qla24xx_create_vhost(fc_vport);
3063	if (vha == NULL) {
3064		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
3065		return FC_VPORT_FAILED;
3066	}
3067	if (disable) {
3068		atomic_set(&vha->vp_state, VP_OFFLINE);
3069		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
3070	} else
3071		atomic_set(&vha->vp_state, VP_FAILED);
3072
3073	/* ready to create vport */
3074	ql_log(ql_log_info, vha, 0x7080,
3075	    "VP entry id %d assigned.\n", vha->vp_idx);
3076
3077	/* initialized vport states */
3078	atomic_set(&vha->loop_state, LOOP_DOWN);
3079	vha->vp_err_state = VP_ERR_PORTDWN;
3080	vha->vp_prev_err_state = VP_ERR_UNKWN;
3081	/* Check if physical ha port is Up */
3082	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
3083	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
3084		/* Don't retry or attempt login of this virtual port */
3085		ql_dbg(ql_dbg_user, vha, 0x7081,
3086		    "Vport loop state is not UP.\n");
3087		atomic_set(&vha->loop_state, LOOP_DEAD);
3088		if (!disable)
3089			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
3090	}
3091
3092	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
3093		if (ha->fw_attributes & BIT_4) {
3094			int prot = 0, guard;
3095
3096			vha->flags.difdix_supported = 1;
3097			ql_dbg(ql_dbg_user, vha, 0x7082,
3098			    "Registered for DIF/DIX type 1 and 3 protection.\n");
3099			scsi_host_set_prot(vha->host,
3100			    prot | SHOST_DIF_TYPE1_PROTECTION
3101			    | SHOST_DIF_TYPE2_PROTECTION
3102			    | SHOST_DIF_TYPE3_PROTECTION
3103			    | SHOST_DIX_TYPE1_PROTECTION
3104			    | SHOST_DIX_TYPE2_PROTECTION
3105			    | SHOST_DIX_TYPE3_PROTECTION);
3106
3107			guard = SHOST_DIX_GUARD_CRC;
3108
3109			if (IS_PI_IPGUARD_CAPABLE(ha) &&
3110			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
3111				guard |= SHOST_DIX_GUARD_IP;
3112
3113			scsi_host_set_guard(vha->host, guard);
3114		} else
3115			vha->flags.difdix_supported = 0;
3116	}
3117
3118	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
3119				   &ha->pdev->dev)) {
3120		ql_dbg(ql_dbg_user, vha, 0x7083,
3121		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
3122		goto vport_create_failed_2;
3123	}
3124
3125	/* initialize attributes */
3126	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3127	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3128	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3129	fc_host_supported_classes(vha->host) =
3130		fc_host_supported_classes(base_vha->host);
3131	fc_host_supported_speeds(vha->host) =
3132		fc_host_supported_speeds(base_vha->host);
3133
3134	qlt_vport_create(vha, ha);
3135	qla24xx_vport_disable(fc_vport, disable);
3136
3137	if (!ql2xmqsupport || !ha->npiv_info)
3138		goto vport_queue;
3139
3140	/* Create a request queue in QoS mode for the vport */
3141	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
3142		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
3143			&& memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
3144					8) == 0) {
3145			qos = ha->npiv_info[cnt].q_qos;
3146			break;
3147		}
3148	}
3149
3150	if (qos) {
3151		qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
3152		if (!qpair)
3153			ql_log(ql_log_warn, vha, 0x7084,
3154			    "Can't create qpair for VP[%d]\n",
3155			    vha->vp_idx);
3156		else {
3157			ql_dbg(ql_dbg_multiq, vha, 0xc001,
3158			    "Queue pair: %d Qos: %d) created for VP[%d]\n",
3159			    qpair->id, qos, vha->vp_idx);
3160			ql_dbg(ql_dbg_user, vha, 0x7085,
3161			    "Queue Pair: %d Qos: %d) created for VP[%d]\n",
3162			    qpair->id, qos, vha->vp_idx);
3163			req = qpair->req;
3164			vha->qpair = qpair;
3165		}
3166	}
3167
3168vport_queue:
3169	vha->req = req;
3170	return 0;
3171
3172vport_create_failed_2:
3173	qla24xx_disable_vp(vha);
3174	qla24xx_deallocate_vp_id(vha);
3175	scsi_host_put(vha->host);
3176	return FC_VPORT_FAILED;
3177}
3178
3179static int
3180qla24xx_vport_delete(struct fc_vport *fc_vport)
3181{
3182	scsi_qla_host_t *vha = fc_vport->dd_data;
3183	struct qla_hw_data *ha = vha->hw;
3184	uint16_t id = vha->vp_idx;
3185
3186	set_bit(VPORT_DELETE, &vha->dpc_flags);
3187
3188	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
3189		msleep(1000);
3190
3191
3192	qla24xx_disable_vp(vha);
3193	qla2x00_wait_for_sess_deletion(vha);
3194
3195	qla_nvme_delete(vha);
3196	qla_enode_stop(vha);
3197	qla_edb_stop(vha);
3198
3199	vha->flags.delete_progress = 1;
3200
3201	qlt_remove_target(ha, vha);
3202
3203	fc_remove_host(vha->host);
3204
3205	scsi_remove_host(vha->host);
3206
3207	/* Allow timer to run to drain queued items, when removing vp */
3208	qla24xx_deallocate_vp_id(vha);
3209
3210	if (vha->timer_active) {
3211		qla2x00_vp_stop_timer(vha);
3212		ql_dbg(ql_dbg_user, vha, 0x7086,
3213		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
3214	}
3215
3216	qla2x00_free_fcports(vha);
3217
3218	mutex_lock(&ha->vport_lock);
3219	ha->cur_vport_count--;
3220	clear_bit(vha->vp_idx, ha->vp_idx_map);
3221	mutex_unlock(&ha->vport_lock);
3222
3223	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
3224	    vha->gnl.ldma);
3225
3226	vha->gnl.l = NULL;
3227
3228	vfree(vha->scan.l);
3229
3230	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
3231		if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
3232			ql_log(ql_log_warn, vha, 0x7087,
3233			    "Queue Pair delete failed.\n");
3234	}
3235
3236	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
3237	scsi_host_put(vha->host);
3238	return 0;
3239}
3240
3241static int
3242qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
3243{
3244	scsi_qla_host_t *vha = fc_vport->dd_data;
3245
3246	if (disable)
3247		qla24xx_disable_vp(vha);
3248	else
3249		qla24xx_enable_vp(vha);
3250
3251	return 0;
3252}
3253
3254struct fc_function_template qla2xxx_transport_functions = {
3255
3256	.show_host_node_name = 1,
3257	.show_host_port_name = 1,
3258	.show_host_supported_classes = 1,
3259	.show_host_supported_speeds = 1,
3260
3261	.get_host_port_id = qla2x00_get_host_port_id,
3262	.show_host_port_id = 1,
3263	.get_host_speed = qla2x00_get_host_speed,
3264	.show_host_speed = 1,
3265	.get_host_port_type = qla2x00_get_host_port_type,
3266	.show_host_port_type = 1,
3267	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3268	.show_host_symbolic_name = 1,
3269	.set_host_system_hostname = qla2x00_set_host_system_hostname,
3270	.show_host_system_hostname = 1,
3271	.get_host_fabric_name = qla2x00_get_host_fabric_name,
3272	.show_host_fabric_name = 1,
3273	.get_host_port_state = qla2x00_get_host_port_state,
3274	.show_host_port_state = 1,
3275
3276	.dd_fcrport_size = sizeof(struct fc_port *),
3277	.show_rport_supported_classes = 1,
3278
3279	.get_starget_node_name = qla2x00_get_starget_node_name,
3280	.show_starget_node_name = 1,
3281	.get_starget_port_name = qla2x00_get_starget_port_name,
3282	.show_starget_port_name = 1,
3283	.get_starget_port_id  = qla2x00_get_starget_port_id,
3284	.show_starget_port_id = 1,
3285
3286	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3287	.show_rport_dev_loss_tmo = 1,
3288
3289	.issue_fc_host_lip = qla2x00_issue_lip,
3290	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3291	.terminate_rport_io = qla2x00_terminate_rport_io,
3292	.get_fc_host_stats = qla2x00_get_fc_host_stats,
3293	.reset_fc_host_stats = qla2x00_reset_host_stats,
3294
3295	.vport_create = qla24xx_vport_create,
3296	.vport_disable = qla24xx_vport_disable,
3297	.vport_delete = qla24xx_vport_delete,
3298	.bsg_request = qla24xx_bsg_request,
3299	.bsg_timeout = qla24xx_bsg_timeout,
3300};
3301
3302struct fc_function_template qla2xxx_transport_vport_functions = {
3303
3304	.show_host_node_name = 1,
3305	.show_host_port_name = 1,
3306	.show_host_supported_classes = 1,
3307
3308	.get_host_port_id = qla2x00_get_host_port_id,
3309	.show_host_port_id = 1,
3310	.get_host_speed = qla2x00_get_host_speed,
3311	.show_host_speed = 1,
3312	.get_host_port_type = qla2x00_get_host_port_type,
3313	.show_host_port_type = 1,
3314	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3315	.show_host_symbolic_name = 1,
3316	.set_host_system_hostname = qla2x00_set_host_system_hostname,
3317	.show_host_system_hostname = 1,
3318	.get_host_fabric_name = qla2x00_get_host_fabric_name,
3319	.show_host_fabric_name = 1,
3320	.get_host_port_state = qla2x00_get_host_port_state,
3321	.show_host_port_state = 1,
3322
3323	.dd_fcrport_size = sizeof(struct fc_port *),
3324	.show_rport_supported_classes = 1,
3325
3326	.get_starget_node_name = qla2x00_get_starget_node_name,
3327	.show_starget_node_name = 1,
3328	.get_starget_port_name = qla2x00_get_starget_port_name,
3329	.show_starget_port_name = 1,
3330	.get_starget_port_id  = qla2x00_get_starget_port_id,
3331	.show_starget_port_id = 1,
3332
3333	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3334	.show_rport_dev_loss_tmo = 1,
3335
3336	.issue_fc_host_lip = qla2x00_issue_lip,
3337	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3338	.terminate_rport_io = qla2x00_terminate_rport_io,
3339	.get_fc_host_stats = qla2x00_get_fc_host_stats,
3340	.reset_fc_host_stats = qla2x00_reset_host_stats,
3341
3342	.bsg_request = qla24xx_bsg_request,
3343	.bsg_timeout = qla24xx_bsg_timeout,
3344};
3345
3346static uint
3347qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds)
3348{
3349	uint supported_speeds = FC_PORTSPEED_UNKNOWN;
3350
3351	if (speeds & FDMI_PORT_SPEED_64GB)
3352		supported_speeds |= FC_PORTSPEED_64GBIT;
3353	if (speeds & FDMI_PORT_SPEED_32GB)
3354		supported_speeds |= FC_PORTSPEED_32GBIT;
3355	if (speeds & FDMI_PORT_SPEED_16GB)
3356		supported_speeds |= FC_PORTSPEED_16GBIT;
3357	if (speeds & FDMI_PORT_SPEED_8GB)
3358		supported_speeds |= FC_PORTSPEED_8GBIT;
3359	if (speeds & FDMI_PORT_SPEED_4GB)
3360		supported_speeds |= FC_PORTSPEED_4GBIT;
3361	if (speeds & FDMI_PORT_SPEED_2GB)
3362		supported_speeds |= FC_PORTSPEED_2GBIT;
3363	if (speeds & FDMI_PORT_SPEED_1GB)
3364		supported_speeds |= FC_PORTSPEED_1GBIT;
3365
3366	return supported_speeds;
3367}
3368
3369void
3370qla2x00_init_host_attr(scsi_qla_host_t *vha)
3371{
3372	struct qla_hw_data *ha = vha->hw;
3373	u32 speeds = 0, fdmi_speed = 0;
3374
3375	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3376	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3377	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3378	fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
3379			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
3380	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
3381	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
3382
3383	fdmi_speed = qla25xx_fdmi_port_speed_capability(ha);
3384	speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed);
3385
3386	fc_host_supported_speeds(vha->host) = speeds;
3387}
3388