1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 *         Leo Duran <leo.duran@amd.com>
6 */
7
8#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
9#define _ASM_X86_AMD_IOMMU_TYPES_H
10
11#include <linux/iommu.h>
12#include <linux/types.h>
13#include <linux/mmu_notifier.h>
14#include <linux/mutex.h>
15#include <linux/msi.h>
16#include <linux/list.h>
17#include <linux/spinlock.h>
18#include <linux/pci.h>
19#include <linux/irqreturn.h>
20#include <linux/io-pgtable.h>
21
22/*
23 * Maximum number of IOMMUs supported
24 */
25#define MAX_IOMMUS	32
26
27/*
28 * some size calculation constants
29 */
30#define DEV_TABLE_ENTRY_SIZE		32
31#define ALIAS_TABLE_ENTRY_SIZE		2
32#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))
33
34/* Capability offsets used by the driver */
35#define MMIO_CAP_HDR_OFFSET	0x00
36#define MMIO_RANGE_OFFSET	0x0c
37#define MMIO_MISC_OFFSET	0x10
38
39/* Masks, shifts and macros to parse the device range capability */
40#define MMIO_RANGE_LD_MASK	0xff000000
41#define MMIO_RANGE_FD_MASK	0x00ff0000
42#define MMIO_RANGE_BUS_MASK	0x0000ff00
43#define MMIO_RANGE_LD_SHIFT	24
44#define MMIO_RANGE_FD_SHIFT	16
45#define MMIO_RANGE_BUS_SHIFT	8
46#define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
47#define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
48#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
49#define MMIO_MSI_NUM(x)	((x) & 0x1f)
50
51/* Flag masks for the AMD IOMMU exclusion range */
52#define MMIO_EXCL_ENABLE_MASK 0x01ULL
53#define MMIO_EXCL_ALLOW_MASK  0x02ULL
54
55/* Used offsets into the MMIO space */
56#define MMIO_DEV_TABLE_OFFSET   0x0000
57#define MMIO_CMD_BUF_OFFSET     0x0008
58#define MMIO_EVT_BUF_OFFSET     0x0010
59#define MMIO_CONTROL_OFFSET     0x0018
60#define MMIO_EXCL_BASE_OFFSET   0x0020
61#define MMIO_EXCL_LIMIT_OFFSET  0x0028
62#define MMIO_EXT_FEATURES	0x0030
63#define MMIO_PPR_LOG_OFFSET	0x0038
64#define MMIO_GA_LOG_BASE_OFFSET	0x00e0
65#define MMIO_GA_LOG_TAIL_OFFSET	0x00e8
66#define MMIO_MSI_ADDR_LO_OFFSET	0x015C
67#define MMIO_MSI_ADDR_HI_OFFSET	0x0160
68#define MMIO_MSI_DATA_OFFSET	0x0164
69#define MMIO_INTCAPXT_EVT_OFFSET	0x0170
70#define MMIO_INTCAPXT_PPR_OFFSET	0x0178
71#define MMIO_INTCAPXT_GALOG_OFFSET	0x0180
72#define MMIO_EXT_FEATURES2	0x01A0
73#define MMIO_CMD_HEAD_OFFSET	0x2000
74#define MMIO_CMD_TAIL_OFFSET	0x2008
75#define MMIO_EVT_HEAD_OFFSET	0x2010
76#define MMIO_EVT_TAIL_OFFSET	0x2018
77#define MMIO_STATUS_OFFSET	0x2020
78#define MMIO_PPR_HEAD_OFFSET	0x2030
79#define MMIO_PPR_TAIL_OFFSET	0x2038
80#define MMIO_GA_HEAD_OFFSET	0x2040
81#define MMIO_GA_TAIL_OFFSET	0x2048
82#define MMIO_CNTR_CONF_OFFSET	0x4000
83#define MMIO_CNTR_REG_OFFSET	0x40000
84#define MMIO_REG_END_OFFSET	0x80000
85
86
87
88/* Extended Feature Bits */
89#define FEATURE_PREFETCH	BIT_ULL(0)
90#define FEATURE_PPR		BIT_ULL(1)
91#define FEATURE_X2APIC		BIT_ULL(2)
92#define FEATURE_NX		BIT_ULL(3)
93#define FEATURE_GT		BIT_ULL(4)
94#define FEATURE_IA		BIT_ULL(6)
95#define FEATURE_GA		BIT_ULL(7)
96#define FEATURE_HE		BIT_ULL(8)
97#define FEATURE_PC		BIT_ULL(9)
98#define FEATURE_GATS_SHIFT	(12)
99#define FEATURE_GATS_MASK	(3ULL)
100#define FEATURE_GAM_VAPIC	BIT_ULL(21)
101#define FEATURE_GIOSUP		BIT_ULL(48)
102#define FEATURE_HASUP		BIT_ULL(49)
103#define FEATURE_EPHSUP		BIT_ULL(50)
104#define FEATURE_HDSUP		BIT_ULL(52)
105#define FEATURE_SNP		BIT_ULL(63)
106
107#define FEATURE_PASID_SHIFT	32
108#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)
109
110#define FEATURE_GLXVAL_SHIFT	14
111#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)
112
113/* Extended Feature 2 Bits */
114#define FEATURE_SNPAVICSUP_SHIFT	5
115#define FEATURE_SNPAVICSUP_MASK		(0x07ULL << FEATURE_SNPAVICSUP_SHIFT)
116#define FEATURE_SNPAVICSUP_GAM(x) \
117	((x & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT == 0x1)
118
/* Note:
 * The current driver only supports 16-bit PASIDs.
 * Current hardware only implements up to 16-bit PASIDs
 * even though the spec says it could have up to 20 bits.
 */
124#define PASID_MASK		0x0000ffff
125
126/* MMIO status bits */
127#define MMIO_STATUS_EVT_OVERFLOW_MASK		BIT(0)
128#define MMIO_STATUS_EVT_INT_MASK		BIT(1)
129#define MMIO_STATUS_COM_WAIT_INT_MASK		BIT(2)
130#define MMIO_STATUS_EVT_RUN_MASK		BIT(3)
131#define MMIO_STATUS_PPR_OVERFLOW_MASK		BIT(5)
132#define MMIO_STATUS_PPR_INT_MASK		BIT(6)
133#define MMIO_STATUS_PPR_RUN_MASK		BIT(7)
134#define MMIO_STATUS_GALOG_RUN_MASK		BIT(8)
135#define MMIO_STATUS_GALOG_OVERFLOW_MASK		BIT(9)
136#define MMIO_STATUS_GALOG_INT_MASK		BIT(10)
137
138/* event logging constants */
139#define EVENT_ENTRY_SIZE	0x10
140#define EVENT_TYPE_SHIFT	28
141#define EVENT_TYPE_MASK		0xf
142#define EVENT_TYPE_ILL_DEV	0x1
143#define EVENT_TYPE_IO_FAULT	0x2
144#define EVENT_TYPE_DEV_TAB_ERR	0x3
145#define EVENT_TYPE_PAGE_TAB_ERR	0x4
146#define EVENT_TYPE_ILL_CMD	0x5
147#define EVENT_TYPE_CMD_HARD_ERR	0x6
148#define EVENT_TYPE_IOTLB_INV_TO	0x7
149#define EVENT_TYPE_INV_DEV_REQ	0x8
150#define EVENT_TYPE_INV_PPR_REQ	0x9
151#define EVENT_TYPE_RMP_FAULT	0xd
152#define EVENT_TYPE_RMP_HW_ERR	0xe
153#define EVENT_DEVID_MASK	0xffff
154#define EVENT_DEVID_SHIFT	0
155#define EVENT_DOMID_MASK_LO	0xffff
156#define EVENT_DOMID_MASK_HI	0xf0000
157#define EVENT_FLAGS_MASK	0xfff
158#define EVENT_FLAGS_SHIFT	0x10
159#define EVENT_FLAG_RW		0x020
160#define EVENT_FLAG_I		0x008
161
162/* feature control bits */
163#define CONTROL_IOMMU_EN	0
164#define CONTROL_HT_TUN_EN	1
165#define CONTROL_EVT_LOG_EN	2
166#define CONTROL_EVT_INT_EN	3
167#define CONTROL_COMWAIT_EN	4
168#define CONTROL_INV_TIMEOUT	5
169#define CONTROL_PASSPW_EN	8
170#define CONTROL_RESPASSPW_EN	9
171#define CONTROL_COHERENT_EN	10
172#define CONTROL_ISOC_EN		11
173#define CONTROL_CMDBUF_EN	12
174#define CONTROL_PPRLOG_EN	13
175#define CONTROL_PPRINT_EN	14
176#define CONTROL_PPR_EN		15
177#define CONTROL_GT_EN		16
178#define CONTROL_GA_EN		17
179#define CONTROL_GAM_EN		25
180#define CONTROL_GALOG_EN	28
181#define CONTROL_GAINT_EN	29
182#define CONTROL_XT_EN		50
183#define CONTROL_INTCAPXT_EN	51
184#define CONTROL_IRTCACHEDIS	59
185#define CONTROL_SNPAVIC_EN	61
186
187#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
188#define CTRL_INV_TO_NONE	0
189#define CTRL_INV_TO_1MS		1
190#define CTRL_INV_TO_10MS	2
191#define CTRL_INV_TO_100MS	3
192#define CTRL_INV_TO_1S		4
193#define CTRL_INV_TO_10S		5
194#define CTRL_INV_TO_100S	6
195
196/* command specific defines */
197#define CMD_COMPL_WAIT          0x01
198#define CMD_INV_DEV_ENTRY       0x02
199#define CMD_INV_IOMMU_PAGES	0x03
200#define CMD_INV_IOTLB_PAGES	0x04
201#define CMD_INV_IRT		0x05
202#define CMD_COMPLETE_PPR	0x07
203#define CMD_INV_ALL		0x08
204
205#define CMD_COMPL_WAIT_STORE_MASK	0x01
206#define CMD_COMPL_WAIT_INT_MASK		0x02
207#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
208#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
209#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04
210
211#define PPR_STATUS_MASK			0xf
212#define PPR_STATUS_SHIFT		12
213
214#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL
215
216/* macros and definitions for device table entries */
217#define DEV_ENTRY_VALID         0x00
218#define DEV_ENTRY_TRANSLATION   0x01
219#define DEV_ENTRY_HAD           0x07
220#define DEV_ENTRY_PPR           0x34
221#define DEV_ENTRY_IR            0x3d
222#define DEV_ENTRY_IW            0x3e
223#define DEV_ENTRY_NO_PAGE_FAULT	0x62
224#define DEV_ENTRY_EX            0x67
225#define DEV_ENTRY_SYSMGT1       0x68
226#define DEV_ENTRY_SYSMGT2       0x69
227#define DEV_ENTRY_IRQ_TBL_EN	0x80
228#define DEV_ENTRY_INIT_PASS     0xb8
229#define DEV_ENTRY_EINT_PASS     0xb9
230#define DEV_ENTRY_NMI_PASS      0xba
231#define DEV_ENTRY_LINT0_PASS    0xbe
232#define DEV_ENTRY_LINT1_PASS    0xbf
233#define DEV_ENTRY_MODE_MASK	0x07
234#define DEV_ENTRY_MODE_SHIFT	0x09
235
236#define MAX_DEV_TABLE_ENTRIES	0xffff
237
238/* constants to configure the command buffer */
239#define CMD_BUFFER_SIZE    8192
240#define CMD_BUFFER_UNINITIALIZED 1
241#define CMD_BUFFER_ENTRIES 512
242#define MMIO_CMD_SIZE_SHIFT 56
243#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
244
245/* constants for event buffer handling */
246#define EVT_BUFFER_SIZE		8192 /* 512 entries */
247#define EVT_LEN_MASK		(0x9ULL << 56)
248
249/* Constants for PPR Log handling */
250#define PPR_LOG_ENTRIES		512
251#define PPR_LOG_SIZE_SHIFT	56
252#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
253#define PPR_ENTRY_SIZE		16
254#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
255
256/* PAGE_SERVICE_REQUEST PPR Log Buffer Entry flags */
257#define PPR_FLAG_EXEC		0x002	/* Execute permission requested */
258#define PPR_FLAG_READ		0x004	/* Read permission requested */
259#define PPR_FLAG_WRITE		0x020	/* Write permission requested */
260#define PPR_FLAG_US		0x040	/* 1: User, 0: Supervisor */
261#define PPR_FLAG_RVSD		0x080	/* Reserved bit not zero */
262#define PPR_FLAG_GN		0x100	/* GVA and PASID is valid */
263
264#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
265#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
266#define PPR_DEVID(x)		((x) & 0xffffULL)
267#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
268#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
269#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
270#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))
271
272#define PPR_REQ_FAULT		0x01
273
274/* Constants for GA Log handling */
275#define GA_LOG_ENTRIES		512
276#define GA_LOG_SIZE_SHIFT	56
277#define GA_LOG_SIZE_512		(0x8ULL << GA_LOG_SIZE_SHIFT)
278#define GA_ENTRY_SIZE		8
279#define GA_LOG_SIZE		(GA_ENTRY_SIZE * GA_LOG_ENTRIES)
280
281#define GA_TAG(x)		(u32)(x & 0xffffffffULL)
282#define GA_DEVID(x)		(u16)(((x) >> 32) & 0xffffULL)
283#define GA_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
284
285#define GA_GUEST_NR		0x1
286
287#define IOMMU_IN_ADDR_BIT_SIZE  52
288#define IOMMU_OUT_ADDR_BIT_SIZE 52
289
290/*
291 * This bitmap is used to advertise the page sizes our hardware support
292 * to the IOMMU core, which will then use this information to split
293 * physically contiguous memory regions it is mapping into page sizes
294 * that we support.
295 *
296 * 512GB Pages are not supported due to a hardware bug
297 */
298#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
299/* 4K, 2MB, 1G page sizes are supported */
300#define AMD_IOMMU_PGSIZES_V2	(PAGE_SIZE | (1ULL << 21) | (1ULL << 30))
301
302/* Bit value definition for dte irq remapping fields*/
303#define DTE_IRQ_PHYS_ADDR_MASK		GENMASK_ULL(51, 6)
304#define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
305#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
306#define DTE_IRQ_REMAP_ENABLE    1ULL
307
308/*
309 * AMD IOMMU hardware only support 512 IRTEs despite
310 * the architectural limitation of 2048 entries.
311 */
312#define DTE_INTTAB_ALIGNMENT    128
313#define DTE_INTTABLEN_VALUE     9ULL
314#define DTE_INTTABLEN           (DTE_INTTABLEN_VALUE << 1)
315#define DTE_INTTABLEN_MASK      (0xfULL << 1)
316#define MAX_IRQS_PER_TABLE      (1 << DTE_INTTABLEN_VALUE)
317
318#define PAGE_MODE_NONE    0x00
319#define PAGE_MODE_1_LEVEL 0x01
320#define PAGE_MODE_2_LEVEL 0x02
321#define PAGE_MODE_3_LEVEL 0x03
322#define PAGE_MODE_4_LEVEL 0x04
323#define PAGE_MODE_5_LEVEL 0x05
324#define PAGE_MODE_6_LEVEL 0x06
325#define PAGE_MODE_7_LEVEL 0x07
326
327#define GUEST_PGTABLE_4_LEVEL	0x00
328#define GUEST_PGTABLE_5_LEVEL	0x01
329
330#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
331#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
332				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
333				   (0xffffffffffffffffULL))
334#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
335#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
336#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
337				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
338#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)
339
340#define PM_MAP_4k		0
341#define PM_ADDR_MASK		0x000ffffffffff000ULL
342#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
343				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
344#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))
345
346/*
347 * Returns the page table level to use for a given page size
348 * Pagesize is expected to be a power-of-two
349 */
350#define PAGE_SIZE_LEVEL(pagesize) \
351		((__ffs(pagesize) - 12) / 9)
352/*
353 * Returns the number of ptes to use for a given page size
354 * Pagesize is expected to be a power-of-two
355 */
356#define PAGE_SIZE_PTE_COUNT(pagesize) \
357		(1ULL << ((__ffs(pagesize) - 12) % 9))
358
359/*
360 * Aligns a given io-virtual address to a given page size
361 * Pagesize is expected to be a power-of-two
362 */
363#define PAGE_SIZE_ALIGN(address, pagesize) \
364		((address) & ~((pagesize) - 1))
365/*
366 * Creates an IOMMU PTE for an address and a given pagesize
367 * The PTE has no permission bits set
368 * Pagesize is expected to be a power-of-two larger than 4096
369 */
370#define PAGE_SIZE_PTE(address, pagesize)		\
371		(((address) | ((pagesize) - 1)) &	\
372		 (~(pagesize >> 1)) & PM_ADDR_MASK)
373
374/*
375 * Takes a PTE value with mode=0x07 and returns the page size it maps
376 */
377#define PTE_PAGE_SIZE(pte) \
378	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
379
380/*
381 * Takes a page-table level and returns the default page-size for this level
382 */
383#define PTE_LEVEL_PAGE_SIZE(level)			\
384	(1ULL << (12 + (9 * (level))))
385
386/*
387 * The IOPTE dirty bit
388 */
389#define IOMMU_PTE_HD_BIT (6)
390
391/*
392 * Bit value definition for I/O PTE fields
393 */
394#define IOMMU_PTE_PR	BIT_ULL(0)
395#define IOMMU_PTE_HD	BIT_ULL(IOMMU_PTE_HD_BIT)
396#define IOMMU_PTE_U	BIT_ULL(59)
397#define IOMMU_PTE_FC	BIT_ULL(60)
398#define IOMMU_PTE_IR	BIT_ULL(61)
399#define IOMMU_PTE_IW	BIT_ULL(62)
400
401/*
402 * Bit value definition for DTE fields
403 */
404#define DTE_FLAG_V	BIT_ULL(0)
405#define DTE_FLAG_TV	BIT_ULL(1)
406#define DTE_FLAG_HAD	(3ULL << 7)
407#define DTE_FLAG_GIOV	BIT_ULL(54)
408#define DTE_FLAG_GV	BIT_ULL(55)
409#define DTE_GLX_SHIFT	(56)
410#define DTE_GLX_MASK	(3)
411#define DTE_FLAG_IR	BIT_ULL(61)
412#define DTE_FLAG_IW	BIT_ULL(62)
413
414#define DTE_FLAG_IOTLB	BIT_ULL(32)
415#define DTE_FLAG_MASK	(0x3ffULL << 32)
416#define DEV_DOMID_MASK	0xffffULL
417
418#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
419#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
420#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)
421
422#define DTE_GCR3_INDEX_A	0
423#define DTE_GCR3_INDEX_B	1
424#define DTE_GCR3_INDEX_C	1
425
426#define DTE_GCR3_SHIFT_A	58
427#define DTE_GCR3_SHIFT_B	16
428#define DTE_GCR3_SHIFT_C	43
429
430#define DTE_GPT_LEVEL_SHIFT	54
431
432#define GCR3_VALID		0x01ULL
433
434#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
435#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
436#define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD)
437#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
438#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
439
440#define IOMMU_PROT_MASK 0x03
441#define IOMMU_PROT_IR 0x01
442#define IOMMU_PROT_IW 0x02
443
444#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE	(1 << 2)
445
446/* IOMMU capabilities */
447#define IOMMU_CAP_IOTLB   24
448#define IOMMU_CAP_NPCACHE 26
449#define IOMMU_CAP_EFR     27
450
451/* IOMMU IVINFO */
452#define IOMMU_IVINFO_OFFSET     36
453#define IOMMU_IVINFO_EFRSUP     BIT(0)
454#define IOMMU_IVINFO_DMA_REMAP  BIT(1)
455
/* IOMMU Feature Reporting Field (for IVHD type 10h) */
457#define IOMMU_FEAT_GASUP_SHIFT	6
458
459/* IOMMU Extended Feature Register (EFR) */
460#define IOMMU_EFR_XTSUP_SHIFT	2
461#define IOMMU_EFR_GASUP_SHIFT	7
462#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT	46
463
464#define MAX_DOMAIN_ID 65536
465
466/* Timeout stuff */
467#define LOOP_TIMEOUT		100000
468#define MMIO_STATUS_TIMEOUT	2000000
469
470extern bool amd_iommu_dump;
/*
 * Print driver debug output, gated on the amd_iommu_dump module option.
 * Note: no semicolon after "while (0)" -- the trailing semicolon in the
 * old version turned every use into two statements, which breaks
 * un-braced if/else constructs around DUMP_printk().
 */
#define DUMP_printk(format, arg...)				\
	do {							\
		if (amd_iommu_dump)				\
			pr_info("AMD-Vi: " format, ## arg);	\
	} while (0)
476
477/* global flag if IOMMUs cache non-present entries */
478extern bool amd_iommu_np_cache;
479/* Only true if all IOMMUs support device IOTLBs */
480extern bool amd_iommu_iotlb_sup;
481
/* Per-device interrupt remapping table (array of IRTEs plus allocator state) */
struct irq_remap_table {
	raw_spinlock_t lock;	/* Serializes access to the table */
	unsigned min_index;	/* NOTE(review): presumably the lowest index available for allocation -- confirm in allocator */
	u32 *table;		/* The IRTE entries themselves */
};
487
488/* Interrupt remapping feature used? */
489extern bool amd_iommu_irq_remap;
490
491extern const struct iommu_ops amd_iommu_ops;
492
493/* IVRS indicates that pre-boot remapping was enabled */
494extern bool amdr_ivrs_remap_support;
495
/* kmem_cache to get tables with 128 byte alignment */
497extern struct kmem_cache *amd_iommu_irq_cache;
498
499#define PCI_SBDF_TO_SEGID(sbdf)		(((sbdf) >> 16) & 0xffff)
500#define PCI_SBDF_TO_DEVID(sbdf)		((sbdf) & 0xffff)
501#define PCI_SEG_DEVID_TO_SBDF(seg, devid)	((((u32)(seg) & 0xffff) << 16) | \
502						 ((devid) & 0xffff))
503
504/* Make iterating over all pci segment easier */
505#define for_each_pci_segment(pci_seg) \
506	list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)
507#define for_each_pci_segment_safe(pci_seg, next) \
508	list_for_each_entry_safe((pci_seg), (next), &amd_iommu_pci_seg_list, list)
509/*
510 * Make iterating over all IOMMUs easier
511 */
512#define for_each_iommu(iommu) \
513	list_for_each_entry((iommu), &amd_iommu_list, list)
514#define for_each_iommu_safe(iommu, next) \
515	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
516/* Making iterating over protection_domain->dev_data_list easier */
517#define for_each_pdom_dev_data(pdom_dev_data, pdom) \
518	list_for_each_entry(pdom_dev_data, &pdom->dev_data_list, list)
519#define for_each_pdom_dev_data_safe(pdom_dev_data, next, pdom) \
520	list_for_each_entry_safe((pdom_dev_data), (next), &pdom->dev_data_list, list)
521
522struct amd_iommu;
523struct iommu_domain;
524struct irq_domain;
525struct amd_irte_ops;
526
527#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED      (1 << 0)
528
529#define io_pgtable_to_data(x) \
530	container_of((x), struct amd_io_pgtable, iop)
531
532#define io_pgtable_ops_to_data(x) \
533	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
534
535#define io_pgtable_ops_to_domain(x) \
536	container_of(io_pgtable_ops_to_data(x), \
537		     struct protection_domain, iop)
538
539#define io_pgtable_cfg_to_data(x) \
540	container_of((x), struct amd_io_pgtable, pgtbl_cfg)
541
/* Per-device guest CR3 (GCR3) table state, used for PASID support */
struct gcr3_tbl_info {
	u64	*gcr3_tbl;	/* Guest CR3 table */
	int	glx;		/* Number of levels for GCR3 table */
	u32	pasid_cnt;	/* Track attached PASIDs */
	u16	domid;		/* Per device domain ID */
};
548
/* Driver-private io-pgtable state (embedded in struct protection_domain) */
struct amd_io_pgtable {
	struct io_pgtable_cfg	pgtbl_cfg;	/* io-pgtable framework config */
	struct io_pgtable	iop;		/* io-pgtable framework handle */
	int			mode;		/* NOTE(review): presumably one of PAGE_MODE_* -- confirm */
	u64			*root;		/* Page-table root pointer (v1) */
	u64			*pgd;		/* v2 pgtable pgd pointer */
};
556
/* Which page-table flavor a protection domain uses */
enum protection_domain_mode {
	PD_MODE_V1 = 1,		/* AMD IOMMU v1 page table */
	PD_MODE_V2,		/* AMD IOMMU v2 page table */
};
561
/*
 * Track one dev_data/PASID pair attached to a protection domain.
 * Entries live on protection_domain->dev_data_list.
 */
struct pdom_dev_data {
	/* Points to attached device data */
	struct iommu_dev_data *dev_data;
	/* PASID attached to the protection domain */
	ioasid_t pasid;
	/* For protection_domain->dev_data_list */
	struct list_head list;
};
571
572/*
573 * This structure contains generic data for  IOMMU protection domains
574 * independent of their use.
575 */
576struct protection_domain {
577	struct list_head dev_list; /* List of all devices in this domain */
578	struct iommu_domain domain; /* generic domain handle used by
579				       iommu core code */
580	struct amd_io_pgtable iop;
581	spinlock_t lock;	/* mostly used to lock the page table*/
582	u16 id;			/* the domain id written to the device table */
583	int nid;		/* Node ID */
584	enum protection_domain_mode pd_mode; /* Track page table type */
585	bool dirty_tracking;	/* dirty tracking is enabled in the domain */
586	unsigned dev_cnt;	/* devices assigned to this domain */
587	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
588
589	struct mmu_notifier mn;	/* mmu notifier for the SVA domain */
590	struct list_head dev_data_list; /* List of pdom_dev_data */
591};
592
593/*
594 * This structure contains information about one PCI segment in the system.
595 */
596struct amd_iommu_pci_seg {
597	/* List with all PCI segments in the system */
598	struct list_head list;
599
600	/* List of all available dev_data structures */
601	struct llist_head dev_data_list;
602
603	/* PCI segment number */
604	u16 id;
605
606	/* Largest PCI device id we expect translation requests for */
607	u16 last_bdf;
608
609	/* Size of the device table */
610	u32 dev_table_size;
611
612	/* Size of the alias table */
613	u32 alias_table_size;
614
615	/* Size of the rlookup table */
616	u32 rlookup_table_size;
617
618	/*
619	 * device table virtual address
620	 *
621	 * Pointer to the per PCI segment device table.
622	 * It is indexed by the PCI device id or the HT unit id and contains
623	 * information about the domain the device belongs to as well as the
624	 * page table root pointer.
625	 */
626	struct dev_table_entry *dev_table;
627
628	/*
629	 * The rlookup iommu table is used to find the IOMMU which is
630	 * responsible for a specific device. It is indexed by the PCI
631	 * device id.
632	 */
633	struct amd_iommu **rlookup_table;
634
635	/*
636	 * This table is used to find the irq remapping table for a given
637	 * device id quickly.
638	 */
639	struct irq_remap_table **irq_lookup_table;
640
641	/*
642	 * Pointer to a device table which the content of old device table
643	 * will be copied to. It's only be used in kdump kernel.
644	 */
645	struct dev_table_entry *old_dev_tbl_cpy;
646
647	/*
648	 * The alias table is a driver specific data structure which contains the
649	 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
650	 * More than one device can share the same requestor id.
651	 */
652	u16 *alias_table;
653
654	/*
655	 * A list of required unity mappings we find in ACPI. It is not locked
656	 * because as runtime it is only read. It is created at ACPI table
657	 * parsing time.
658	 */
659	struct list_head unity_map;
660};
661
662/*
663 * Structure where we save information about one hardware AMD IOMMU in the
664 * system.
665 */
666struct amd_iommu {
667	struct list_head list;
668
669	/* Index within the IOMMU array */
670	int index;
671
672	/* locks the accesses to the hardware */
673	raw_spinlock_t lock;
674
675	/* Pointer to PCI device of this IOMMU */
676	struct pci_dev *dev;
677
678	/* Cache pdev to root device for resume quirks */
679	struct pci_dev *root_pdev;
680
681	/* physical address of MMIO space */
682	u64 mmio_phys;
683
684	/* physical end address of MMIO space */
685	u64 mmio_phys_end;
686
687	/* virtual address of MMIO space */
688	u8 __iomem *mmio_base;
689
690	/* capabilities of that IOMMU read from ACPI */
691	u32 cap;
692
693	/* flags read from acpi table */
694	u8 acpi_flags;
695
696	/* Extended features */
697	u64 features;
698
699	/* Extended features 2 */
700	u64 features2;
701
702	/* PCI device id of the IOMMU device */
703	u16 devid;
704
705	/*
706	 * Capability pointer. There could be more than one IOMMU per PCI
707	 * device function if there are more than one AMD IOMMU capability
708	 * pointers.
709	 */
710	u16 cap_ptr;
711
712	/* pci domain of this IOMMU */
713	struct amd_iommu_pci_seg *pci_seg;
714
715	/* start of exclusion range of that IOMMU */
716	u64 exclusion_start;
717	/* length of exclusion range of that IOMMU */
718	u64 exclusion_length;
719
720	/* command buffer virtual address */
721	u8 *cmd_buf;
722	u32 cmd_buf_head;
723	u32 cmd_buf_tail;
724
725	/* event buffer virtual address */
726	u8 *evt_buf;
727
728	/* Name for event log interrupt */
729	unsigned char evt_irq_name[16];
730
731	/* Base of the PPR log, if present */
732	u8 *ppr_log;
733
734	/* Name for PPR log interrupt */
735	unsigned char ppr_irq_name[16];
736
737	/* Base of the GA log, if present */
738	u8 *ga_log;
739
740	/* Name for GA log interrupt */
741	unsigned char ga_irq_name[16];
742
743	/* Tail of the GA log, if present */
744	u8 *ga_log_tail;
745
746	/* true if interrupts for this IOMMU are already enabled */
747	bool int_enabled;
748
749	/* if one, we need to send a completion wait command */
750	bool need_sync;
751
752	/* true if disable irte caching */
753	bool irtcachedis_enabled;
754
755	/* Handle for IOMMU core code */
756	struct iommu_device iommu;
757
758	/*
759	 * We can't rely on the BIOS to restore all values on reinit, so we
760	 * need to stash them
761	 */
762
763	/* The iommu BAR */
764	u32 stored_addr_lo;
765	u32 stored_addr_hi;
766
767	/*
768	 * Each iommu has 6 l1s, each of which is documented as having 0x12
769	 * registers
770	 */
771	u32 stored_l1[6][0x12];
772
773	/* The l2 indirect registers */
774	u32 stored_l2[0x83];
775
776	/* The maximum PC banks and counters/bank (PCSup=1) */
777	u8 max_banks;
778	u8 max_counters;
779#ifdef CONFIG_IRQ_REMAP
780	struct irq_domain *ir_domain;
781
782	struct amd_irte_ops *irte_ops;
783#endif
784
785	u32 flags;
786	volatile u64 *cmd_sem;
787	atomic64_t cmd_sem_val;
788
789#ifdef CONFIG_AMD_IOMMU_DEBUGFS
790	/* DebugFS Info */
791	struct dentry *debugfs;
792#endif
793
794	/* IOPF support */
795	struct iopf_queue *iopf_queue;
796	unsigned char iopfq_name[32];
797};
798
799static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
800{
801	struct iommu_device *iommu = dev_to_iommu_device(dev);
802
803	return container_of(iommu, struct amd_iommu, iommu);
804}
805
806#define ACPIHID_UID_LEN 256
807#define ACPIHID_HID_LEN 9
808
/* Maps an ACPI HID/UID named device to an IOMMU device id */
struct acpihid_map_entry {
	struct list_head list;		/* For acpihid_map */
	u8 uid[ACPIHID_UID_LEN];	/* ACPI unique id string */
	u8 hid[ACPIHID_HID_LEN];	/* ACPI hardware id string */
	u32 devid;			/* Device id used by the IOMMU */
	u32 root_devid;
	bool cmd_line;			/* NOTE(review): presumably set for command-line supplied mappings -- confirm */
	struct iommu_group *group;	/* IOMMU group of the device */
};
818
/* Maps an IOAPIC or HPET id to the device id used by the IOMMU */
struct devid_map {
	struct list_head list;	/* For ioapic_map/hpet_map */
	u8 id;			/* IOAPIC/HPET id */
	u32 devid;		/* Device id used by the IOMMU */
	bool cmd_line;		/* NOTE(review): presumably set for command-line supplied mappings -- confirm */
};
825
826#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP     0x1    /* ATS feature supported */
827#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP     0x2    /* PRI feature supported */
828#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP   0x4    /* PASID context supported */
829/* Device may request execution on memory pages */
830#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP    0x8
831/* Device may request super-user privileges */
832#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP   0x10
833
834/*
835 * This struct contains device specific data for the IOMMU
836 */
837struct iommu_dev_data {
838	/*Protect against attach/detach races */
839	spinlock_t lock;
840
841	struct list_head list;		  /* For domain->dev_list */
842	struct llist_node dev_data_list;  /* For global dev_data_list */
843	struct protection_domain *domain; /* Domain the device is bound to */
844	struct gcr3_tbl_info gcr3_info;   /* Per-device GCR3 table */
845	struct device *dev;
846	u16 devid;			  /* PCI Device ID */
847
848	u32 max_pasids;			  /* Max supported PASIDs */
849	u32 flags;			  /* Holds AMD_IOMMU_DEVICE_FLAG_<*> */
850	int ats_qdep;
851	u8 ats_enabled  :1;		  /* ATS state */
852	u8 pri_enabled  :1;		  /* PRI state */
853	u8 pasid_enabled:1;		  /* PASID state */
854	u8 pri_tlp      :1;		  /* PASID TLB required for
855					     PPR completions */
856	u8 ppr          :1;		  /* Enable device PPR support */
857	bool use_vapic;			  /* Enable device to use vapic mode */
858	bool defer_attach;
859
860	struct ratelimit_state rs;        /* Ratelimit IOPF messages */
861};
862
863/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
864extern struct list_head ioapic_map;
865extern struct list_head hpet_map;
866extern struct list_head acpihid_map;
867
868/*
869 * List with all PCI segments in the system. This list is not locked because
870 * it is only written at driver initialization time
871 */
872extern struct list_head amd_iommu_pci_seg_list;
873
874/*
875 * List with all IOMMUs in the system. This list is not locked because it is
876 * only written and read at driver initialization or suspend time
877 */
878extern struct list_head amd_iommu_list;
879
880/*
881 * Array with pointers to each IOMMU struct
882 * The indices are referenced in the protection domains
883 */
884extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
885
886/*
887 * Structure defining one entry in the device table
888 */
889struct dev_table_entry {
890	u64 data[4];
891};
892
893/*
894 * One entry for unity mappings parsed out of the ACPI table.
895 */
896struct unity_map_entry {
897	struct list_head list;
898
899	/* starting device id this entry is used for (including) */
900	u16 devid_start;
901	/* end device id this entry is used for (including) */
902	u16 devid_end;
903
904	/* start address to unity map (including) */
905	u64 address_start;
906	/* end address to unity map (including) */
907	u64 address_end;
908
909	/* required protection */
910	int prot;
911};
912
913/*
914 * Data structures for device handling
915 */
916
917/* size of the dma_ops aperture as power of 2 */
918extern unsigned amd_iommu_aperture_order;
919
920/* allocation bitmap for domain ids */
921extern unsigned long *amd_iommu_pd_alloc_bitmap;
922
923extern bool amd_iommu_force_isolation;
924
925/* Max levels of glxval supported */
926extern int amd_iommu_max_glx_val;
927
928/* Global EFR and EFR2 registers */
929extern u64 amd_iommu_efr;
930extern u64 amd_iommu_efr2;
931
932static inline int get_ioapic_devid(int id)
933{
934	struct devid_map *entry;
935
936	list_for_each_entry(entry, &ioapic_map, list) {
937		if (entry->id == id)
938			return entry->devid;
939	}
940
941	return -EINVAL;
942}
943
944static inline int get_hpet_devid(int id)
945{
946	struct devid_map *entry;
947
948	list_for_each_entry(entry, &hpet_map, list) {
949		if (entry->id == id)
950			return entry->devid;
951	}
952
953	return -EINVAL;
954}
955
/* Operating modes for guest interrupt handling */
enum amd_iommu_intr_mode_type {
	AMD_IOMMU_GUEST_IR_LEGACY,

	/* This mode is not visible to users. It is used when
	 * we cannot fully enable vAPIC and fallback to only support
	 * legacy interrupt remapping via 128-bit IRTE.
	 */
	AMD_IOMMU_GUEST_IR_LEGACY_GA,
	AMD_IOMMU_GUEST_IR_VAPIC,
};
966
967#define AMD_IOMMU_GUEST_IR_GA(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC || \
968					 x == AMD_IOMMU_GUEST_IR_LEGACY_GA)
969
970#define AMD_IOMMU_GUEST_IR_VAPIC(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC)
971
/* 32-bit interrupt remapping table entry */
union irte {
	u32 val;	/* Raw 32-bit value of the entry */
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};
986
987#define APICID_TO_IRTE_DEST_LO(x)    (x & 0xffffff)
988#define APICID_TO_IRTE_DEST_HI(x)    ((x >> 24) & 0xff)
989
/* Low 64 bits of the 128-bit IRTE (see struct irte_ga) */
union irte_ga_lo {
	u64 val;	/* Raw low-half value */

	/* For int remapping */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_remap;

	/* For guest vAPIC */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    ga_log_intr	: 1,
		    rsvd1	: 3,
		    is_run	: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_vapic;
};
1021
/* High 64 bits of the 128-bit IRTE (see struct irte_ga) */
union irte_ga_hi {
	u64 val;	/* Raw high-half value */
	struct {
		u64 vector	: 8,
		    rsvd_1	: 4,
		    ga_root_ptr	: 40,
		    rsvd_2	: 4,
		    destination : 8;
	} fields;
};
1032
/*
 * 128-bit interrupt remapping table entry. The anonymous union lets the
 * entry be accessed either as lo/hi 64-bit halves or as a single u128.
 */
struct irte_ga {
	union {
		struct {
			union irte_ga_lo lo;
			union irte_ga_hi hi;
		};
		u128 irte;	/* Whole entry as one 128-bit value */
	};
};
1042
/* Identifies one IRTE: which device's table, and which slot in it */
struct irq_2_irte {
	u16 devid; /* Device ID for IRTE table */
	u16 index; /* Index into IRTE table */
};
1047
/* Driver-private data attached to a remapped interrupt */
struct amd_ir_data {
	u32 cached_ga_tag;		/* Last GA tag programmed (see ga_tag below) */
	struct amd_iommu *iommu;	/* IOMMU owning the IRTE */
	struct irq_2_irte irq_2_irte;	/* Location of the IRTE */
	struct msi_msg msi_entry;
	void *entry;    /* Pointer to union irte or struct irte_ga */

	/*
	 * Store information for activate/de-activate
	 * Guest virtual APIC mode during runtime.
	 */
	struct irq_cfg *cfg;
	int ga_vector;
	u64 ga_root_ptr;
	u32 ga_tag;
};
1064
/*
 * Callbacks abstracting the IRTE format in use (see irte_32_ops and
 * irte_128_ops below).
 */
struct amd_irte_ops {
	void (*prepare)(void *, u32, bool, u8, u32, int);
	void (*activate)(struct amd_iommu *iommu, void *, u16, u16);
	void (*deactivate)(struct amd_iommu *iommu, void *, u16, u16);
	void (*set_affinity)(struct amd_iommu *iommu, void *, u16, u16, u8, u32);
	void *(*get)(struct irq_remap_table *, int);
	void (*set_allocated)(struct irq_remap_table *, int);
	bool (*is_allocated)(struct irq_remap_table *, int);
	void (*clear_allocated)(struct irq_remap_table *, int);
};
1075
1076#ifdef CONFIG_IRQ_REMAP
1077extern struct amd_irte_ops irte_32_ops;
1078extern struct amd_irte_ops irte_128_ops;
1079#endif
1080
1081#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
1082