1// SPDX-License-Identifier: GPL-2.0
2/*
3 * hosting IBM Z kernel virtual machines (s390x)
4 *
5 * Copyright IBM Corp. 2008, 2020
6 *
7 *    Author(s): Carsten Otte <cotte@de.ibm.com>
8 *               Christian Borntraeger <borntraeger@de.ibm.com>
9 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
10 *               Jason J. Herne <jjherne@us.ibm.com>
11 */
12
13#define KMSG_COMPONENT "kvm-s390"
14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
19#include <linux/hrtimer.h>
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/mman.h>
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/random.h>
27#include <linux/slab.h>
28#include <linux/timer.h>
29#include <linux/vmalloc.h>
30#include <linux/bitmap.h>
31#include <linux/sched/signal.h>
32#include <linux/string.h>
33#include <linux/pgtable.h>
34#include <linux/mmu_notifier.h>
35
36#include <asm/access-regs.h>
37#include <asm/asm-offsets.h>
38#include <asm/lowcore.h>
39#include <asm/stp.h>
40#include <asm/gmap.h>
41#include <asm/nmi.h>
42#include <asm/isc.h>
43#include <asm/sclp.h>
44#include <asm/cpacf.h>
45#include <asm/timex.h>
46#include <asm/fpu.h>
47#include <asm/ap.h>
48#include <asm/uv.h>
49#include "kvm-s390.h"
50#include "gaccess.h"
51#include "pci.h"
52
53#define CREATE_TRACE_POINTS
54#include "trace.h"
55#include "trace-s390.h"
56
57#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
58#define LOCAL_IRQS 32
59#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
60			   (KVM_MAX_VCPUS + LOCAL_IRQS))
61
62const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
63	KVM_GENERIC_VM_STATS(),
64	STATS_DESC_COUNTER(VM, inject_io),
65	STATS_DESC_COUNTER(VM, inject_float_mchk),
66	STATS_DESC_COUNTER(VM, inject_pfault_done),
67	STATS_DESC_COUNTER(VM, inject_service_signal),
68	STATS_DESC_COUNTER(VM, inject_virtio),
69	STATS_DESC_COUNTER(VM, aen_forward),
70	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
71	STATS_DESC_COUNTER(VM, gmap_shadow_create),
72	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
73	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
74	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
75	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
76	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
77};
78
79const struct kvm_stats_header kvm_vm_stats_header = {
80	.name_size = KVM_STATS_NAME_SIZE,
81	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
82	.id_offset = sizeof(struct kvm_stats_header),
83	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
84	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
85		       sizeof(kvm_vm_stats_desc),
86};
87
88const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
89	KVM_GENERIC_VCPU_STATS(),
90	STATS_DESC_COUNTER(VCPU, exit_userspace),
91	STATS_DESC_COUNTER(VCPU, exit_null),
92	STATS_DESC_COUNTER(VCPU, exit_external_request),
93	STATS_DESC_COUNTER(VCPU, exit_io_request),
94	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
95	STATS_DESC_COUNTER(VCPU, exit_stop_request),
96	STATS_DESC_COUNTER(VCPU, exit_validity),
97	STATS_DESC_COUNTER(VCPU, exit_instruction),
98	STATS_DESC_COUNTER(VCPU, exit_pei),
99	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
100	STATS_DESC_COUNTER(VCPU, instruction_lctl),
101	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
102	STATS_DESC_COUNTER(VCPU, instruction_stctl),
103	STATS_DESC_COUNTER(VCPU, instruction_stctg),
104	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
105	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
106	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
107	STATS_DESC_COUNTER(VCPU, deliver_ckc),
108	STATS_DESC_COUNTER(VCPU, deliver_cputm),
109	STATS_DESC_COUNTER(VCPU, deliver_external_call),
110	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
111	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
112	STATS_DESC_COUNTER(VCPU, deliver_virtio),
113	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
114	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
115	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
116	STATS_DESC_COUNTER(VCPU, deliver_program),
117	STATS_DESC_COUNTER(VCPU, deliver_io),
118	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
119	STATS_DESC_COUNTER(VCPU, exit_wait_state),
120	STATS_DESC_COUNTER(VCPU, inject_ckc),
121	STATS_DESC_COUNTER(VCPU, inject_cputm),
122	STATS_DESC_COUNTER(VCPU, inject_external_call),
123	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
124	STATS_DESC_COUNTER(VCPU, inject_mchk),
125	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
126	STATS_DESC_COUNTER(VCPU, inject_program),
127	STATS_DESC_COUNTER(VCPU, inject_restart),
128	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
129	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
130	STATS_DESC_COUNTER(VCPU, instruction_epsw),
131	STATS_DESC_COUNTER(VCPU, instruction_gs),
132	STATS_DESC_COUNTER(VCPU, instruction_io_other),
133	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
134	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
135	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
136	STATS_DESC_COUNTER(VCPU, instruction_ptff),
137	STATS_DESC_COUNTER(VCPU, instruction_sck),
138	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
139	STATS_DESC_COUNTER(VCPU, instruction_stidp),
140	STATS_DESC_COUNTER(VCPU, instruction_spx),
141	STATS_DESC_COUNTER(VCPU, instruction_stpx),
142	STATS_DESC_COUNTER(VCPU, instruction_stap),
143	STATS_DESC_COUNTER(VCPU, instruction_iske),
144	STATS_DESC_COUNTER(VCPU, instruction_ri),
145	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
146	STATS_DESC_COUNTER(VCPU, instruction_sske),
147	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
148	STATS_DESC_COUNTER(VCPU, instruction_stsi),
149	STATS_DESC_COUNTER(VCPU, instruction_stfl),
150	STATS_DESC_COUNTER(VCPU, instruction_tb),
151	STATS_DESC_COUNTER(VCPU, instruction_tpi),
152	STATS_DESC_COUNTER(VCPU, instruction_tprot),
153	STATS_DESC_COUNTER(VCPU, instruction_tsch),
154	STATS_DESC_COUNTER(VCPU, instruction_sie),
155	STATS_DESC_COUNTER(VCPU, instruction_essa),
156	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
157	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
158	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
159	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
160	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
161	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
162	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
163	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
164	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
165	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
166	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
167	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
168	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
169	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
170	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
171	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
172	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
173	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
174	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
175	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
176	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
177	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
178	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
179	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
180	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
181	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
182	STATS_DESC_COUNTER(VCPU, pfault_sync)
183};
184
185const struct kvm_stats_header kvm_vcpu_stats_header = {
186	.name_size = KVM_STATS_NAME_SIZE,
187	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
188	.id_offset = sizeof(struct kvm_stats_header),
189	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
190	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
191		       sizeof(kvm_vcpu_stats_desc),
192};
193
194/* allow nested virtualization in KVM (if enabled by user space) */
195static int nested;
196module_param(nested, int, S_IRUGO);
197MODULE_PARM_DESC(nested, "Nested virtualization support");
198
199/* allow 1m huge page guest backing, if !nested */
200static int hpage;
201module_param(hpage, int, 0444);
202MODULE_PARM_DESC(hpage, "1m huge page backing support");
203
204/* maximum percentage of steal time for polling.  >100 is treated like 100 */
205static u8 halt_poll_max_steal = 10;
206module_param(halt_poll_max_steal, byte, 0644);
207MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
208
209/* if set to true, the GISA will be initialized and used if available */
210static bool use_gisa  = true;
211module_param(use_gisa, bool, 0644);
212MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
213
214/* maximum diag9c forwarding per second */
215unsigned int diag9c_forwarding_hz;
216module_param(diag9c_forwarding_hz, uint, 0644);
217MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
218
219/*
220 * allow asynchronous deinit for protected guests; enable by default since
221 * the feature is opt-in anyway
222 */
223static int async_destroy = 1;
224module_param(async_destroy, int, 0444);
225MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
226
227/*
228 * For now we handle at most 16 double words as this is what the s390 base
229 * kernel handles and stores in the prefix page. If we ever need to go beyond
230 * this, this requires changes to code, but the external uapi can stay.
231 */
232#define SIZE_INTERNAL 16
233
234/*
235 * Base feature mask that defines default mask for facilities. Consists of the
236 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
237 */
238static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
239/*
240 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
241 * and defines the facilities that can be enabled via a cpu model.
242 */
243static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
244
245static unsigned long kvm_s390_fac_size(void)
246{
247	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
248	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
249	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
250		sizeof(stfle_fac_list));
251
252	return SIZE_INTERNAL;
253}
254
255/* available cpu features supported by kvm */
256static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
257/* available subfunctions indicated via query / "test bit" */
258static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
259
260static struct gmap_notifier gmap_notifier;
261static struct gmap_notifier vsie_gmap_notifier;
262debug_info_t *kvm_s390_dbf;
263debug_info_t *kvm_s390_dbf_uv;
264
265/* Section: not file related */
266/* forward declarations */
267static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
268			      unsigned long end);
269static int sca_switch_to_extended(struct kvm *kvm);
270
271static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
272{
273	u8 delta_idx = 0;
274
275	/*
276	 * The TOD jumps by delta, we have to compensate this by adding
277	 * -delta to the epoch.
278	 */
279	delta = -delta;
280
281	/* sign-extension - we're adding to signed values below */
282	if ((s64)delta < 0)
283		delta_idx = -1;
284
285	scb->epoch += delta;
286	if (scb->ecd & ECD_MEF) {
287		scb->epdx += delta_idx;
288		if (scb->epoch < delta)
289			scb->epdx += 1;
290	}
291}
292
293/*
294 * This callback is executed during stop_machine(). All CPUs are therefore
295 * temporarily stopped. In order not to change guest behavior, we have to
296 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
297 * so a CPU won't be stopped while calculating with the epoch.
298 */
299static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
300			  void *v)
301{
302	struct kvm *kvm;
303	struct kvm_vcpu *vcpu;
304	unsigned long i;
305	unsigned long long *delta = v;
306
307	list_for_each_entry(kvm, &vm_list, vm_list) {
308		kvm_for_each_vcpu(i, vcpu, kvm) {
309			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
310			if (i == 0) {
311				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
312				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
313			}
314			if (vcpu->arch.cputm_enabled)
315				vcpu->arch.cputm_start += *delta;
316			if (vcpu->arch.vsie_block)
317				kvm_clock_sync_scb(vcpu->arch.vsie_block,
318						   *delta);
319		}
320	}
321	return NOTIFY_OK;
322}
323
324static struct notifier_block kvm_clock_notifier = {
325	.notifier_call = kvm_clock_sync,
326};
327
328static void allow_cpu_feat(unsigned long nr)
329{
330	set_bit_inv(nr, kvm_s390_available_cpu_feat);
331}
332
333static inline int plo_test_bit(unsigned char nr)
334{
335	unsigned long function = (unsigned long)nr | 0x100;
336	int cc;
337
338	asm volatile(
339		"	lgr	0,%[function]\n"
340		/* Parameter registers are ignored for "test bit" */
341		"	plo	0,0,0,0(0)\n"
342		"	ipm	%0\n"
343		"	srl	%0,28\n"
344		: "=d" (cc)
345		: [function] "d" (function)
346		: "cc", "0");
347	return cc == 0;
348}
349
350static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
351{
352	asm volatile(
353		"	lghi	0,0\n"
354		"	lgr	1,%[query]\n"
355		/* Parameter registers are ignored */
356		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
357		:
358		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
359		: "cc", "memory", "0", "1");
360}
361
362#define INSN_SORTL 0xb938
363#define INSN_DFLTCC 0xb939
364
365static void __init kvm_s390_cpu_feat_init(void)
366{
367	int i;
368
369	for (i = 0; i < 256; ++i) {
370		if (plo_test_bit(i))
371			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
372	}
373
374	if (test_facility(28)) /* TOD-clock steering */
375		ptff(kvm_s390_available_subfunc.ptff,
376		     sizeof(kvm_s390_available_subfunc.ptff),
377		     PTFF_QAF);
378
379	if (test_facility(17)) { /* MSA */
380		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
381			      kvm_s390_available_subfunc.kmac);
382		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
383			      kvm_s390_available_subfunc.kmc);
384		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
385			      kvm_s390_available_subfunc.km);
386		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
387			      kvm_s390_available_subfunc.kimd);
388		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
389			      kvm_s390_available_subfunc.klmd);
390	}
391	if (test_facility(76)) /* MSA3 */
392		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
393			      kvm_s390_available_subfunc.pckmo);
394	if (test_facility(77)) { /* MSA4 */
395		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
396			      kvm_s390_available_subfunc.kmctr);
397		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
398			      kvm_s390_available_subfunc.kmf);
399		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
400			      kvm_s390_available_subfunc.kmo);
401		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
402			      kvm_s390_available_subfunc.pcc);
403	}
404	if (test_facility(57)) /* MSA5 */
405		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
406			      kvm_s390_available_subfunc.ppno);
407
408	if (test_facility(146)) /* MSA8 */
409		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
410			      kvm_s390_available_subfunc.kma);
411
412	if (test_facility(155)) /* MSA9 */
413		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
414			      kvm_s390_available_subfunc.kdsa);
415
416	if (test_facility(150)) /* SORTL */
417		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
418
419	if (test_facility(151)) /* DFLTCC */
420		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
421
422	if (MACHINE_HAS_ESOP)
423		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
424	/*
425	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
426	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
427	 */
428	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
429	    !test_facility(3) || !nested)
430		return;
431	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
432	if (sclp.has_64bscao)
433		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
434	if (sclp.has_siif)
435		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
436	if (sclp.has_gpere)
437		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
438	if (sclp.has_gsls)
439		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
440	if (sclp.has_ib)
441		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
442	if (sclp.has_cei)
443		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
444	if (sclp.has_ibs)
445		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
446	if (sclp.has_kss)
447		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
448	/*
449	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
450	 * all skey handling functions read/set the skey from the PGSTE
451	 * instead of the real storage key.
452	 *
453	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
454	 * pages being detected as preserved although they are resident.
455	 *
456	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
457	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
458	 *
459	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
460	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
461	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
462	 *
463	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
464	 * cannot easily shadow the SCA because of the ipte lock.
465	 */
466}
467
468static int __init __kvm_s390_init(void)
469{
470	int rc = -ENOMEM;
471
472	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
473	if (!kvm_s390_dbf)
474		return -ENOMEM;
475
476	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
477	if (!kvm_s390_dbf_uv)
478		goto err_kvm_uv;
479
480	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
481	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
482		goto err_debug_view;
483
484	kvm_s390_cpu_feat_init();
485
486	/* Register floating interrupt controller interface. */
487	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
488	if (rc) {
489		pr_err("A FLIC registration call failed with rc=%d\n", rc);
490		goto err_flic;
491	}
492
493	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
494		rc = kvm_s390_pci_init();
495		if (rc) {
496			pr_err("Unable to allocate AIFT for PCI\n");
497			goto err_pci;
498		}
499	}
500
501	rc = kvm_s390_gib_init(GAL_ISC);
502	if (rc)
503		goto err_gib;
504
505	gmap_notifier.notifier_call = kvm_gmap_notifier;
506	gmap_register_pte_notifier(&gmap_notifier);
507	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
508	gmap_register_pte_notifier(&vsie_gmap_notifier);
509	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
510				       &kvm_clock_notifier);
511
512	return 0;
513
514err_gib:
515	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
516		kvm_s390_pci_exit();
517err_pci:
518err_flic:
519err_debug_view:
520	debug_unregister(kvm_s390_dbf_uv);
521err_kvm_uv:
522	debug_unregister(kvm_s390_dbf);
523	return rc;
524}
525
526static void __kvm_s390_exit(void)
527{
528	gmap_unregister_pte_notifier(&gmap_notifier);
529	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
530	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
531					 &kvm_clock_notifier);
532
533	kvm_s390_gib_destroy();
534	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
535		kvm_s390_pci_exit();
536	debug_unregister(kvm_s390_dbf);
537	debug_unregister(kvm_s390_dbf_uv);
538}
539
540/* Section: device related */
541long kvm_arch_dev_ioctl(struct file *filp,
542			unsigned int ioctl, unsigned long arg)
543{
544	if (ioctl == KVM_S390_ENABLE_SIE)
545		return s390_enable_sie();
546	return -EINVAL;
547}
548
549int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
550{
551	int r;
552
553	switch (ext) {
554	case KVM_CAP_S390_PSW:
555	case KVM_CAP_S390_GMAP:
556	case KVM_CAP_SYNC_MMU:
557#ifdef CONFIG_KVM_S390_UCONTROL
558	case KVM_CAP_S390_UCONTROL:
559#endif
560	case KVM_CAP_ASYNC_PF:
561	case KVM_CAP_SYNC_REGS:
562	case KVM_CAP_ONE_REG:
563	case KVM_CAP_ENABLE_CAP:
564	case KVM_CAP_S390_CSS_SUPPORT:
565	case KVM_CAP_IOEVENTFD:
566	case KVM_CAP_S390_IRQCHIP:
567	case KVM_CAP_VM_ATTRIBUTES:
568	case KVM_CAP_MP_STATE:
569	case KVM_CAP_IMMEDIATE_EXIT:
570	case KVM_CAP_S390_INJECT_IRQ:
571	case KVM_CAP_S390_USER_SIGP:
572	case KVM_CAP_S390_USER_STSI:
573	case KVM_CAP_S390_SKEYS:
574	case KVM_CAP_S390_IRQ_STATE:
575	case KVM_CAP_S390_USER_INSTR0:
576	case KVM_CAP_S390_CMMA_MIGRATION:
577	case KVM_CAP_S390_AIS:
578	case KVM_CAP_S390_AIS_MIGRATION:
579	case KVM_CAP_S390_VCPU_RESETS:
580	case KVM_CAP_SET_GUEST_DEBUG:
581	case KVM_CAP_S390_DIAG318:
582	case KVM_CAP_IRQFD_RESAMPLE:
583		r = 1;
584		break;
585	case KVM_CAP_SET_GUEST_DEBUG2:
586		r = KVM_GUESTDBG_VALID_MASK;
587		break;
588	case KVM_CAP_S390_HPAGE_1M:
589		r = 0;
590		if (hpage && !(kvm && kvm_is_ucontrol(kvm)))
591			r = 1;
592		break;
593	case KVM_CAP_S390_MEM_OP:
594		r = MEM_OP_MAX_SIZE;
595		break;
596	case KVM_CAP_S390_MEM_OP_EXTENSION:
597		/*
598		 * Flag bits indicating which extensions are supported.
599		 * If r > 0, the base extension must also be supported/indicated,
600		 * in order to maintain backwards compatibility.
601		 */
602		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
603		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
604		break;
605	case KVM_CAP_NR_VCPUS:
606	case KVM_CAP_MAX_VCPUS:
607	case KVM_CAP_MAX_VCPU_ID:
608		r = KVM_S390_BSCA_CPU_SLOTS;
609		if (!kvm_s390_use_sca_entries())
610			r = KVM_MAX_VCPUS;
611		else if (sclp.has_esca && sclp.has_64bscao)
612			r = KVM_S390_ESCA_CPU_SLOTS;
613		if (ext == KVM_CAP_NR_VCPUS)
614			r = min_t(unsigned int, num_online_cpus(), r);
615		break;
616	case KVM_CAP_S390_COW:
617		r = MACHINE_HAS_ESOP;
618		break;
619	case KVM_CAP_S390_VECTOR_REGISTERS:
620		r = test_facility(129);
621		break;
622	case KVM_CAP_S390_RI:
623		r = test_facility(64);
624		break;
625	case KVM_CAP_S390_GS:
626		r = test_facility(133);
627		break;
628	case KVM_CAP_S390_BPB:
629		r = test_facility(82);
630		break;
631	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
632		r = async_destroy && is_prot_virt_host();
633		break;
634	case KVM_CAP_S390_PROTECTED:
635		r = is_prot_virt_host();
636		break;
637	case KVM_CAP_S390_PROTECTED_DUMP: {
638		u64 pv_cmds_dump[] = {
639			BIT_UVC_CMD_DUMP_INIT,
640			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
641			BIT_UVC_CMD_DUMP_CPU,
642			BIT_UVC_CMD_DUMP_COMPLETE,
643		};
644		int i;
645
646		r = is_prot_virt_host();
647
648		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
649			if (!test_bit_inv(pv_cmds_dump[i],
650					  (unsigned long *)&uv_info.inst_calls_list)) {
651				r = 0;
652				break;
653			}
654		}
655		break;
656	}
657	case KVM_CAP_S390_ZPCI_OP:
658		r = kvm_s390_pci_interp_allowed();
659		break;
660	case KVM_CAP_S390_CPU_TOPOLOGY:
661		r = test_facility(11);
662		break;
663	default:
664		r = 0;
665	}
666	return r;
667}
668
669void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
670{
671	int i;
672	gfn_t cur_gfn, last_gfn;
673	unsigned long gaddr, vmaddr;
674	struct gmap *gmap = kvm->arch.gmap;
675	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
676
677	/* Loop over all guest segments */
678	cur_gfn = memslot->base_gfn;
679	last_gfn = memslot->base_gfn + memslot->npages;
680	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
681		gaddr = gfn_to_gpa(cur_gfn);
682		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
683		if (kvm_is_error_hva(vmaddr))
684			continue;
685
686		bitmap_zero(bitmap, _PAGE_ENTRIES);
687		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
688		for (i = 0; i < _PAGE_ENTRIES; i++) {
689			if (test_bit(i, bitmap))
690				mark_page_dirty(kvm, cur_gfn + i);
691		}
692
693		if (fatal_signal_pending(current))
694			return;
695		cond_resched();
696	}
697}
698
699/* Section: vm related */
700static void sca_del_vcpu(struct kvm_vcpu *vcpu);
701
702/*
703 * Get (and clear) the dirty memory log for a memory slot.
704 */
705int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
706			       struct kvm_dirty_log *log)
707{
708	int r;
709	unsigned long n;
710	struct kvm_memory_slot *memslot;
711	int is_dirty;
712
713	if (kvm_is_ucontrol(kvm))
714		return -EINVAL;
715
716	mutex_lock(&kvm->slots_lock);
717
718	r = -EINVAL;
719	if (log->slot >= KVM_USER_MEM_SLOTS)
720		goto out;
721
722	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
723	if (r)
724		goto out;
725
726	/* Clear the dirty log */
727	if (is_dirty) {
728		n = kvm_dirty_bitmap_bytes(memslot);
729		memset(memslot->dirty_bitmap, 0, n);
730	}
731	r = 0;
732out:
733	mutex_unlock(&kvm->slots_lock);
734	return r;
735}
736
737static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
738{
739	unsigned long i;
740	struct kvm_vcpu *vcpu;
741
742	kvm_for_each_vcpu(i, vcpu, kvm) {
743		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
744	}
745}
746
747int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
748{
749	int r;
750
751	if (cap->flags)
752		return -EINVAL;
753
754	switch (cap->cap) {
755	case KVM_CAP_S390_IRQCHIP:
756		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
757		kvm->arch.use_irqchip = 1;
758		r = 0;
759		break;
760	case KVM_CAP_S390_USER_SIGP:
761		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
762		kvm->arch.user_sigp = 1;
763		r = 0;
764		break;
765	case KVM_CAP_S390_VECTOR_REGISTERS:
766		mutex_lock(&kvm->lock);
767		if (kvm->created_vcpus) {
768			r = -EBUSY;
769		} else if (cpu_has_vx()) {
770			set_kvm_facility(kvm->arch.model.fac_mask, 129);
771			set_kvm_facility(kvm->arch.model.fac_list, 129);
772			if (test_facility(134)) {
773				set_kvm_facility(kvm->arch.model.fac_mask, 134);
774				set_kvm_facility(kvm->arch.model.fac_list, 134);
775			}
776			if (test_facility(135)) {
777				set_kvm_facility(kvm->arch.model.fac_mask, 135);
778				set_kvm_facility(kvm->arch.model.fac_list, 135);
779			}
780			if (test_facility(148)) {
781				set_kvm_facility(kvm->arch.model.fac_mask, 148);
782				set_kvm_facility(kvm->arch.model.fac_list, 148);
783			}
784			if (test_facility(152)) {
785				set_kvm_facility(kvm->arch.model.fac_mask, 152);
786				set_kvm_facility(kvm->arch.model.fac_list, 152);
787			}
788			if (test_facility(192)) {
789				set_kvm_facility(kvm->arch.model.fac_mask, 192);
790				set_kvm_facility(kvm->arch.model.fac_list, 192);
791			}
792			r = 0;
793		} else
794			r = -EINVAL;
795		mutex_unlock(&kvm->lock);
796		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
797			 r ? "(not available)" : "(success)");
798		break;
799	case KVM_CAP_S390_RI:
800		r = -EINVAL;
801		mutex_lock(&kvm->lock);
802		if (kvm->created_vcpus) {
803			r = -EBUSY;
804		} else if (test_facility(64)) {
805			set_kvm_facility(kvm->arch.model.fac_mask, 64);
806			set_kvm_facility(kvm->arch.model.fac_list, 64);
807			r = 0;
808		}
809		mutex_unlock(&kvm->lock);
810		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
811			 r ? "(not available)" : "(success)");
812		break;
813	case KVM_CAP_S390_AIS:
814		mutex_lock(&kvm->lock);
815		if (kvm->created_vcpus) {
816			r = -EBUSY;
817		} else {
818			set_kvm_facility(kvm->arch.model.fac_mask, 72);
819			set_kvm_facility(kvm->arch.model.fac_list, 72);
820			r = 0;
821		}
822		mutex_unlock(&kvm->lock);
823		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
824			 r ? "(not available)" : "(success)");
825		break;
826	case KVM_CAP_S390_GS:
827		r = -EINVAL;
828		mutex_lock(&kvm->lock);
829		if (kvm->created_vcpus) {
830			r = -EBUSY;
831		} else if (test_facility(133)) {
832			set_kvm_facility(kvm->arch.model.fac_mask, 133);
833			set_kvm_facility(kvm->arch.model.fac_list, 133);
834			r = 0;
835		}
836		mutex_unlock(&kvm->lock);
837		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
838			 r ? "(not available)" : "(success)");
839		break;
840	case KVM_CAP_S390_HPAGE_1M:
841		mutex_lock(&kvm->lock);
842		if (kvm->created_vcpus)
843			r = -EBUSY;
844		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
845			r = -EINVAL;
846		else {
847			r = 0;
848			mmap_write_lock(kvm->mm);
849			kvm->mm->context.allow_gmap_hpage_1m = 1;
850			mmap_write_unlock(kvm->mm);
851			/*
852			 * We might have to create fake 4k page
853			 * tables. To avoid that the hardware works on
854			 * stale PGSTEs, we emulate these instructions.
855			 */
856			kvm->arch.use_skf = 0;
857			kvm->arch.use_pfmfi = 0;
858		}
859		mutex_unlock(&kvm->lock);
860		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
861			 r ? "(not available)" : "(success)");
862		break;
863	case KVM_CAP_S390_USER_STSI:
864		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
865		kvm->arch.user_stsi = 1;
866		r = 0;
867		break;
868	case KVM_CAP_S390_USER_INSTR0:
869		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
870		kvm->arch.user_instr0 = 1;
871		icpt_operexc_on_all_vcpus(kvm);
872		r = 0;
873		break;
874	case KVM_CAP_S390_CPU_TOPOLOGY:
875		r = -EINVAL;
876		mutex_lock(&kvm->lock);
877		if (kvm->created_vcpus) {
878			r = -EBUSY;
879		} else if (test_facility(11)) {
880			set_kvm_facility(kvm->arch.model.fac_mask, 11);
881			set_kvm_facility(kvm->arch.model.fac_list, 11);
882			r = 0;
883		}
884		mutex_unlock(&kvm->lock);
885		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
886			 r ? "(not available)" : "(success)");
887		break;
888	default:
889		r = -EINVAL;
890		break;
891	}
892	return r;
893}
894
895static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
896{
897	int ret;
898
899	switch (attr->attr) {
900	case KVM_S390_VM_MEM_LIMIT_SIZE:
901		ret = 0;
902		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
903			 kvm->arch.mem_limit);
904		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
905			ret = -EFAULT;
906		break;
907	default:
908		ret = -ENXIO;
909		break;
910	}
911	return ret;
912}
913
914static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
915{
916	int ret;
917	unsigned int idx;
918	switch (attr->attr) {
919	case KVM_S390_VM_MEM_ENABLE_CMMA:
920		ret = -ENXIO;
921		if (!sclp.has_cmma)
922			break;
923
924		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
925		mutex_lock(&kvm->lock);
926		if (kvm->created_vcpus)
927			ret = -EBUSY;
928		else if (kvm->mm->context.allow_gmap_hpage_1m)
929			ret = -EINVAL;
930		else {
931			kvm->arch.use_cmma = 1;
932			/* Not compatible with cmma. */
933			kvm->arch.use_pfmfi = 0;
934			ret = 0;
935		}
936		mutex_unlock(&kvm->lock);
937		break;
938	case KVM_S390_VM_MEM_CLR_CMMA:
939		ret = -ENXIO;
940		if (!sclp.has_cmma)
941			break;
942		ret = -EINVAL;
943		if (!kvm->arch.use_cmma)
944			break;
945
946		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
947		mutex_lock(&kvm->lock);
948		idx = srcu_read_lock(&kvm->srcu);
949		s390_reset_cmma(kvm->arch.gmap->mm);
950		srcu_read_unlock(&kvm->srcu, idx);
951		mutex_unlock(&kvm->lock);
952		ret = 0;
953		break;
954	case KVM_S390_VM_MEM_LIMIT_SIZE: {
955		unsigned long new_limit;
956
957		if (kvm_is_ucontrol(kvm))
958			return -EINVAL;
959
960		if (get_user(new_limit, (u64 __user *)attr->addr))
961			return -EFAULT;
962
963		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
964		    new_limit > kvm->arch.mem_limit)
965			return -E2BIG;
966
967		if (!new_limit)
968			return -EINVAL;
969
970		/* gmap_create takes last usable address */
971		if (new_limit != KVM_S390_NO_MEM_LIMIT)
972			new_limit -= 1;
973
974		ret = -EBUSY;
975		mutex_lock(&kvm->lock);
976		if (!kvm->created_vcpus) {
977			/* gmap_create will round the limit up */
978			struct gmap *new = gmap_create(current->mm, new_limit);
979
980			if (!new) {
981				ret = -ENOMEM;
982			} else {
983				gmap_remove(kvm->arch.gmap);
984				new->private = kvm;
985				kvm->arch.gmap = new;
986				ret = 0;
987			}
988		}
989		mutex_unlock(&kvm->lock);
990		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
991		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
992			 (void *) kvm->arch.gmap->asce);
993		break;
994	}
995	default:
996		ret = -ENXIO;
997		break;
998	}
999	return ret;
1000}
1001
1002static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
1003
1004void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
1005{
1006	struct kvm_vcpu *vcpu;
1007	unsigned long i;
1008
1009	kvm_s390_vcpu_block_all(kvm);
1010
1011	kvm_for_each_vcpu(i, vcpu, kvm) {
1012		kvm_s390_vcpu_crypto_setup(vcpu);
1013		/* recreate the shadow crycb by leaving the VSIE handler */
1014		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
1015	}
1016
1017	kvm_s390_vcpu_unblock_all(kvm);
1018}
1019
1020static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
1021{
1022	mutex_lock(&kvm->lock);
1023	switch (attr->attr) {
1024	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1025		if (!test_kvm_facility(kvm, 76)) {
1026			mutex_unlock(&kvm->lock);
1027			return -EINVAL;
1028		}
1029		get_random_bytes(
1030			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1031			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1032		kvm->arch.crypto.aes_kw = 1;
1033		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
1034		break;
1035	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1036		if (!test_kvm_facility(kvm, 76)) {
1037			mutex_unlock(&kvm->lock);
1038			return -EINVAL;
1039		}
1040		get_random_bytes(
1041			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1042			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1043		kvm->arch.crypto.dea_kw = 1;
1044		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
1045		break;
1046	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1047		if (!test_kvm_facility(kvm, 76)) {
1048			mutex_unlock(&kvm->lock);
1049			return -EINVAL;
1050		}
1051		kvm->arch.crypto.aes_kw = 0;
1052		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
1053			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1054		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
1055		break;
1056	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1057		if (!test_kvm_facility(kvm, 76)) {
1058			mutex_unlock(&kvm->lock);
1059			return -EINVAL;
1060		}
1061		kvm->arch.crypto.dea_kw = 0;
1062		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
1063			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1064		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
1065		break;
1066	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1067		if (!ap_instructions_available()) {
1068			mutex_unlock(&kvm->lock);
1069			return -EOPNOTSUPP;
1070		}
1071		kvm->arch.crypto.apie = 1;
1072		break;
1073	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1074		if (!ap_instructions_available()) {
1075			mutex_unlock(&kvm->lock);
1076			return -EOPNOTSUPP;
1077		}
1078		kvm->arch.crypto.apie = 0;
1079		break;
1080	default:
1081		mutex_unlock(&kvm->lock);
1082		return -ENXIO;
1083	}
1084
1085	kvm_s390_vcpu_crypto_reset_all(kvm);
1086	mutex_unlock(&kvm->lock);
1087	return 0;
1088}
1089
1090static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
1091{
1092	/* Only set the ECB bits after guest requests zPCI interpretation */
1093	if (!vcpu->kvm->arch.use_zpci_interp)
1094		return;
1095
1096	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
1097	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
1098}
1099
1100void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
1101{
1102	struct kvm_vcpu *vcpu;
1103	unsigned long i;
1104
1105	lockdep_assert_held(&kvm->lock);
1106
1107	if (!kvm_s390_pci_interp_allowed())
1108		return;
1109
1110	/*
1111	 * If host is configured for PCI and the necessary facilities are
1112	 * available, turn on interpretation for the life of this guest
1113	 */
1114	kvm->arch.use_zpci_interp = 1;
1115
1116	kvm_s390_vcpu_block_all(kvm);
1117
1118	kvm_for_each_vcpu(i, vcpu, kvm) {
1119		kvm_s390_vcpu_pci_setup(vcpu);
1120		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
1121	}
1122
1123	kvm_s390_vcpu_unblock_all(kvm);
1124}
1125
1126static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
1127{
1128	unsigned long cx;
1129	struct kvm_vcpu *vcpu;
1130
1131	kvm_for_each_vcpu(cx, vcpu, kvm)
1132		kvm_s390_sync_request(req, vcpu);
1133}
1134
1135/*
1136 * Must be called with kvm->srcu held to avoid races on memslots, and with
1137 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1138 */
1139static int kvm_s390_vm_start_migration(struct kvm *kvm)
1140{
1141	struct kvm_memory_slot *ms;
1142	struct kvm_memslots *slots;
1143	unsigned long ram_pages = 0;
1144	int bkt;
1145
1146	/* migration mode already enabled */
1147	if (kvm->arch.migration_mode)
1148		return 0;
1149	slots = kvm_memslots(kvm);
1150	if (!slots || kvm_memslots_empty(slots))
1151		return -EINVAL;
1152
1153	if (!kvm->arch.use_cmma) {
1154		kvm->arch.migration_mode = 1;
1155		return 0;
1156	}
1157	/* mark all the pages in active slots as dirty */
1158	kvm_for_each_memslot(ms, bkt, slots) {
1159		if (!ms->dirty_bitmap)
1160			return -EINVAL;
1161		/*
1162		 * The second half of the bitmap is only used on x86,
1163		 * and would be wasted otherwise, so we put it to good
1164		 * use here to keep track of the state of the storage
1165		 * attributes.
1166		 */
1167		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1168		ram_pages += ms->npages;
1169	}
1170	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1171	kvm->arch.migration_mode = 1;
1172	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
1173	return 0;
1174}
1175
1176/*
1177 * Must be called with kvm->slots_lock to avoid races with ourselves and
1178 * kvm_s390_vm_start_migration.
1179 */
1180static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1181{
1182	/* migration mode already disabled */
1183	if (!kvm->arch.migration_mode)
1184		return 0;
1185	kvm->arch.migration_mode = 0;
1186	if (kvm->arch.use_cmma)
1187		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
1188	return 0;
1189}
1190
1191static int kvm_s390_vm_set_migration(struct kvm *kvm,
1192				     struct kvm_device_attr *attr)
1193{
1194	int res = -ENXIO;
1195
1196	mutex_lock(&kvm->slots_lock);
1197	switch (attr->attr) {
1198	case KVM_S390_VM_MIGRATION_START:
1199		res = kvm_s390_vm_start_migration(kvm);
1200		break;
1201	case KVM_S390_VM_MIGRATION_STOP:
1202		res = kvm_s390_vm_stop_migration(kvm);
1203		break;
1204	default:
1205		break;
1206	}
1207	mutex_unlock(&kvm->slots_lock);
1208
1209	return res;
1210}
1211
1212static int kvm_s390_vm_get_migration(struct kvm *kvm,
1213				     struct kvm_device_attr *attr)
1214{
1215	u64 mig = kvm->arch.migration_mode;
1216
1217	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1218		return -ENXIO;
1219
1220	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1221		return -EFAULT;
1222	return 0;
1223}
1224
1225static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
1226
1227static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1228{
1229	struct kvm_s390_vm_tod_clock gtod;
1230
1231	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1232		return -EFAULT;
1233
1234	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
1235		return -EINVAL;
1236	__kvm_s390_set_tod_clock(kvm, &gtod);
1237
1238	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1239		gtod.epoch_idx, gtod.tod);
1240
1241	return 0;
1242}
1243
1244static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1245{
1246	u8 gtod_high;
1247
1248	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1249					   sizeof(gtod_high)))
1250		return -EFAULT;
1251
1252	if (gtod_high != 0)
1253		return -EINVAL;
1254	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
1255
1256	return 0;
1257}
1258
1259static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1260{
1261	struct kvm_s390_vm_tod_clock gtod = { 0 };
1262
1263	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1264			   sizeof(gtod.tod)))
1265		return -EFAULT;
1266
1267	__kvm_s390_set_tod_clock(kvm, &gtod);
1268	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
1269	return 0;
1270}
1271
1272static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1273{
1274	int ret;
1275
1276	if (attr->flags)
1277		return -EINVAL;
1278
1279	mutex_lock(&kvm->lock);
1280	/*
1281	 * For protected guests, the TOD is managed by the ultravisor, so trying
1282	 * to change it will never bring the expected results.
1283	 */
1284	if (kvm_s390_pv_is_protected(kvm)) {
1285		ret = -EOPNOTSUPP;
1286		goto out_unlock;
1287	}
1288
1289	switch (attr->attr) {
1290	case KVM_S390_VM_TOD_EXT:
1291		ret = kvm_s390_set_tod_ext(kvm, attr);
1292		break;
1293	case KVM_S390_VM_TOD_HIGH:
1294		ret = kvm_s390_set_tod_high(kvm, attr);
1295		break;
1296	case KVM_S390_VM_TOD_LOW:
1297		ret = kvm_s390_set_tod_low(kvm, attr);
1298		break;
1299	default:
1300		ret = -ENXIO;
1301		break;
1302	}
1303
1304out_unlock:
1305	mutex_unlock(&kvm->lock);
1306	return ret;
1307}
1308
1309static void kvm_s390_get_tod_clock(struct kvm *kvm,
1310				   struct kvm_s390_vm_tod_clock *gtod)
1311{
1312	union tod_clock clk;
1313
1314	preempt_disable();
1315
1316	store_tod_clock_ext(&clk);
1317
1318	gtod->tod = clk.tod + kvm->arch.epoch;
1319	gtod->epoch_idx = 0;
1320	if (test_kvm_facility(kvm, 139)) {
1321		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
1322		if (gtod->tod < clk.tod)
1323			gtod->epoch_idx += 1;
1324	}
1325
1326	preempt_enable();
1327}
1328
1329static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1330{
1331	struct kvm_s390_vm_tod_clock gtod;
1332
1333	memset(&gtod, 0, sizeof(gtod));
1334	kvm_s390_get_tod_clock(kvm, &gtod);
1335	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1336		return -EFAULT;
1337
1338	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1339		gtod.epoch_idx, gtod.tod);
1340	return 0;
1341}
1342
1343static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1344{
1345	u8 gtod_high = 0;
1346
1347	if (copy_to_user((void __user *)attr->addr, &gtod_high,
1348					 sizeof(gtod_high)))
1349		return -EFAULT;
1350	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
1351
1352	return 0;
1353}
1354
1355static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1356{
1357	u64 gtod;
1358
1359	gtod = kvm_s390_get_tod_clock_fast(kvm);
1360	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1361		return -EFAULT;
1362	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
1363
1364	return 0;
1365}
1366
1367static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1368{
1369	int ret;
1370
1371	if (attr->flags)
1372		return -EINVAL;
1373
1374	switch (attr->attr) {
1375	case KVM_S390_VM_TOD_EXT:
1376		ret = kvm_s390_get_tod_ext(kvm, attr);
1377		break;
1378	case KVM_S390_VM_TOD_HIGH:
1379		ret = kvm_s390_get_tod_high(kvm, attr);
1380		break;
1381	case KVM_S390_VM_TOD_LOW:
1382		ret = kvm_s390_get_tod_low(kvm, attr);
1383		break;
1384	default:
1385		ret = -ENXIO;
1386		break;
1387	}
1388	return ret;
1389}
1390
1391static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1392{
1393	struct kvm_s390_vm_cpu_processor *proc;
1394	u16 lowest_ibc, unblocked_ibc;
1395	int ret = 0;
1396
1397	mutex_lock(&kvm->lock);
1398	if (kvm->created_vcpus) {
1399		ret = -EBUSY;
1400		goto out;
1401	}
1402	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1403	if (!proc) {
1404		ret = -ENOMEM;
1405		goto out;
1406	}
1407	if (!copy_from_user(proc, (void __user *)attr->addr,
1408			    sizeof(*proc))) {
1409		kvm->arch.model.cpuid = proc->cpuid;
1410		lowest_ibc = sclp.ibc >> 16 & 0xfff;
1411		unblocked_ibc = sclp.ibc & 0xfff;
1412		if (lowest_ibc && proc->ibc) {
1413			if (proc->ibc > unblocked_ibc)
1414				kvm->arch.model.ibc = unblocked_ibc;
1415			else if (proc->ibc < lowest_ibc)
1416				kvm->arch.model.ibc = lowest_ibc;
1417			else
1418				kvm->arch.model.ibc = proc->ibc;
1419		}
1420		memcpy(kvm->arch.model.fac_list, proc->fac_list,
1421		       S390_ARCH_FAC_LIST_SIZE_BYTE);
1422		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1423			 kvm->arch.model.ibc,
1424			 kvm->arch.model.cpuid);
1425		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1426			 kvm->arch.model.fac_list[0],
1427			 kvm->arch.model.fac_list[1],
1428			 kvm->arch.model.fac_list[2]);
1429	} else
1430		ret = -EFAULT;
1431	kfree(proc);
1432out:
1433	mutex_unlock(&kvm->lock);
1434	return ret;
1435}
1436
1437static int kvm_s390_set_processor_feat(struct kvm *kvm,
1438				       struct kvm_device_attr *attr)
1439{
1440	struct kvm_s390_vm_cpu_feat data;
1441
1442	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1443		return -EFAULT;
1444	if (!bitmap_subset((unsigned long *) data.feat,
1445			   kvm_s390_available_cpu_feat,
1446			   KVM_S390_VM_CPU_FEAT_NR_BITS))
1447		return -EINVAL;
1448
1449	mutex_lock(&kvm->lock);
1450	if (kvm->created_vcpus) {
1451		mutex_unlock(&kvm->lock);
1452		return -EBUSY;
1453	}
1454	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1455	mutex_unlock(&kvm->lock);
1456	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1457			 data.feat[0],
1458			 data.feat[1],
1459			 data.feat[2]);
1460	return 0;
1461}
1462
1463static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1464					  struct kvm_device_attr *attr)
1465{
1466	mutex_lock(&kvm->lock);
1467	if (kvm->created_vcpus) {
1468		mutex_unlock(&kvm->lock);
1469		return -EBUSY;
1470	}
1471
1472	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1473			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1474		mutex_unlock(&kvm->lock);
1475		return -EFAULT;
1476	}
1477	mutex_unlock(&kvm->lock);
1478
1479	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1480		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1481		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1482		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1483		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1484	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1485		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1486		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1487	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1488		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1489		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1490	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1491		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1492		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1493	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1494		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1495		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1496	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1497		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1498		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1499	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1500		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1501		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1502	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1503		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1504		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1505	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1506		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1507		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1508	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1509		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1510		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1511	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1512		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1513		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1514	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1515		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1516		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1517	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1518		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1519		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1520	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1521		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1522		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1523	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1524		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1525		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1526	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1527		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1528		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1529		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1530		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1531	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1532		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1533		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1534		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1535		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1536
1537	return 0;
1538}
1539
1540#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK	\
1541(						\
1542	((struct kvm_s390_vm_cpu_uv_feat){	\
1543		.ap = 1,			\
1544		.ap_intr = 1,			\
1545	})					\
1546	.feat					\
1547)
1548
1549static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1550{
1551	struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
1552	unsigned long data, filter;
1553
1554	filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1555	if (get_user(data, &ptr->feat))
1556		return -EFAULT;
1557	if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
1558		return -EINVAL;
1559
1560	mutex_lock(&kvm->lock);
1561	if (kvm->created_vcpus) {
1562		mutex_unlock(&kvm->lock);
1563		return -EBUSY;
1564	}
1565	kvm->arch.model.uv_feat_guest.feat = data;
1566	mutex_unlock(&kvm->lock);
1567
1568	VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);
1569
1570	return 0;
1571}
1572
1573static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1574{
1575	int ret = -ENXIO;
1576
1577	switch (attr->attr) {
1578	case KVM_S390_VM_CPU_PROCESSOR:
1579		ret = kvm_s390_set_processor(kvm, attr);
1580		break;
1581	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1582		ret = kvm_s390_set_processor_feat(kvm, attr);
1583		break;
1584	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1585		ret = kvm_s390_set_processor_subfunc(kvm, attr);
1586		break;
1587	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1588		ret = kvm_s390_set_uv_feat(kvm, attr);
1589		break;
1590	}
1591	return ret;
1592}
1593
1594static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1595{
1596	struct kvm_s390_vm_cpu_processor *proc;
1597	int ret = 0;
1598
1599	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1600	if (!proc) {
1601		ret = -ENOMEM;
1602		goto out;
1603	}
1604	proc->cpuid = kvm->arch.model.cpuid;
1605	proc->ibc = kvm->arch.model.ibc;
1606	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1607	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1608	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1609		 kvm->arch.model.ibc,
1610		 kvm->arch.model.cpuid);
1611	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1612		 kvm->arch.model.fac_list[0],
1613		 kvm->arch.model.fac_list[1],
1614		 kvm->arch.model.fac_list[2]);
1615	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1616		ret = -EFAULT;
1617	kfree(proc);
1618out:
1619	return ret;
1620}
1621
1622static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1623{
1624	struct kvm_s390_vm_cpu_machine *mach;
1625	int ret = 0;
1626
1627	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
1628	if (!mach) {
1629		ret = -ENOMEM;
1630		goto out;
1631	}
1632	get_cpu_id((struct cpuid *) &mach->cpuid);
1633	mach->ibc = sclp.ibc;
1634	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1635	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1636	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
1637	       sizeof(stfle_fac_list));
1638	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1639		 kvm->arch.model.ibc,
1640		 kvm->arch.model.cpuid);
1641	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1642		 mach->fac_mask[0],
1643		 mach->fac_mask[1],
1644		 mach->fac_mask[2]);
1645	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1646		 mach->fac_list[0],
1647		 mach->fac_list[1],
1648		 mach->fac_list[2]);
1649	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1650		ret = -EFAULT;
1651	kfree(mach);
1652out:
1653	return ret;
1654}
1655
1656static int kvm_s390_get_processor_feat(struct kvm *kvm,
1657				       struct kvm_device_attr *attr)
1658{
1659	struct kvm_s390_vm_cpu_feat data;
1660
1661	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1662	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1663		return -EFAULT;
1664	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1665			 data.feat[0],
1666			 data.feat[1],
1667			 data.feat[2]);
1668	return 0;
1669}
1670
1671static int kvm_s390_get_machine_feat(struct kvm *kvm,
1672				     struct kvm_device_attr *attr)
1673{
1674	struct kvm_s390_vm_cpu_feat data;
1675
1676	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1677	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1678		return -EFAULT;
1679	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
1680			 data.feat[0],
1681			 data.feat[1],
1682			 data.feat[2]);
1683	return 0;
1684}
1685
1686static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1687					  struct kvm_device_attr *attr)
1688{
1689	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1690	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1691		return -EFAULT;
1692
1693	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1694		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1695		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1696		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1697		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1698	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1699		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1700		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1701	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1702		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1703		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1704	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1705		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1706		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1707	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1708		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1709		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1710	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1711		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1712		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1713	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1714		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1715		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1716	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1717		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1718		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1719	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1720		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1721		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1722	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1723		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1724		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1725	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1726		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1727		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1728	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1729		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1730		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1731	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1732		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1733		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1734	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1735		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1736		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1737	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1738		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1739		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1740	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1741		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1742		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1743		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1744		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1745	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1746		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1747		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1748		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1749		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1750
1751	return 0;
1752}
1753
1754static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1755					struct kvm_device_attr *attr)
1756{
1757	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1758	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1759		return -EFAULT;
1760
1761	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1762		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1763		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1764		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1765		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1766	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
1767		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1768		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1769	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
1770		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1771		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1772	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
1773		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1774		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1775	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
1776		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1777		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1778	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
1779		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1780		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1781	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
1782		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1783		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1784	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
1785		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1786		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1787	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
1788		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1789		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1790	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
1791		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1792		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1793	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
1794		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1795		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1796	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
1797		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1798		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1799	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
1800		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1801		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1802	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
1803		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1804		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
1805	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
1806		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1807		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1808	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1809		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1810		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1811		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1812		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
1813	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1814		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1815		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1816		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1817		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
1818
1819	return 0;
1820}
1821
1822static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1823{
1824	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1825	unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
1826
1827	if (put_user(feat, &dst->feat))
1828		return -EFAULT;
1829	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
1830
1831	return 0;
1832}
1833
1834static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1835{
1836	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1837	unsigned long feat;
1838
1839	BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));
1840
1841	feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1842	if (put_user(feat, &dst->feat))
1843		return -EFAULT;
1844	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
1845
1846	return 0;
1847}
1848
1849static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1850{
1851	int ret = -ENXIO;
1852
1853	switch (attr->attr) {
1854	case KVM_S390_VM_CPU_PROCESSOR:
1855		ret = kvm_s390_get_processor(kvm, attr);
1856		break;
1857	case KVM_S390_VM_CPU_MACHINE:
1858		ret = kvm_s390_get_machine(kvm, attr);
1859		break;
1860	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1861		ret = kvm_s390_get_processor_feat(kvm, attr);
1862		break;
1863	case KVM_S390_VM_CPU_MACHINE_FEAT:
1864		ret = kvm_s390_get_machine_feat(kvm, attr);
1865		break;
1866	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1867		ret = kvm_s390_get_processor_subfunc(kvm, attr);
1868		break;
1869	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1870		ret = kvm_s390_get_machine_subfunc(kvm, attr);
1871		break;
1872	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1873		ret = kvm_s390_get_processor_uv_feat(kvm, attr);
1874		break;
1875	case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
1876		ret = kvm_s390_get_machine_uv_feat(kvm, attr);
1877		break;
1878	}
1879	return ret;
1880}
1881
1882/**
1883 * kvm_s390_update_topology_change_report - update CPU topology change report
1884 * @kvm: guest KVM description
1885 * @val: set or clear the MTCR bit
1886 *
1887 * Updates the Multiprocessor Topology-Change-Report bit to signal
1888 * the guest with a topology change.
1889 * This is only relevant if the topology facility is present.
1890 *
1891 * The SCA version, bsca or esca, doesn't matter as offset is the same.
1892 */
1893static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
1894{
1895	union sca_utility new, old;
1896	struct bsca_block *sca;
1897
1898	read_lock(&kvm->arch.sca_lock);
1899	sca = kvm->arch.sca;
1900	do {
1901		old = READ_ONCE(sca->utility);
1902		new = old;
1903		new.mtcr = val;
1904	} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
1905	read_unlock(&kvm->arch.sca_lock);
1906}
1907
1908static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
1909					       struct kvm_device_attr *attr)
1910{
1911	if (!test_kvm_facility(kvm, 11))
1912		return -ENXIO;
1913
1914	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1915	return 0;
1916}
1917
1918static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
1919					       struct kvm_device_attr *attr)
1920{
1921	u8 topo;
1922
1923	if (!test_kvm_facility(kvm, 11))
1924		return -ENXIO;
1925
1926	read_lock(&kvm->arch.sca_lock);
1927	topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
1928	read_unlock(&kvm->arch.sca_lock);
1929
1930	return put_user(topo, (u8 __user *)attr->addr);
1931}
1932
1933static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1934{
1935	int ret;
1936
1937	switch (attr->group) {
1938	case KVM_S390_VM_MEM_CTRL:
1939		ret = kvm_s390_set_mem_control(kvm, attr);
1940		break;
1941	case KVM_S390_VM_TOD:
1942		ret = kvm_s390_set_tod(kvm, attr);
1943		break;
1944	case KVM_S390_VM_CPU_MODEL:
1945		ret = kvm_s390_set_cpu_model(kvm, attr);
1946		break;
1947	case KVM_S390_VM_CRYPTO:
1948		ret = kvm_s390_vm_set_crypto(kvm, attr);
1949		break;
1950	case KVM_S390_VM_MIGRATION:
1951		ret = kvm_s390_vm_set_migration(kvm, attr);
1952		break;
1953	case KVM_S390_VM_CPU_TOPOLOGY:
1954		ret = kvm_s390_set_topo_change_indication(kvm, attr);
1955		break;
1956	default:
1957		ret = -ENXIO;
1958		break;
1959	}
1960
1961	return ret;
1962}
1963
1964static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1965{
1966	int ret;
1967
1968	switch (attr->group) {
1969	case KVM_S390_VM_MEM_CTRL:
1970		ret = kvm_s390_get_mem_control(kvm, attr);
1971		break;
1972	case KVM_S390_VM_TOD:
1973		ret = kvm_s390_get_tod(kvm, attr);
1974		break;
1975	case KVM_S390_VM_CPU_MODEL:
1976		ret = kvm_s390_get_cpu_model(kvm, attr);
1977		break;
1978	case KVM_S390_VM_MIGRATION:
1979		ret = kvm_s390_vm_get_migration(kvm, attr);
1980		break;
1981	case KVM_S390_VM_CPU_TOPOLOGY:
1982		ret = kvm_s390_get_topo_change_indication(kvm, attr);
1983		break;
1984	default:
1985		ret = -ENXIO;
1986		break;
1987	}
1988
1989	return ret;
1990}
1991
1992static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1993{
1994	int ret;
1995
1996	switch (attr->group) {
1997	case KVM_S390_VM_MEM_CTRL:
1998		switch (attr->attr) {
1999		case KVM_S390_VM_MEM_ENABLE_CMMA:
2000		case KVM_S390_VM_MEM_CLR_CMMA:
2001			ret = sclp.has_cmma ? 0 : -ENXIO;
2002			break;
2003		case KVM_S390_VM_MEM_LIMIT_SIZE:
2004			ret = 0;
2005			break;
2006		default:
2007			ret = -ENXIO;
2008			break;
2009		}
2010		break;
2011	case KVM_S390_VM_TOD:
2012		switch (attr->attr) {
2013		case KVM_S390_VM_TOD_LOW:
2014		case KVM_S390_VM_TOD_HIGH:
2015			ret = 0;
2016			break;
2017		default:
2018			ret = -ENXIO;
2019			break;
2020		}
2021		break;
2022	case KVM_S390_VM_CPU_MODEL:
2023		switch (attr->attr) {
2024		case KVM_S390_VM_CPU_PROCESSOR:
2025		case KVM_S390_VM_CPU_MACHINE:
2026		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
2027		case KVM_S390_VM_CPU_MACHINE_FEAT:
2028		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
2029		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
2030		case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
2031		case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
2032			ret = 0;
2033			break;
2034		default:
2035			ret = -ENXIO;
2036			break;
2037		}
2038		break;
2039	case KVM_S390_VM_CRYPTO:
2040		switch (attr->attr) {
2041		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
2042		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
2043		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
2044		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
2045			ret = 0;
2046			break;
2047		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
2048		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
2049			ret = ap_instructions_available() ? 0 : -ENXIO;
2050			break;
2051		default:
2052			ret = -ENXIO;
2053			break;
2054		}
2055		break;
2056	case KVM_S390_VM_MIGRATION:
2057		ret = 0;
2058		break;
2059	case KVM_S390_VM_CPU_TOPOLOGY:
2060		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
2061		break;
2062	default:
2063		ret = -ENXIO;
2064		break;
2065	}
2066
2067	return ret;
2068}
2069
2070static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2071{
2072	uint8_t *keys;
2073	uint64_t hva;
2074	int srcu_idx, i, r = 0;
2075
2076	if (args->flags != 0)
2077		return -EINVAL;
2078
2079	/* Is this guest using storage keys? */
2080	if (!mm_uses_skeys(current->mm))
2081		return KVM_S390_GET_SKEYS_NONE;
2082
2083	/* Enforce sane limit on memory allocation */
2084	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2085		return -EINVAL;
2086
2087	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2088	if (!keys)
2089		return -ENOMEM;
2090
2091	mmap_read_lock(current->mm);
2092	srcu_idx = srcu_read_lock(&kvm->srcu);
2093	for (i = 0; i < args->count; i++) {
2094		hva = gfn_to_hva(kvm, args->start_gfn + i);
2095		if (kvm_is_error_hva(hva)) {
2096			r = -EFAULT;
2097			break;
2098		}
2099
2100		r = get_guest_storage_key(current->mm, hva, &keys[i]);
2101		if (r)
2102			break;
2103	}
2104	srcu_read_unlock(&kvm->srcu, srcu_idx);
2105	mmap_read_unlock(current->mm);
2106
2107	if (!r) {
2108		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
2109				 sizeof(uint8_t) * args->count);
2110		if (r)
2111			r = -EFAULT;
2112	}
2113
2114	kvfree(keys);
2115	return r;
2116}
2117
2118static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2119{
2120	uint8_t *keys;
2121	uint64_t hva;
2122	int srcu_idx, i, r = 0;
2123	bool unlocked;
2124
2125	if (args->flags != 0)
2126		return -EINVAL;
2127
2128	/* Enforce sane limit on memory allocation */
2129	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2130		return -EINVAL;
2131
2132	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2133	if (!keys)
2134		return -ENOMEM;
2135
2136	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
2137			   sizeof(uint8_t) * args->count);
2138	if (r) {
2139		r = -EFAULT;
2140		goto out;
2141	}
2142
2143	/* Enable storage key handling for the guest */
2144	r = s390_enable_skey();
2145	if (r)
2146		goto out;
2147
2148	i = 0;
2149	mmap_read_lock(current->mm);
2150	srcu_idx = srcu_read_lock(&kvm->srcu);
2151        while (i < args->count) {
2152		unlocked = false;
2153		hva = gfn_to_hva(kvm, args->start_gfn + i);
2154		if (kvm_is_error_hva(hva)) {
2155			r = -EFAULT;
2156			break;
2157		}
2158
2159		/* Lowest order bit is reserved */
2160		if (keys[i] & 0x01) {
2161			r = -EINVAL;
2162			break;
2163		}
2164
2165		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
2166		if (r) {
2167			r = fixup_user_fault(current->mm, hva,
2168					     FAULT_FLAG_WRITE, &unlocked);
2169			if (r)
2170				break;
2171		}
2172		if (!r)
2173			i++;
2174	}
2175	srcu_read_unlock(&kvm->srcu, srcu_idx);
2176	mmap_read_unlock(current->mm);
2177out:
2178	kvfree(keys);
2179	return r;
2180}
2181
2182/*
2183 * Base address and length must be sent at the start of each block, therefore
2184 * it's cheaper to send some clean data, as long as it's less than the size of
2185 * two longs.
2186 */
2187#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
2188/* for consistency */
2189#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
2190
2191static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2192			      u8 *res, unsigned long bufsize)
2193{
2194	unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2195
2196	args->count = 0;
2197	while (args->count < bufsize) {
2198		hva = gfn_to_hva(kvm, cur_gfn);
2199		/*
2200		 * We return an error if the first value was invalid, but we
2201		 * return successfully if at least one value was copied.
2202		 */
2203		if (kvm_is_error_hva(hva))
2204			return args->count ? 0 : -EFAULT;
2205		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2206			pgstev = 0;
2207		res[args->count++] = (pgstev >> 24) & 0x43;
2208		cur_gfn++;
2209	}
2210
2211	return 0;
2212}
2213
2214static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
2215						     gfn_t gfn)
2216{
2217	return ____gfn_to_memslot(slots, gfn, true);
2218}
2219
2220static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2221					      unsigned long cur_gfn)
2222{
2223	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2224	unsigned long ofs = cur_gfn - ms->base_gfn;
2225	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
2226
2227	if (ms->base_gfn + ms->npages <= cur_gfn) {
2228		mnode = rb_next(mnode);
2229		/* If we are above the highest slot, wrap around */
2230		if (!mnode)
2231			mnode = rb_first(&slots->gfn_tree);
2232
2233		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2234		ofs = 0;
2235	}
2236
2237	if (cur_gfn < ms->base_gfn)
2238		ofs = 0;
2239
2240	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2241	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
2242		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2243		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
2244	}
2245	return ms->base_gfn + ofs;
2246}
2247
2248static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2249			     u8 *res, unsigned long bufsize)
2250{
2251	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2252	struct kvm_memslots *slots = kvm_memslots(kvm);
2253	struct kvm_memory_slot *ms;
2254
2255	if (unlikely(kvm_memslots_empty(slots)))
2256		return 0;
2257
2258	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2259	ms = gfn_to_memslot(kvm, cur_gfn);
2260	args->count = 0;
2261	args->start_gfn = cur_gfn;
2262	if (!ms)
2263		return 0;
2264	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2265	mem_end = kvm_s390_get_gfn_end(slots);
2266
2267	while (args->count < bufsize) {
2268		hva = gfn_to_hva(kvm, cur_gfn);
2269		if (kvm_is_error_hva(hva))
2270			return 0;
2271		/* Decrement only if we actually flipped the bit to 0 */
2272		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2273			atomic64_dec(&kvm->arch.cmma_dirty_pages);
2274		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2275			pgstev = 0;
2276		/* Save the value */
2277		res[args->count++] = (pgstev >> 24) & 0x43;
2278		/* If the next bit is too far away, stop. */
2279		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2280			return 0;
2281		/* If we reached the previous "next", find the next one */
2282		if (cur_gfn == next_gfn)
2283			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2284		/* Reached the end of memory or of the buffer, stop */
2285		if ((next_gfn >= mem_end) ||
2286		    (next_gfn - args->start_gfn >= bufsize))
2287			return 0;
2288		cur_gfn++;
2289		/* Reached the end of the current memslot, take the next one. */
2290		if (cur_gfn - ms->base_gfn >= ms->npages) {
2291			ms = gfn_to_memslot(kvm, cur_gfn);
2292			if (!ms)
2293				return 0;
2294		}
2295	}
2296	return 0;
2297}
2298
2299/*
2300 * This function searches for the next page with dirty CMMA attributes, and
2301 * saves the attributes in the buffer up to either the end of the buffer or
2302 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2303 * no trailing clean bytes are saved.
2304 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2305 * output buffer will indicate 0 as length.
2306 */
2307static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2308				  struct kvm_s390_cmma_log *args)
2309{
2310	unsigned long bufsize;
2311	int srcu_idx, peek, ret;
2312	u8 *values;
2313
2314	if (!kvm->arch.use_cmma)
2315		return -ENXIO;
2316	/* Invalid/unsupported flags were specified */
2317	if (args->flags & ~KVM_S390_CMMA_PEEK)
2318		return -EINVAL;
2319	/* Migration mode query, and we are not doing a migration */
2320	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2321	if (!peek && !kvm->arch.migration_mode)
2322		return -EINVAL;
2323	/* CMMA is disabled or was not used, or the buffer has length zero */
2324	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2325	if (!bufsize || !kvm->mm->context.uses_cmm) {
2326		memset(args, 0, sizeof(*args));
2327		return 0;
2328	}
2329	/* We are not peeking, and there are no dirty pages */
2330	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2331		memset(args, 0, sizeof(*args));
2332		return 0;
2333	}
2334
2335	values = vmalloc(bufsize);
2336	if (!values)
2337		return -ENOMEM;
2338
2339	mmap_read_lock(kvm->mm);
2340	srcu_idx = srcu_read_lock(&kvm->srcu);
2341	if (peek)
2342		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2343	else
2344		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2345	srcu_read_unlock(&kvm->srcu, srcu_idx);
2346	mmap_read_unlock(kvm->mm);
2347
2348	if (kvm->arch.migration_mode)
2349		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2350	else
2351		args->remaining = 0;
2352
2353	if (copy_to_user((void __user *)args->values, values, args->count))
2354		ret = -EFAULT;
2355
2356	vfree(values);
2357	return ret;
2358}
2359
2360/*
2361 * This function sets the CMMA attributes for the given pages. If the input
2362 * buffer has zero length, no action is taken, otherwise the attributes are
2363 * set and the mm->context.uses_cmm flag is set.
2364 */
2365static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2366				  const struct kvm_s390_cmma_log *args)
2367{
2368	unsigned long hva, mask, pgstev, i;
2369	uint8_t *bits;
2370	int srcu_idx, r = 0;
2371
2372	mask = args->mask;
2373
2374	if (!kvm->arch.use_cmma)
2375		return -ENXIO;
2376	/* invalid/unsupported flags */
2377	if (args->flags != 0)
2378		return -EINVAL;
2379	/* Enforce sane limit on memory allocation */
2380	if (args->count > KVM_S390_CMMA_SIZE_MAX)
2381		return -EINVAL;
2382	/* Nothing to do */
2383	if (args->count == 0)
2384		return 0;
2385
2386	bits = vmalloc(array_size(sizeof(*bits), args->count));
2387	if (!bits)
2388		return -ENOMEM;
2389
2390	r = copy_from_user(bits, (void __user *)args->values, args->count);
2391	if (r) {
2392		r = -EFAULT;
2393		goto out;
2394	}
2395
2396	mmap_read_lock(kvm->mm);
2397	srcu_idx = srcu_read_lock(&kvm->srcu);
2398	for (i = 0; i < args->count; i++) {
2399		hva = gfn_to_hva(kvm, args->start_gfn + i);
2400		if (kvm_is_error_hva(hva)) {
2401			r = -EFAULT;
2402			break;
2403		}
2404
2405		pgstev = bits[i];
2406		pgstev = pgstev << 24;
2407		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2408		set_pgste_bits(kvm->mm, hva, mask, pgstev);
2409	}
2410	srcu_read_unlock(&kvm->srcu, srcu_idx);
2411	mmap_read_unlock(kvm->mm);
2412
2413	if (!kvm->mm->context.uses_cmm) {
2414		mmap_write_lock(kvm->mm);
2415		kvm->mm->context.uses_cmm = 1;
2416		mmap_write_unlock(kvm->mm);
2417	}
2418out:
2419	vfree(bits);
2420	return r;
2421}
2422
2423/**
2424 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2425 * non protected.
2426 * @kvm: the VM whose protected vCPUs are to be converted
2427 * @rc: return value for the RC field of the UVC (in case of error)
2428 * @rrc: return value for the RRC field of the UVC (in case of error)
2429 *
2430 * Does not stop in case of error, tries to convert as many
2431 * CPUs as possible. In case of error, the RC and RRC of the last error are
2432 * returned.
2433 *
2434 * Return: 0 in case of success, otherwise -EIO
2435 */
2436int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2437{
2438	struct kvm_vcpu *vcpu;
2439	unsigned long i;
2440	u16 _rc, _rrc;
2441	int ret = 0;
2442
2443	/*
2444	 * We ignore failures and try to destroy as many CPUs as possible.
2445	 * At the same time we must not free the assigned resources when
2446	 * this fails, as the ultravisor has still access to that memory.
2447	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2448	 * behind.
2449	 * We want to return the first failure rc and rrc, though.
2450	 */
2451	kvm_for_each_vcpu(i, vcpu, kvm) {
2452		mutex_lock(&vcpu->mutex);
2453		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2454			*rc = _rc;
2455			*rrc = _rrc;
2456			ret = -EIO;
2457		}
2458		mutex_unlock(&vcpu->mutex);
2459	}
2460	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2461	if (use_gisa)
2462		kvm_s390_gisa_enable(kvm);
2463	return ret;
2464}
2465
2466/**
2467 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2468 * to protected.
2469 * @kvm: the VM whose protected vCPUs are to be converted
2470 * @rc: return value for the RC field of the UVC (in case of error)
2471 * @rrc: return value for the RRC field of the UVC (in case of error)
2472 *
2473 * Tries to undo the conversion in case of error.
2474 *
2475 * Return: 0 in case of success, otherwise -EIO
2476 */
2477static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2478{
2479	unsigned long i;
2480	int r = 0;
2481	u16 dummy;
2482
2483	struct kvm_vcpu *vcpu;
2484
2485	/* Disable the GISA if the ultravisor does not support AIV. */
2486	if (!uv_has_feature(BIT_UV_FEAT_AIV))
2487		kvm_s390_gisa_disable(kvm);
2488
2489	kvm_for_each_vcpu(i, vcpu, kvm) {
2490		mutex_lock(&vcpu->mutex);
2491		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2492		mutex_unlock(&vcpu->mutex);
2493		if (r)
2494			break;
2495	}
2496	if (r)
2497		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2498	return r;
2499}
2500
2501/*
2502 * Here we provide user space with a direct interface to query UV
2503 * related data like UV maxima and available features as well as
2504 * feature specific data.
2505 *
2506 * To facilitate future extension of the data structures we'll try to
2507 * write data up to the maximum requested length.
2508 */
2509static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
2510{
2511	ssize_t len_min;
2512
2513	switch (info->header.id) {
2514	case KVM_PV_INFO_VM: {
2515		len_min =  sizeof(info->header) + sizeof(info->vm);
2516
2517		if (info->header.len_max < len_min)
2518			return -EINVAL;
2519
2520		memcpy(info->vm.inst_calls_list,
2521		       uv_info.inst_calls_list,
2522		       sizeof(uv_info.inst_calls_list));
2523
2524		/* It's max cpuid not max cpus, so it's off by one */
2525		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
2526		info->vm.max_guests = uv_info.max_num_sec_conf;
2527		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
2528		info->vm.feature_indication = uv_info.uv_feature_indications;
2529
2530		return len_min;
2531	}
2532	case KVM_PV_INFO_DUMP: {
2533		len_min =  sizeof(info->header) + sizeof(info->dump);
2534
2535		if (info->header.len_max < len_min)
2536			return -EINVAL;
2537
2538		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2539		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2540		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
2541		return len_min;
2542	}
2543	default:
2544		return -EINVAL;
2545	}
2546}
2547
2548static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
2549			   struct kvm_s390_pv_dmp dmp)
2550{
2551	int r = -EINVAL;
2552	void __user *result_buff = (void __user *)dmp.buff_addr;
2553
2554	switch (dmp.subcmd) {
2555	case KVM_PV_DUMP_INIT: {
2556		if (kvm->arch.pv.dumping)
2557			break;
2558
2559		/*
2560		 * Block SIE entry as concurrent dump UVCs could lead
2561		 * to validities.
2562		 */
2563		kvm_s390_vcpu_block_all(kvm);
2564
2565		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2566				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
2567		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
2568			     cmd->rc, cmd->rrc);
2569		if (!r) {
2570			kvm->arch.pv.dumping = true;
2571		} else {
2572			kvm_s390_vcpu_unblock_all(kvm);
2573			r = -EINVAL;
2574		}
2575		break;
2576	}
2577	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
2578		if (!kvm->arch.pv.dumping)
2579			break;
2580
2581		/*
2582		 * gaddr is an output parameter since we might stop
2583		 * early. As dmp will be copied back in our caller, we
2584		 * don't need to do it ourselves.
2585		 */
2586		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
2587						&cmd->rc, &cmd->rrc);
2588		break;
2589	}
2590	case KVM_PV_DUMP_COMPLETE: {
2591		if (!kvm->arch.pv.dumping)
2592			break;
2593
2594		r = -EINVAL;
2595		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
2596			break;
2597
2598		r = kvm_s390_pv_dump_complete(kvm, result_buff,
2599					      &cmd->rc, &cmd->rrc);
2600		break;
2601	}
2602	default:
2603		r = -ENOTTY;
2604		break;
2605	}
2606
2607	return r;
2608}
2609
2610static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2611{
2612	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
2613	void __user *argp = (void __user *)cmd->data;
2614	int r = 0;
2615	u16 dummy;
2616
2617	if (need_lock)
2618		mutex_lock(&kvm->lock);
2619
2620	switch (cmd->cmd) {
2621	case KVM_PV_ENABLE: {
2622		r = -EINVAL;
2623		if (kvm_s390_pv_is_protected(kvm))
2624			break;
2625
2626		/*
2627		 *  FMT 4 SIE needs esca. As we never switch back to bsca from
2628		 *  esca, we need no cleanup in the error cases below
2629		 */
2630		r = sca_switch_to_extended(kvm);
2631		if (r)
2632			break;
2633
2634		r = s390_disable_cow_sharing();
2635		if (r)
2636			break;
2637
2638		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2639		if (r)
2640			break;
2641
2642		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2643		if (r)
2644			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2645
2646		/* we need to block service interrupts from now on */
2647		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2648		break;
2649	}
2650	case KVM_PV_ASYNC_CLEANUP_PREPARE:
2651		r = -EINVAL;
2652		if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
2653			break;
2654
2655		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2656		/*
2657		 * If a CPU could not be destroyed, destroy VM will also fail.
2658		 * There is no point in trying to destroy it. Instead return
2659		 * the rc and rrc from the first CPU that failed destroying.
2660		 */
2661		if (r)
2662			break;
2663		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2664
2665		/* no need to block service interrupts any more */
2666		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2667		break;
2668	case KVM_PV_ASYNC_CLEANUP_PERFORM:
2669		r = -EINVAL;
2670		if (!async_destroy)
2671			break;
2672		/* kvm->lock must not be held; this is asserted inside the function. */
2673		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2674		break;
2675	case KVM_PV_DISABLE: {
2676		r = -EINVAL;
2677		if (!kvm_s390_pv_is_protected(kvm))
2678			break;
2679
2680		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2681		/*
2682		 * If a CPU could not be destroyed, destroy VM will also fail.
2683		 * There is no point in trying to destroy it. Instead return
2684		 * the rc and rrc from the first CPU that failed destroying.
2685		 */
2686		if (r)
2687			break;
2688		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
2689
2690		/* no need to block service interrupts any more */
2691		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2692		break;
2693	}
2694	case KVM_PV_SET_SEC_PARMS: {
2695		struct kvm_s390_pv_sec_parm parms = {};
2696		void *hdr;
2697
2698		r = -EINVAL;
2699		if (!kvm_s390_pv_is_protected(kvm))
2700			break;
2701
2702		r = -EFAULT;
2703		if (copy_from_user(&parms, argp, sizeof(parms)))
2704			break;
2705
2706		/* Currently restricted to 8KB */
2707		r = -EINVAL;
2708		if (parms.length > PAGE_SIZE * 2)
2709			break;
2710
2711		r = -ENOMEM;
2712		hdr = vmalloc(parms.length);
2713		if (!hdr)
2714			break;
2715
2716		r = -EFAULT;
2717		if (!copy_from_user(hdr, (void __user *)parms.origin,
2718				    parms.length))
2719			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2720						      &cmd->rc, &cmd->rrc);
2721
2722		vfree(hdr);
2723		break;
2724	}
2725	case KVM_PV_UNPACK: {
2726		struct kvm_s390_pv_unp unp = {};
2727
2728		r = -EINVAL;
2729		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2730			break;
2731
2732		r = -EFAULT;
2733		if (copy_from_user(&unp, argp, sizeof(unp)))
2734			break;
2735
2736		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2737				       &cmd->rc, &cmd->rrc);
2738		break;
2739	}
2740	case KVM_PV_VERIFY: {
2741		r = -EINVAL;
2742		if (!kvm_s390_pv_is_protected(kvm))
2743			break;
2744
2745		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2746				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2747		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2748			     cmd->rrc);
2749		break;
2750	}
2751	case KVM_PV_PREP_RESET: {
2752		r = -EINVAL;
2753		if (!kvm_s390_pv_is_protected(kvm))
2754			break;
2755
2756		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2757				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2758		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2759			     cmd->rc, cmd->rrc);
2760		break;
2761	}
2762	case KVM_PV_UNSHARE_ALL: {
2763		r = -EINVAL;
2764		if (!kvm_s390_pv_is_protected(kvm))
2765			break;
2766
2767		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2768				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2769		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2770			     cmd->rc, cmd->rrc);
2771		break;
2772	}
2773	case KVM_PV_INFO: {
2774		struct kvm_s390_pv_info info = {};
2775		ssize_t data_len;
2776
2777		/*
2778		 * No need to check the VM protection here.
2779		 *
2780		 * Maybe user space wants to query some of the data
2781		 * when the VM is still unprotected. If we see the
2782		 * need to fence a new data command we can still
2783		 * return an error in the info handler.
2784		 */
2785
2786		r = -EFAULT;
2787		if (copy_from_user(&info, argp, sizeof(info.header)))
2788			break;
2789
2790		r = -EINVAL;
2791		if (info.header.len_max < sizeof(info.header))
2792			break;
2793
2794		data_len = kvm_s390_handle_pv_info(&info);
2795		if (data_len < 0) {
2796			r = data_len;
2797			break;
2798		}
2799		/*
2800		 * If a data command struct is extended (multiple
2801		 * times) this can be used to determine how much of it
2802		 * is valid.
2803		 */
2804		info.header.len_written = data_len;
2805
2806		r = -EFAULT;
2807		if (copy_to_user(argp, &info, data_len))
2808			break;
2809
2810		r = 0;
2811		break;
2812	}
2813	case KVM_PV_DUMP: {
2814		struct kvm_s390_pv_dmp dmp;
2815
2816		r = -EINVAL;
2817		if (!kvm_s390_pv_is_protected(kvm))
2818			break;
2819
2820		r = -EFAULT;
2821		if (copy_from_user(&dmp, argp, sizeof(dmp)))
2822			break;
2823
2824		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
2825		if (r)
2826			break;
2827
2828		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2829			r = -EFAULT;
2830			break;
2831		}
2832
2833		break;
2834	}
2835	default:
2836		r = -ENOTTY;
2837	}
2838	if (need_lock)
2839		mutex_unlock(&kvm->lock);
2840
2841	return r;
2842}
2843
2844static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
2845{
2846	if (mop->flags & ~supported_flags || !mop->size)
2847		return -EINVAL;
2848	if (mop->size > MEM_OP_MAX_SIZE)
2849		return -E2BIG;
2850	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
2851		if (mop->key > 0xf)
2852			return -EINVAL;
2853	} else {
2854		mop->key = 0;
2855	}
2856	return 0;
2857}
2858
2859static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2860{
2861	void __user *uaddr = (void __user *)mop->buf;
2862	enum gacc_mode acc_mode;
2863	void *tmpbuf = NULL;
2864	int r, srcu_idx;
2865
2866	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
2867					KVM_S390_MEMOP_F_CHECK_ONLY);
2868	if (r)
2869		return r;
2870
2871	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2872		tmpbuf = vmalloc(mop->size);
2873		if (!tmpbuf)
2874			return -ENOMEM;
2875	}
2876
2877	srcu_idx = srcu_read_lock(&kvm->srcu);
2878
2879	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
2880		r = PGM_ADDRESSING;
2881		goto out_unlock;
2882	}
2883
2884	acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
2885	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2886		r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
2887		goto out_unlock;
2888	}
2889	if (acc_mode == GACC_FETCH) {
2890		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2891					      mop->size, GACC_FETCH, mop->key);
2892		if (r)
2893			goto out_unlock;
2894		if (copy_to_user(uaddr, tmpbuf, mop->size))
2895			r = -EFAULT;
2896	} else {
2897		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2898			r = -EFAULT;
2899			goto out_unlock;
2900		}
2901		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2902					      mop->size, GACC_STORE, mop->key);
2903	}
2904
2905out_unlock:
2906	srcu_read_unlock(&kvm->srcu, srcu_idx);
2907
2908	vfree(tmpbuf);
2909	return r;
2910}
2911
2912static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2913{
2914	void __user *uaddr = (void __user *)mop->buf;
2915	void __user *old_addr = (void __user *)mop->old_addr;
2916	union {
2917		__uint128_t quad;
2918		char raw[sizeof(__uint128_t)];
2919	} old = { .quad = 0}, new = { .quad = 0 };
2920	unsigned int off_in_quad = sizeof(new) - mop->size;
2921	int r, srcu_idx;
2922	bool success;
2923
2924	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
2925	if (r)
2926		return r;
2927	/*
2928	 * This validates off_in_quad. Checking that size is a power
2929	 * of two is not necessary, as cmpxchg_guest_abs_with_key
2930	 * takes care of that
2931	 */
2932	if (mop->size > sizeof(new))
2933		return -EINVAL;
2934	if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
2935		return -EFAULT;
2936	if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
2937		return -EFAULT;
2938
2939	srcu_idx = srcu_read_lock(&kvm->srcu);
2940
2941	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
2942		r = PGM_ADDRESSING;
2943		goto out_unlock;
2944	}
2945
2946	r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
2947				       new.quad, mop->key, &success);
2948	if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
2949		r = -EFAULT;
2950
2951out_unlock:
2952	srcu_read_unlock(&kvm->srcu, srcu_idx);
2953	return r;
2954}
2955
2956static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2957{
2958	/*
2959	 * This is technically a heuristic only, if the kvm->lock is not
2960	 * taken, it is not guaranteed that the vm is/remains non-protected.
2961	 * This is ok from a kernel perspective, wrongdoing is detected
2962	 * on the access, -EFAULT is returned and the vm may crash the
2963	 * next time it accesses the memory in question.
2964	 * There is no sane usecase to do switching and a memop on two
2965	 * different CPUs at the same time.
2966	 */
2967	if (kvm_s390_pv_get_handle(kvm))
2968		return -EINVAL;
2969
2970	switch (mop->op) {
2971	case KVM_S390_MEMOP_ABSOLUTE_READ:
2972	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
2973		return kvm_s390_vm_mem_op_abs(kvm, mop);
2974	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
2975		return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
2976	default:
2977		return -EINVAL;
2978	}
2979}
2980
2981int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
2982{
2983	struct kvm *kvm = filp->private_data;
2984	void __user *argp = (void __user *)arg;
2985	struct kvm_device_attr attr;
2986	int r;
2987
2988	switch (ioctl) {
2989	case KVM_S390_INTERRUPT: {
2990		struct kvm_s390_interrupt s390int;
2991
2992		r = -EFAULT;
2993		if (copy_from_user(&s390int, argp, sizeof(s390int)))
2994			break;
2995		r = kvm_s390_inject_vm(kvm, &s390int);
2996		break;
2997	}
2998	case KVM_CREATE_IRQCHIP: {
2999		struct kvm_irq_routing_entry routing;
3000
3001		r = -EINVAL;
3002		if (kvm->arch.use_irqchip) {
3003			/* Set up dummy routing. */
3004			memset(&routing, 0, sizeof(routing));
3005			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
3006		}
3007		break;
3008	}
3009	case KVM_SET_DEVICE_ATTR: {
3010		r = -EFAULT;
3011		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3012			break;
3013		r = kvm_s390_vm_set_attr(kvm, &attr);
3014		break;
3015	}
3016	case KVM_GET_DEVICE_ATTR: {
3017		r = -EFAULT;
3018		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3019			break;
3020		r = kvm_s390_vm_get_attr(kvm, &attr);
3021		break;
3022	}
3023	case KVM_HAS_DEVICE_ATTR: {
3024		r = -EFAULT;
3025		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3026			break;
3027		r = kvm_s390_vm_has_attr(kvm, &attr);
3028		break;
3029	}
3030	case KVM_S390_GET_SKEYS: {
3031		struct kvm_s390_skeys args;
3032
3033		r = -EFAULT;
3034		if (copy_from_user(&args, argp,
3035				   sizeof(struct kvm_s390_skeys)))
3036			break;
3037		r = kvm_s390_get_skeys(kvm, &args);
3038		break;
3039	}
3040	case KVM_S390_SET_SKEYS: {
3041		struct kvm_s390_skeys args;
3042
3043		r = -EFAULT;
3044		if (copy_from_user(&args, argp,
3045				   sizeof(struct kvm_s390_skeys)))
3046			break;
3047		r = kvm_s390_set_skeys(kvm, &args);
3048		break;
3049	}
3050	case KVM_S390_GET_CMMA_BITS: {
3051		struct kvm_s390_cmma_log args;
3052
3053		r = -EFAULT;
3054		if (copy_from_user(&args, argp, sizeof(args)))
3055			break;
3056		mutex_lock(&kvm->slots_lock);
3057		r = kvm_s390_get_cmma_bits(kvm, &args);
3058		mutex_unlock(&kvm->slots_lock);
3059		if (!r) {
3060			r = copy_to_user(argp, &args, sizeof(args));
3061			if (r)
3062				r = -EFAULT;
3063		}
3064		break;
3065	}
3066	case KVM_S390_SET_CMMA_BITS: {
3067		struct kvm_s390_cmma_log args;
3068
3069		r = -EFAULT;
3070		if (copy_from_user(&args, argp, sizeof(args)))
3071			break;
3072		mutex_lock(&kvm->slots_lock);
3073		r = kvm_s390_set_cmma_bits(kvm, &args);
3074		mutex_unlock(&kvm->slots_lock);
3075		break;
3076	}
3077	case KVM_S390_PV_COMMAND: {
3078		struct kvm_pv_cmd args;
3079
3080		/* protvirt means user cpu state */
3081		kvm_s390_set_user_cpu_state_ctrl(kvm);
3082		r = 0;
3083		if (!is_prot_virt_host()) {
3084			r = -EINVAL;
3085			break;
3086		}
3087		if (copy_from_user(&args, argp, sizeof(args))) {
3088			r = -EFAULT;
3089			break;
3090		}
3091		if (args.flags) {
3092			r = -EINVAL;
3093			break;
3094		}
3095		/* must be called without kvm->lock */
3096		r = kvm_s390_handle_pv(kvm, &args);
3097		if (copy_to_user(argp, &args, sizeof(args))) {
3098			r = -EFAULT;
3099			break;
3100		}
3101		break;
3102	}
3103	case KVM_S390_MEM_OP: {
3104		struct kvm_s390_mem_op mem_op;
3105
3106		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3107			r = kvm_s390_vm_mem_op(kvm, &mem_op);
3108		else
3109			r = -EFAULT;
3110		break;
3111	}
3112	case KVM_S390_ZPCI_OP: {
3113		struct kvm_s390_zpci_op args;
3114
3115		r = -EINVAL;
3116		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3117			break;
3118		if (copy_from_user(&args, argp, sizeof(args))) {
3119			r = -EFAULT;
3120			break;
3121		}
3122		r = kvm_s390_pci_zpci_op(kvm, &args);
3123		break;
3124	}
3125	default:
3126		r = -ENOTTY;
3127	}
3128
3129	return r;
3130}
3131
3132static int kvm_s390_apxa_installed(void)
3133{
3134	struct ap_config_info info;
3135
3136	if (ap_instructions_available()) {
3137		if (ap_qci(&info) == 0)
3138			return info.apxa;
3139	}
3140
3141	return 0;
3142}
3143
3144/*
3145 * The format of the crypto control block (CRYCB) is specified in the 3 low
3146 * order bits of the CRYCB designation (CRYCBD) field as follows:
3147 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3148 *	     AP extended addressing (APXA) facility are installed.
3149 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3150 * Format 2: Both the APXA and MSAX3 facilities are installed
3151 */
3152static void kvm_s390_set_crycb_format(struct kvm *kvm)
3153{
3154	kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb);
3155
3156	/* Clear the CRYCB format bits - i.e., set format 0 by default */
3157	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3158
3159	/* Check whether MSAX3 is installed */
3160	if (!test_kvm_facility(kvm, 76))
3161		return;
3162
3163	if (kvm_s390_apxa_installed())
3164		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3165	else
3166		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3167}
3168
3169/*
3170 * kvm_arch_crypto_set_masks
3171 *
3172 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3173 *	 to be set.
3174 * @apm: the mask identifying the accessible AP adapters
3175 * @aqm: the mask identifying the accessible AP domains
3176 * @adm: the mask identifying the accessible AP control domains
3177 *
3178 * Set the masks that identify the adapters, domains and control domains to
3179 * which the KVM guest is granted access.
3180 *
3181 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3182 *	 function.
3183 */
3184void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
3185			       unsigned long *aqm, unsigned long *adm)
3186{
3187	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3188
3189	kvm_s390_vcpu_block_all(kvm);
3190
3191	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3192	case CRYCB_FORMAT2: /* APCB1 use 256 bits */
3193		memcpy(crycb->apcb1.apm, apm, 32);
3194		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
3195			 apm[0], apm[1], apm[2], apm[3]);
3196		memcpy(crycb->apcb1.aqm, aqm, 32);
3197		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
3198			 aqm[0], aqm[1], aqm[2], aqm[3]);
3199		memcpy(crycb->apcb1.adm, adm, 32);
3200		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
3201			 adm[0], adm[1], adm[2], adm[3]);
3202		break;
3203	case CRYCB_FORMAT1:
3204	case CRYCB_FORMAT0: /* Fall through both use APCB0 */
3205		memcpy(crycb->apcb0.apm, apm, 8);
3206		memcpy(crycb->apcb0.aqm, aqm, 2);
3207		memcpy(crycb->apcb0.adm, adm, 2);
3208		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
3209			 apm[0], *((unsigned short *)aqm),
3210			 *((unsigned short *)adm));
3211		break;
3212	default:	/* Can not happen */
3213		break;
3214	}
3215
3216	/* recreate the shadow crycb for each vcpu */
3217	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3218	kvm_s390_vcpu_unblock_all(kvm);
3219}
3220EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
3221
3222/*
3223 * kvm_arch_crypto_clear_masks
3224 *
3225 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3226 *	 to be cleared.
3227 *
3228 * Clear the masks that identify the adapters, domains and control domains to
3229 * which the KVM guest is granted access.
3230 *
3231 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3232 *	 function.
3233 */
3234void kvm_arch_crypto_clear_masks(struct kvm *kvm)
3235{
3236	kvm_s390_vcpu_block_all(kvm);
3237
3238	memset(&kvm->arch.crypto.crycb->apcb0, 0,
3239	       sizeof(kvm->arch.crypto.crycb->apcb0));
3240	memset(&kvm->arch.crypto.crycb->apcb1, 0,
3241	       sizeof(kvm->arch.crypto.crycb->apcb1));
3242
3243	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
3244	/* recreate the shadow crycb for each vcpu */
3245	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3246	kvm_s390_vcpu_unblock_all(kvm);
3247}
3248EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
3249
3250static u64 kvm_s390_get_initial_cpuid(void)
3251{
3252	struct cpuid cpuid;
3253
3254	get_cpu_id(&cpuid);
3255	cpuid.version = 0xff;
3256	return *((u64 *) &cpuid);
3257}
3258
3259static void kvm_s390_crypto_init(struct kvm *kvm)
3260{
3261	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3262	kvm_s390_set_crycb_format(kvm);
3263	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3264
3265	if (!test_kvm_facility(kvm, 76))
3266		return;
3267
3268	/* Enable AES/DEA protected key functions by default */
3269	kvm->arch.crypto.aes_kw = 1;
3270	kvm->arch.crypto.dea_kw = 1;
3271	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3272			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3273	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3274			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3275}
3276
3277static void sca_dispose(struct kvm *kvm)
3278{
3279	if (kvm->arch.use_esca)
3280		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
3281	else
3282		free_page((unsigned long)(kvm->arch.sca));
3283	kvm->arch.sca = NULL;
3284}
3285
3286void kvm_arch_free_vm(struct kvm *kvm)
3287{
3288	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3289		kvm_s390_pci_clear_list(kvm);
3290
3291	__kvm_arch_free_vm(kvm);
3292}
3293
3294int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3295{
3296	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
3297	int i, rc;
3298	char debug_name[16];
3299	static unsigned long sca_offset;
3300
3301	rc = -EINVAL;
3302#ifdef CONFIG_KVM_S390_UCONTROL
3303	if (type & ~KVM_VM_S390_UCONTROL)
3304		goto out_err;
3305	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3306		goto out_err;
3307#else
3308	if (type)
3309		goto out_err;
3310#endif
3311
3312	rc = s390_enable_sie();
3313	if (rc)
3314		goto out_err;
3315
3316	rc = -ENOMEM;
3317
3318	if (!sclp.has_64bscao)
3319		alloc_flags |= GFP_DMA;
3320	rwlock_init(&kvm->arch.sca_lock);
3321	/* start with basic SCA */
3322	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3323	if (!kvm->arch.sca)
3324		goto out_err;
3325	mutex_lock(&kvm_lock);
3326	sca_offset += 16;
3327	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
3328		sca_offset = 0;
3329	kvm->arch.sca = (struct bsca_block *)
3330			((char *) kvm->arch.sca + sca_offset);
3331	mutex_unlock(&kvm_lock);
3332
3333	sprintf(debug_name, "kvm-%u", current->pid);
3334
3335	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3336	if (!kvm->arch.dbf)
3337		goto out_err;
3338
3339	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3340	kvm->arch.sie_page2 =
3341	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3342	if (!kvm->arch.sie_page2)
3343		goto out_err;
3344
3345	kvm->arch.sie_page2->kvm = kvm;
3346	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3347
3348	for (i = 0; i < kvm_s390_fac_size(); i++) {
3349		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3350					      (kvm_s390_fac_base[i] |
3351					       kvm_s390_fac_ext[i]);
3352		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3353					      kvm_s390_fac_base[i];
3354	}
3355	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3356
3357	/* we are always in czam mode - even on pre z14 machines */
3358	set_kvm_facility(kvm->arch.model.fac_mask, 138);
3359	set_kvm_facility(kvm->arch.model.fac_list, 138);
3360	/* we emulate STHYI in kvm */
3361	set_kvm_facility(kvm->arch.model.fac_mask, 74);
3362	set_kvm_facility(kvm->arch.model.fac_list, 74);
3363	if (MACHINE_HAS_TLB_GUEST) {
3364		set_kvm_facility(kvm->arch.model.fac_mask, 147);
3365		set_kvm_facility(kvm->arch.model.fac_list, 147);
3366	}
3367
3368	if (css_general_characteristics.aiv && test_facility(65))
3369		set_kvm_facility(kvm->arch.model.fac_mask, 65);
3370
3371	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3372	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3373
3374	kvm->arch.model.uv_feat_guest.feat = 0;
3375
3376	kvm_s390_crypto_init(kvm);
3377
3378	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3379		mutex_lock(&kvm->lock);
3380		kvm_s390_pci_init_list(kvm);
3381		kvm_s390_vcpu_pci_enable_interp(kvm);
3382		mutex_unlock(&kvm->lock);
3383	}
3384
3385	mutex_init(&kvm->arch.float_int.ais_lock);
3386	spin_lock_init(&kvm->arch.float_int.lock);
3387	for (i = 0; i < FIRQ_LIST_COUNT; i++)
3388		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3389	init_waitqueue_head(&kvm->arch.ipte_wq);
3390	mutex_init(&kvm->arch.ipte_mutex);
3391
3392	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3393	VM_EVENT(kvm, 3, "vm created with type %lu", type);
3394
3395	if (type & KVM_VM_S390_UCONTROL) {
3396		kvm->arch.gmap = NULL;
3397		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3398	} else {
3399		if (sclp.hamax == U64_MAX)
3400			kvm->arch.mem_limit = TASK_SIZE_MAX;
3401		else
3402			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
3403						    sclp.hamax + 1);
3404		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3405		if (!kvm->arch.gmap)
3406			goto out_err;
3407		kvm->arch.gmap->private = kvm;
3408		kvm->arch.gmap->pfault_enabled = 0;
3409	}
3410
3411	kvm->arch.use_pfmfi = sclp.has_pfmfi;
3412	kvm->arch.use_skf = sclp.has_skey;
3413	spin_lock_init(&kvm->arch.start_stop_lock);
3414	kvm_s390_vsie_init(kvm);
3415	if (use_gisa)
3416		kvm_s390_gisa_init(kvm);
3417	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3418	kvm->arch.pv.set_aside = NULL;
3419	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
3420
3421	return 0;
3422out_err:
3423	free_page((unsigned long)kvm->arch.sie_page2);
3424	debug_unregister(kvm->arch.dbf);
3425	sca_dispose(kvm);
3426	KVM_EVENT(3, "creation of vm failed: %d", rc);
3427	return rc;
3428}
3429
3430void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3431{
3432	u16 rc, rrc;
3433
3434	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3435	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
3436	kvm_s390_clear_local_irqs(vcpu);
3437	kvm_clear_async_pf_completion_queue(vcpu);
3438	if (!kvm_is_ucontrol(vcpu->kvm))
3439		sca_del_vcpu(vcpu);
3440	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3441
3442	if (kvm_is_ucontrol(vcpu->kvm))
3443		gmap_remove(vcpu->arch.gmap);
3444
3445	if (vcpu->kvm->arch.use_cmma)
3446		kvm_s390_vcpu_unsetup_cmma(vcpu);
3447	/* We can not hold the vcpu mutex here, we are already dying */
3448	if (kvm_s390_pv_cpu_get_handle(vcpu))
3449		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3450	free_page((unsigned long)(vcpu->arch.sie_block));
3451}
3452
3453void kvm_arch_destroy_vm(struct kvm *kvm)
3454{
3455	u16 rc, rrc;
3456
3457	kvm_destroy_vcpus(kvm);
3458	sca_dispose(kvm);
3459	kvm_s390_gisa_destroy(kvm);
3460	/*
3461	 * We are already at the end of life and kvm->lock is not taken.
3462	 * This is ok as the file descriptor is closed by now and nobody
3463	 * can mess with the pv state.
3464	 */
3465	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
3466	/*
3467	 * Remove the mmu notifier only when the whole KVM VM is torn down,
3468	 * and only if one was registered to begin with. If the VM is
3469	 * currently not protected, but has been previously been protected,
3470	 * then it's possible that the notifier is still registered.
3471	 */
3472	if (kvm->arch.pv.mmu_notifier.ops)
3473		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3474
3475	debug_unregister(kvm->arch.dbf);
3476	free_page((unsigned long)kvm->arch.sie_page2);
3477	if (!kvm_is_ucontrol(kvm))
3478		gmap_remove(kvm->arch.gmap);
3479	kvm_s390_destroy_adapters(kvm);
3480	kvm_s390_clear_float_irqs(kvm);
3481	kvm_s390_vsie_destroy(kvm);
3482	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
3483}
3484
3485/* Section: vcpu related */
3486static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3487{
3488	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
3489	if (!vcpu->arch.gmap)
3490		return -ENOMEM;
3491	vcpu->arch.gmap->private = vcpu->kvm;
3492
3493	return 0;
3494}
3495
3496static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3497{
3498	if (!kvm_s390_use_sca_entries())
3499		return;
3500	read_lock(&vcpu->kvm->arch.sca_lock);
3501	if (vcpu->kvm->arch.use_esca) {
3502		struct esca_block *sca = vcpu->kvm->arch.sca;
3503
3504		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3505		sca->cpu[vcpu->vcpu_id].sda = 0;
3506	} else {
3507		struct bsca_block *sca = vcpu->kvm->arch.sca;
3508
3509		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3510		sca->cpu[vcpu->vcpu_id].sda = 0;
3511	}
3512	read_unlock(&vcpu->kvm->arch.sca_lock);
3513}
3514
3515static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3516{
3517	if (!kvm_s390_use_sca_entries()) {
3518		phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
3519
3520		/* we still need the basic sca for the ipte control */
3521		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3522		vcpu->arch.sie_block->scaol = sca_phys;
3523		return;
3524	}
3525	read_lock(&vcpu->kvm->arch.sca_lock);
3526	if (vcpu->kvm->arch.use_esca) {
3527		struct esca_block *sca = vcpu->kvm->arch.sca;
3528		phys_addr_t sca_phys = virt_to_phys(sca);
3529
3530		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3531		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3532		vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3533		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3534		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3535	} else {
3536		struct bsca_block *sca = vcpu->kvm->arch.sca;
3537		phys_addr_t sca_phys = virt_to_phys(sca);
3538
3539		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3540		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3541		vcpu->arch.sie_block->scaol = sca_phys;
3542		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3543	}
3544	read_unlock(&vcpu->kvm->arch.sca_lock);
3545}
3546
3547/* Basic SCA to Extended SCA data copy routines */
3548static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
3549{
3550	d->sda = s->sda;
3551	d->sigp_ctrl.c = s->sigp_ctrl.c;
3552	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
3553}
3554
3555static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
3556{
3557	int i;
3558
3559	d->ipte_control = s->ipte_control;
3560	d->mcn[0] = s->mcn;
3561	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
3562		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
3563}
3564
3565static int sca_switch_to_extended(struct kvm *kvm)
3566{
3567	struct bsca_block *old_sca = kvm->arch.sca;
3568	struct esca_block *new_sca;
3569	struct kvm_vcpu *vcpu;
3570	unsigned long vcpu_idx;
3571	u32 scaol, scaoh;
3572	phys_addr_t new_sca_phys;
3573
3574	if (kvm->arch.use_esca)
3575		return 0;
3576
3577	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3578	if (!new_sca)
3579		return -ENOMEM;
3580
3581	new_sca_phys = virt_to_phys(new_sca);
3582	scaoh = new_sca_phys >> 32;
3583	scaol = new_sca_phys & ESCA_SCAOL_MASK;
3584
3585	kvm_s390_vcpu_block_all(kvm);
3586	write_lock(&kvm->arch.sca_lock);
3587
3588	sca_copy_b_to_e(new_sca, old_sca);
3589
3590	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
3591		vcpu->arch.sie_block->scaoh = scaoh;
3592		vcpu->arch.sie_block->scaol = scaol;
3593		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3594	}
3595	kvm->arch.sca = new_sca;
3596	kvm->arch.use_esca = 1;
3597
3598	write_unlock(&kvm->arch.sca_lock);
3599	kvm_s390_vcpu_unblock_all(kvm);
3600
3601	free_page((unsigned long)old_sca);
3602
3603	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
3604		 old_sca, kvm->arch.sca);
3605	return 0;
3606}
3607
3608static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3609{
3610	int rc;
3611
3612	if (!kvm_s390_use_sca_entries()) {
3613		if (id < KVM_MAX_VCPUS)
3614			return true;
3615		return false;
3616	}
3617	if (id < KVM_S390_BSCA_CPU_SLOTS)
3618		return true;
3619	if (!sclp.has_esca || !sclp.has_64bscao)
3620		return false;
3621
3622	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
3623
3624	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
3625}
3626
3627/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3628static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3629{
3630	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3631	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3632	vcpu->arch.cputm_start = get_tod_clock_fast();
3633	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3634}
3635
3636/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3637static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3638{
3639	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3640	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3641	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3642	vcpu->arch.cputm_start = 0;
3643	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3644}
3645
3646/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3647static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3648{
3649	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3650	vcpu->arch.cputm_enabled = true;
3651	__start_cpu_timer_accounting(vcpu);
3652}
3653
3654/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3655static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3656{
3657	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3658	__stop_cpu_timer_accounting(vcpu);
3659	vcpu->arch.cputm_enabled = false;
3660}
3661
3662static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3663{
3664	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3665	__enable_cpu_timer_accounting(vcpu);
3666	preempt_enable();
3667}
3668
3669static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3670{
3671	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3672	__disable_cpu_timer_accounting(vcpu);
3673	preempt_enable();
3674}
3675
3676/* set the cpu timer - may only be called from the VCPU thread itself */
3677void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3678{
3679	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3680	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3681	if (vcpu->arch.cputm_enabled)
3682		vcpu->arch.cputm_start = get_tod_clock_fast();
3683	vcpu->arch.sie_block->cputm = cputm;
3684	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3685	preempt_enable();
3686}
3687
3688/* update and get the cpu timer - can also be called from other VCPU threads */
3689__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3690{
3691	unsigned int seq;
3692	__u64 value;
3693
3694	if (unlikely(!vcpu->arch.cputm_enabled))
3695		return vcpu->arch.sie_block->cputm;
3696
3697	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3698	do {
3699		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3700		/*
3701		 * If the writer would ever execute a read in the critical
3702		 * section, e.g. in irq context, we have a deadlock.
3703		 */
3704		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3705		value = vcpu->arch.sie_block->cputm;
3706		/* if cputm_start is 0, accounting is being started/stopped */
3707		if (likely(vcpu->arch.cputm_start))
3708			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3709	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3710	preempt_enable();
3711	return value;
3712}
3713
3714void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3715{
3716
3717	gmap_enable(vcpu->arch.enabled_gmap);
3718	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3719	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3720		__start_cpu_timer_accounting(vcpu);
3721	vcpu->cpu = cpu;
3722}
3723
3724void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3725{
3726	vcpu->cpu = -1;
3727	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3728		__stop_cpu_timer_accounting(vcpu);
3729	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3730	vcpu->arch.enabled_gmap = gmap_get_enabled();
3731	gmap_disable(vcpu->arch.enabled_gmap);
3732
3733}
3734
3735void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3736{
3737	mutex_lock(&vcpu->kvm->lock);
3738	preempt_disable();
3739	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3740	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3741	preempt_enable();
3742	mutex_unlock(&vcpu->kvm->lock);
3743	if (!kvm_is_ucontrol(vcpu->kvm)) {
3744		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3745		sca_add_vcpu(vcpu);
3746	}
3747	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3748		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3749	/* make vcpu_load load the right gmap on the first trigger */
3750	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
3751}
3752
3753static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3754{
3755	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3756	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3757		return true;
3758	return false;
3759}
3760
3761static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3762{
3763	/* At least one ECC subfunction must be present */
3764	return kvm_has_pckmo_subfunc(kvm, 32) ||
3765	       kvm_has_pckmo_subfunc(kvm, 33) ||
3766	       kvm_has_pckmo_subfunc(kvm, 34) ||
3767	       kvm_has_pckmo_subfunc(kvm, 40) ||
3768	       kvm_has_pckmo_subfunc(kvm, 41);
3769
3770}
3771
3772static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3773{
3774	/*
3775	 * If the AP instructions are not being interpreted and the MSAX3
3776	 * facility is not configured for the guest, there is nothing to set up.
3777	 */
3778	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3779		return;
3780
3781	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3782	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3783	vcpu->arch.sie_block->eca &= ~ECA_APIE;
3784	vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3785
3786	if (vcpu->kvm->arch.crypto.apie)
3787		vcpu->arch.sie_block->eca |= ECA_APIE;
3788
3789	/* Set up protected key support */
3790	if (vcpu->kvm->arch.crypto.aes_kw) {
3791		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3792		/* ecc is also wrapped with AES key */
3793		if (kvm_has_pckmo_ecc(vcpu->kvm))
3794			vcpu->arch.sie_block->ecd |= ECD_ECC;
3795	}
3796
3797	if (vcpu->kvm->arch.crypto.dea_kw)
3798		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3799}
3800
3801void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3802{
3803	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3804	vcpu->arch.sie_block->cbrlo = 0;
3805}
3806
3807int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3808{
3809	void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3810
3811	if (!cbrlo_page)
3812		return -ENOMEM;
3813
3814	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3815	return 0;
3816}
3817
3818static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3819{
3820	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3821
3822	vcpu->arch.sie_block->ibc = model->ibc;
3823	if (test_kvm_facility(vcpu->kvm, 7))
3824		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3825}
3826
3827static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3828{
3829	int rc = 0;
3830	u16 uvrc, uvrrc;
3831
3832	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3833						    CPUSTAT_SM |
3834						    CPUSTAT_STOPPED);
3835
3836	if (test_kvm_facility(vcpu->kvm, 78))
3837		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3838	else if (test_kvm_facility(vcpu->kvm, 8))
3839		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3840
3841	kvm_s390_vcpu_setup_model(vcpu);
3842
3843	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3844	if (MACHINE_HAS_ESOP)
3845		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3846	if (test_kvm_facility(vcpu->kvm, 9))
3847		vcpu->arch.sie_block->ecb |= ECB_SRSI;
3848	if (test_kvm_facility(vcpu->kvm, 11))
3849		vcpu->arch.sie_block->ecb |= ECB_PTF;
3850	if (test_kvm_facility(vcpu->kvm, 73))
3851		vcpu->arch.sie_block->ecb |= ECB_TE;
3852	if (!kvm_is_ucontrol(vcpu->kvm))
3853		vcpu->arch.sie_block->ecb |= ECB_SPECI;
3854
3855	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3856		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3857	if (test_kvm_facility(vcpu->kvm, 130))
3858		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3859	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3860	if (sclp.has_cei)
3861		vcpu->arch.sie_block->eca |= ECA_CEI;
3862	if (sclp.has_ib)
3863		vcpu->arch.sie_block->eca |= ECA_IB;
3864	if (sclp.has_siif)
3865		vcpu->arch.sie_block->eca |= ECA_SII;
3866	if (sclp.has_sigpif)
3867		vcpu->arch.sie_block->eca |= ECA_SIGPI;
3868	if (test_kvm_facility(vcpu->kvm, 129)) {
3869		vcpu->arch.sie_block->eca |= ECA_VX;
3870		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3871	}
3872	if (test_kvm_facility(vcpu->kvm, 139))
3873		vcpu->arch.sie_block->ecd |= ECD_MEF;
3874	if (test_kvm_facility(vcpu->kvm, 156))
3875		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3876	if (vcpu->arch.sie_block->gd) {
3877		vcpu->arch.sie_block->eca |= ECA_AIV;
3878		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3879			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3880	}
3881	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3882	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3883
3884	if (sclp.has_kss)
3885		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3886	else
3887		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3888
3889	if (vcpu->kvm->arch.use_cmma) {
3890		rc = kvm_s390_vcpu_setup_cmma(vcpu);
3891		if (rc)
3892			return rc;
3893	}
3894	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3895	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3896
3897	vcpu->arch.sie_block->hpid = HPID_KVM;
3898
3899	kvm_s390_vcpu_crypto_setup(vcpu);
3900
3901	kvm_s390_vcpu_pci_setup(vcpu);
3902
3903	mutex_lock(&vcpu->kvm->lock);
3904	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3905		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3906		if (rc)
3907			kvm_s390_vcpu_unsetup_cmma(vcpu);
3908	}
3909	mutex_unlock(&vcpu->kvm->lock);
3910
3911	return rc;
3912}
3913
3914int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3915{
3916	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3917		return -EINVAL;
3918	return 0;
3919}
3920
3921int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3922{
3923	struct sie_page *sie_page;
3924	int rc;
3925
3926	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3927	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
3928	if (!sie_page)
3929		return -ENOMEM;
3930
3931	vcpu->arch.sie_block = &sie_page->sie_block;
3932	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3933
3934	/* the real guest size will always be smaller than msl */
3935	vcpu->arch.sie_block->mso = 0;
3936	vcpu->arch.sie_block->msl = sclp.hamax;
3937
3938	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3939	spin_lock_init(&vcpu->arch.local_int.lock);
3940	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3941	seqcount_init(&vcpu->arch.cputm_seqcount);
3942
3943	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3944	kvm_clear_async_pf_completion_queue(vcpu);
3945	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3946				    KVM_SYNC_GPRS |
3947				    KVM_SYNC_ACRS |
3948				    KVM_SYNC_CRS |
3949				    KVM_SYNC_ARCH0 |
3950				    KVM_SYNC_PFAULT |
3951				    KVM_SYNC_DIAG318;
3952	vcpu->arch.acrs_loaded = false;
3953	kvm_s390_set_prefix(vcpu, 0);
3954	if (test_kvm_facility(vcpu->kvm, 64))
3955		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3956	if (test_kvm_facility(vcpu->kvm, 82))
3957		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3958	if (test_kvm_facility(vcpu->kvm, 133))
3959		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3960	if (test_kvm_facility(vcpu->kvm, 156))
3961		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3962	/* fprs can be synchronized via vrs, even if the guest has no vx. With
3963	 * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format.
3964	 */
3965	if (cpu_has_vx())
3966		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3967	else
3968		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3969
3970	if (kvm_is_ucontrol(vcpu->kvm)) {
3971		rc = __kvm_ucontrol_vcpu_init(vcpu);
3972		if (rc)
3973			goto out_free_sie_block;
3974	}
3975
3976	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3977		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3978	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3979
3980	rc = kvm_s390_vcpu_setup(vcpu);
3981	if (rc)
3982		goto out_ucontrol_uninit;
3983
3984	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3985	return 0;
3986
3987out_ucontrol_uninit:
3988	if (kvm_is_ucontrol(vcpu->kvm))
3989		gmap_remove(vcpu->arch.gmap);
3990out_free_sie_block:
3991	free_page((unsigned long)(vcpu->arch.sie_block));
3992	return rc;
3993}
3994
3995int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3996{
3997	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
3998	return kvm_s390_vcpu_has_irq(vcpu, 0);
3999}
4000
4001bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
4002{
4003	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
4004}
4005
4006void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
4007{
4008	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4009	exit_sie(vcpu);
4010}
4011
4012void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
4013{
4014	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4015}
4016
4017static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
4018{
4019	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4020	exit_sie(vcpu);
4021}
4022
4023bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
4024{
4025	return atomic_read(&vcpu->arch.sie_block->prog20) &
4026	       (PROG_BLOCK_SIE | PROG_REQUEST);
4027}
4028
4029static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
4030{
4031	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4032}
4033
4034/*
4035 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
4036 * If the CPU is not running (e.g. waiting as idle) the function will
4037 * return immediately. */
4038void exit_sie(struct kvm_vcpu *vcpu)
4039{
4040	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
4041	kvm_s390_vsie_kick(vcpu);
4042	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
4043		cpu_relax();
4044}
4045
4046/* Kick a guest cpu out of SIE to process a request synchronously */
4047void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
4048{
4049	__kvm_make_request(req, vcpu);
4050	kvm_s390_vcpu_request(vcpu);
4051}
4052
4053static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
4054			      unsigned long end)
4055{
4056	struct kvm *kvm = gmap->private;
4057	struct kvm_vcpu *vcpu;
4058	unsigned long prefix;
4059	unsigned long i;
4060
4061	trace_kvm_s390_gmap_notifier(start, end, gmap_is_shadow(gmap));
4062
4063	if (gmap_is_shadow(gmap))
4064		return;
4065	if (start >= 1UL << 31)
4066		/* We are only interested in prefix pages */
4067		return;
4068	kvm_for_each_vcpu(i, vcpu, kvm) {
4069		/* match against both prefix pages */
4070		prefix = kvm_s390_get_prefix(vcpu);
4071		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
4072			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
4073				   start, end);
4074			kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4075		}
4076	}
4077}
4078
4079bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
4080{
4081	/* do not poll with more than halt_poll_max_steal percent of steal time */
4082	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
4083	    READ_ONCE(halt_poll_max_steal)) {
4084		vcpu->stat.halt_no_poll_steal++;
4085		return true;
4086	}
4087	return false;
4088}
4089
4090int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
4091{
4092	/* kvm common code refers to this, but never calls it */
4093	BUG();
4094	return 0;
4095}
4096
4097static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
4098					   struct kvm_one_reg *reg)
4099{
4100	int r = -EINVAL;
4101
4102	switch (reg->id) {
4103	case KVM_REG_S390_TODPR:
4104		r = put_user(vcpu->arch.sie_block->todpr,
4105			     (u32 __user *)reg->addr);
4106		break;
4107	case KVM_REG_S390_EPOCHDIFF:
4108		r = put_user(vcpu->arch.sie_block->epoch,
4109			     (u64 __user *)reg->addr);
4110		break;
4111	case KVM_REG_S390_CPU_TIMER:
4112		r = put_user(kvm_s390_get_cpu_timer(vcpu),
4113			     (u64 __user *)reg->addr);
4114		break;
4115	case KVM_REG_S390_CLOCK_COMP:
4116		r = put_user(vcpu->arch.sie_block->ckc,
4117			     (u64 __user *)reg->addr);
4118		break;
4119	case KVM_REG_S390_PFTOKEN:
4120		r = put_user(vcpu->arch.pfault_token,
4121			     (u64 __user *)reg->addr);
4122		break;
4123	case KVM_REG_S390_PFCOMPARE:
4124		r = put_user(vcpu->arch.pfault_compare,
4125			     (u64 __user *)reg->addr);
4126		break;
4127	case KVM_REG_S390_PFSELECT:
4128		r = put_user(vcpu->arch.pfault_select,
4129			     (u64 __user *)reg->addr);
4130		break;
4131	case KVM_REG_S390_PP:
4132		r = put_user(vcpu->arch.sie_block->pp,
4133			     (u64 __user *)reg->addr);
4134		break;
4135	case KVM_REG_S390_GBEA:
4136		r = put_user(vcpu->arch.sie_block->gbea,
4137			     (u64 __user *)reg->addr);
4138		break;
4139	default:
4140		break;
4141	}
4142
4143	return r;
4144}
4145
4146static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
4147					   struct kvm_one_reg *reg)
4148{
4149	int r = -EINVAL;
4150	__u64 val;
4151
4152	switch (reg->id) {
4153	case KVM_REG_S390_TODPR:
4154		r = get_user(vcpu->arch.sie_block->todpr,
4155			     (u32 __user *)reg->addr);
4156		break;
4157	case KVM_REG_S390_EPOCHDIFF:
4158		r = get_user(vcpu->arch.sie_block->epoch,
4159			     (u64 __user *)reg->addr);
4160		break;
4161	case KVM_REG_S390_CPU_TIMER:
4162		r = get_user(val, (u64 __user *)reg->addr);
4163		if (!r)
4164			kvm_s390_set_cpu_timer(vcpu, val);
4165		break;
4166	case KVM_REG_S390_CLOCK_COMP:
4167		r = get_user(vcpu->arch.sie_block->ckc,
4168			     (u64 __user *)reg->addr);
4169		break;
4170	case KVM_REG_S390_PFTOKEN:
4171		r = get_user(vcpu->arch.pfault_token,
4172			     (u64 __user *)reg->addr);
4173		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4174			kvm_clear_async_pf_completion_queue(vcpu);
4175		break;
4176	case KVM_REG_S390_PFCOMPARE:
4177		r = get_user(vcpu->arch.pfault_compare,
4178			     (u64 __user *)reg->addr);
4179		break;
4180	case KVM_REG_S390_PFSELECT:
4181		r = get_user(vcpu->arch.pfault_select,
4182			     (u64 __user *)reg->addr);
4183		break;
4184	case KVM_REG_S390_PP:
4185		r = get_user(vcpu->arch.sie_block->pp,
4186			     (u64 __user *)reg->addr);
4187		break;
4188	case KVM_REG_S390_GBEA:
4189		r = get_user(vcpu->arch.sie_block->gbea,
4190			     (u64 __user *)reg->addr);
4191		break;
4192	default:
4193		break;
4194	}
4195
4196	return r;
4197}
4198
4199static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
4200{
4201	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
4202	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
4203	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
4204
4205	kvm_clear_async_pf_completion_queue(vcpu);
4206	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
4207		kvm_s390_vcpu_stop(vcpu);
4208	kvm_s390_clear_local_irqs(vcpu);
4209}
4210
4211static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
4212{
4213	/* Initial reset is a superset of the normal reset */
4214	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
4215
4216	/*
4217	 * This equals initial cpu reset in pop, but we don't switch to ESA.
4218	 * We do not only reset the internal data, but also ...
4219	 */
4220	vcpu->arch.sie_block->gpsw.mask = 0;
4221	vcpu->arch.sie_block->gpsw.addr = 0;
4222	kvm_s390_set_prefix(vcpu, 0);
4223	kvm_s390_set_cpu_timer(vcpu, 0);
4224	vcpu->arch.sie_block->ckc = 0;
4225	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
4226	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
4227	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4228
4229	/* ... the data in sync regs */
4230	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
4231	vcpu->run->s.regs.ckc = 0;
4232	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4233	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4234	vcpu->run->psw_addr = 0;
4235	vcpu->run->psw_mask = 0;
4236	vcpu->run->s.regs.todpr = 0;
4237	vcpu->run->s.regs.cputm = 0;
4238	vcpu->run->s.regs.ckc = 0;
4239	vcpu->run->s.regs.pp = 0;
4240	vcpu->run->s.regs.gbea = 1;
4241	vcpu->run->s.regs.fpc = 0;
4242	/*
4243	 * Do not reset these registers in the protected case, as some of
4244	 * them are overlaid and they are not accessible in this case
4245	 * anyway.
4246	 */
4247	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4248		vcpu->arch.sie_block->gbea = 1;
4249		vcpu->arch.sie_block->pp = 0;
4250		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4251		vcpu->arch.sie_block->todpr = 0;
4252	}
4253}
4254
4255static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4256{
4257	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4258
4259	/* Clear reset is a superset of the initial reset */
4260	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4261
4262	memset(&regs->gprs, 0, sizeof(regs->gprs));
4263	memset(&regs->vrs, 0, sizeof(regs->vrs));
4264	memset(&regs->acrs, 0, sizeof(regs->acrs));
4265	memset(&regs->gscb, 0, sizeof(regs->gscb));
4266
4267	regs->etoken = 0;
4268	regs->etoken_extension = 0;
4269}
4270
4271int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4272{
4273	vcpu_load(vcpu);
4274	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4275	vcpu_put(vcpu);
4276	return 0;
4277}
4278
4279int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4280{
4281	vcpu_load(vcpu);
4282	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
4283	vcpu_put(vcpu);
4284	return 0;
4285}
4286
4287int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4288				  struct kvm_sregs *sregs)
4289{
4290	vcpu_load(vcpu);
4291
4292	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4293	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4294
4295	vcpu_put(vcpu);
4296	return 0;
4297}
4298
4299int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4300				  struct kvm_sregs *sregs)
4301{
4302	vcpu_load(vcpu);
4303
4304	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4305	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4306
4307	vcpu_put(vcpu);
4308	return 0;
4309}
4310
4311int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4312{
4313	int ret = 0;
4314
4315	vcpu_load(vcpu);
4316
4317	vcpu->run->s.regs.fpc = fpu->fpc;
4318	if (cpu_has_vx())
4319		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4320				 (freg_t *) fpu->fprs);
4321	else
4322		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4323
4324	vcpu_put(vcpu);
4325	return ret;
4326}
4327
4328int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4329{
4330	vcpu_load(vcpu);
4331
4332	if (cpu_has_vx())
4333		convert_vx_to_fp((freg_t *) fpu->fprs,
4334				 (__vector128 *) vcpu->run->s.regs.vrs);
4335	else
4336		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4337	fpu->fpc = vcpu->run->s.regs.fpc;
4338
4339	vcpu_put(vcpu);
4340	return 0;
4341}
4342
4343static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4344{
4345	int rc = 0;
4346
4347	if (!is_vcpu_stopped(vcpu))
4348		rc = -EBUSY;
4349	else {
4350		vcpu->run->psw_mask = psw.mask;
4351		vcpu->run->psw_addr = psw.addr;
4352	}
4353	return rc;
4354}
4355
4356int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4357				  struct kvm_translation *tr)
4358{
4359	return -EINVAL; /* not implemented yet */
4360}
4361
4362#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
4363			      KVM_GUESTDBG_USE_HW_BP | \
4364			      KVM_GUESTDBG_ENABLE)
4365
4366int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4367					struct kvm_guest_debug *dbg)
4368{
4369	int rc = 0;
4370
4371	vcpu_load(vcpu);
4372
4373	vcpu->guest_debug = 0;
4374	kvm_s390_clear_bp_data(vcpu);
4375
4376	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
4377		rc = -EINVAL;
4378		goto out;
4379	}
4380	if (!sclp.has_gpere) {
4381		rc = -EINVAL;
4382		goto out;
4383	}
4384
4385	if (dbg->control & KVM_GUESTDBG_ENABLE) {
4386		vcpu->guest_debug = dbg->control;
4387		/* enforce guest PER */
4388		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
4389
4390		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
4391			rc = kvm_s390_import_bp_data(vcpu, dbg);
4392	} else {
4393		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4394		vcpu->arch.guestdbg.last_bp = 0;
4395	}
4396
4397	if (rc) {
4398		vcpu->guest_debug = 0;
4399		kvm_s390_clear_bp_data(vcpu);
4400		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4401	}
4402
4403out:
4404	vcpu_put(vcpu);
4405	return rc;
4406}
4407
4408int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4409				    struct kvm_mp_state *mp_state)
4410{
4411	int ret;
4412
4413	vcpu_load(vcpu);
4414
4415	/* CHECK_STOP and LOAD are not supported yet */
4416	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4417				      KVM_MP_STATE_OPERATING;
4418
4419	vcpu_put(vcpu);
4420	return ret;
4421}
4422
4423int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4424				    struct kvm_mp_state *mp_state)
4425{
4426	int rc = 0;
4427
4428	vcpu_load(vcpu);
4429
4430	/* user space knows about this interface - let it control the state */
4431	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
4432
4433	switch (mp_state->mp_state) {
4434	case KVM_MP_STATE_STOPPED:
4435		rc = kvm_s390_vcpu_stop(vcpu);
4436		break;
4437	case KVM_MP_STATE_OPERATING:
4438		rc = kvm_s390_vcpu_start(vcpu);
4439		break;
4440	case KVM_MP_STATE_LOAD:
4441		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4442			rc = -ENXIO;
4443			break;
4444		}
4445		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
4446		break;
4447	case KVM_MP_STATE_CHECK_STOP:
4448		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
4449	default:
4450		rc = -ENXIO;
4451	}
4452
4453	vcpu_put(vcpu);
4454	return rc;
4455}
4456
4457static bool ibs_enabled(struct kvm_vcpu *vcpu)
4458{
4459	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
4460}
4461
4462static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
4463{
4464retry:
4465	kvm_s390_vcpu_request_handled(vcpu);
4466	if (!kvm_request_pending(vcpu))
4467		return 0;
4468	/*
4469	 * If the guest prefix changed, re-arm the ipte notifier for the
4470	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
4471	 * This ensures that the ipte instruction for this request has
4472	 * already finished. We might race against a second unmapper that
4473	 * wants to set the blocking bit. Lets just retry the request loop.
4474	 */
4475	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
4476		int rc;
4477		rc = gmap_mprotect_notify(vcpu->arch.gmap,
4478					  kvm_s390_get_prefix(vcpu),
4479					  PAGE_SIZE * 2, PROT_WRITE);
4480		if (rc) {
4481			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4482			return rc;
4483		}
4484		goto retry;
4485	}
4486
4487	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
4488		vcpu->arch.sie_block->ihcpu = 0xffff;
4489		goto retry;
4490	}
4491
4492	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
4493		if (!ibs_enabled(vcpu)) {
4494			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4495			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
4496		}
4497		goto retry;
4498	}
4499
4500	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
4501		if (ibs_enabled(vcpu)) {
4502			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
4503			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
4504		}
4505		goto retry;
4506	}
4507
4508	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
4509		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4510		goto retry;
4511	}
4512
4513	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4514		/*
4515		 * Disable CMM virtualization; we will emulate the ESSA
4516		 * instruction manually, in order to provide additional
4517		 * functionalities needed for live migration.
4518		 */
4519		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4520		goto retry;
4521	}
4522
4523	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4524		/*
4525		 * Re-enable CMM virtualization if CMMA is available and
4526		 * CMM has been used.
4527		 */
4528		if ((vcpu->kvm->arch.use_cmma) &&
4529		    (vcpu->kvm->mm->context.uses_cmm))
4530			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4531		goto retry;
4532	}
4533
4534	/* we left the vsie handler, nothing to do, just clear the request */
4535	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
4536
4537	return 0;
4538}
4539
4540static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4541{
4542	struct kvm_vcpu *vcpu;
4543	union tod_clock clk;
4544	unsigned long i;
4545
4546	preempt_disable();
4547
4548	store_tod_clock_ext(&clk);
4549
4550	kvm->arch.epoch = gtod->tod - clk.tod;
4551	kvm->arch.epdx = 0;
4552	if (test_kvm_facility(kvm, 139)) {
4553		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
4554		if (kvm->arch.epoch > gtod->tod)
4555			kvm->arch.epdx -= 1;
4556	}
4557
4558	kvm_s390_vcpu_block_all(kvm);
4559	kvm_for_each_vcpu(i, vcpu, kvm) {
4560		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4561		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
4562	}
4563
4564	kvm_s390_vcpu_unblock_all(kvm);
4565	preempt_enable();
4566}
4567
4568int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4569{
4570	if (!mutex_trylock(&kvm->lock))
4571		return 0;
4572	__kvm_s390_set_tod_clock(kvm, gtod);
4573	mutex_unlock(&kvm->lock);
4574	return 1;
4575}
4576
4577/**
4578 * kvm_arch_fault_in_page - fault-in guest page if necessary
4579 * @vcpu: The corresponding virtual cpu
4580 * @gpa: Guest physical address
4581 * @writable: Whether the page should be writable or not
4582 *
4583 * Make sure that a guest page has been faulted-in on the host.
4584 *
4585 * Return: Zero on success, negative error code otherwise.
4586 */
4587long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
4588{
4589	return gmap_fault(vcpu->arch.gmap, gpa,
4590			  writable ? FAULT_FLAG_WRITE : 0);
4591}
4592
4593static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
4594				      unsigned long token)
4595{
4596	struct kvm_s390_interrupt inti;
4597	struct kvm_s390_irq irq;
4598
4599	if (start_token) {
4600		irq.u.ext.ext_params2 = token;
4601		irq.type = KVM_S390_INT_PFAULT_INIT;
4602		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
4603	} else {
4604		inti.type = KVM_S390_INT_PFAULT_DONE;
4605		inti.parm64 = token;
4606		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
4607	}
4608}
4609
4610bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
4611				     struct kvm_async_pf *work)
4612{
4613	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4614	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4615
4616	return true;
4617}
4618
4619void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
4620				 struct kvm_async_pf *work)
4621{
4622	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4623	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4624}
4625
4626void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
4627			       struct kvm_async_pf *work)
4628{
4629	/* s390 will always inject the page directly */
4630}
4631
4632bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
4633{
4634	/*
4635	 * s390 will always inject the page directly,
4636	 * but we still want check_async_completion to cleanup
4637	 */
4638	return true;
4639}
4640
4641static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
4642{
4643	hva_t hva;
4644	struct kvm_arch_async_pf arch;
4645
4646	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4647		return false;
4648	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4649	    vcpu->arch.pfault_compare)
4650		return false;
4651	if (psw_extint_disabled(vcpu))
4652		return false;
4653	if (kvm_s390_vcpu_has_irq(vcpu, 0))
4654		return false;
4655	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4656		return false;
4657	if (!vcpu->arch.gmap->pfault_enabled)
4658		return false;
4659
4660	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
4661	hva += current->thread.gmap_addr & ~PAGE_MASK;
4662	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4663		return false;
4664
4665	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
4666}
4667
4668static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4669{
4670	int rc, cpuflags;
4671
4672	/*
4673	 * On s390 notifications for arriving pages will be delivered directly
4674	 * to the guest but the house keeping for completed pfaults is
4675	 * handled outside the worker.
4676	 */
4677	kvm_check_async_pf_completion(vcpu);
4678
4679	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4680	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4681
4682	if (need_resched())
4683		schedule();
4684
4685	if (!kvm_is_ucontrol(vcpu->kvm)) {
4686		rc = kvm_s390_deliver_pending_interrupts(vcpu);
4687		if (rc || guestdbg_exit_pending(vcpu))
4688			return rc;
4689	}
4690
4691	rc = kvm_s390_handle_requests(vcpu);
4692	if (rc)
4693		return rc;
4694
4695	if (guestdbg_enabled(vcpu)) {
4696		kvm_s390_backup_guest_per_regs(vcpu);
4697		kvm_s390_patch_guest_per_regs(vcpu);
4698	}
4699
4700	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4701
4702	vcpu->arch.sie_block->icptcode = 0;
4703	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4704	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4705	trace_kvm_s390_sie_enter(vcpu, cpuflags);
4706
4707	return 0;
4708}
4709
4710static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4711{
4712	struct kvm_s390_pgm_info pgm_info = {
4713		.code = PGM_ADDRESSING,
4714	};
4715	u8 opcode, ilen;
4716	int rc;
4717
4718	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4719	trace_kvm_s390_sie_fault(vcpu);
4720
4721	/*
4722	 * We want to inject an addressing exception, which is defined as a
4723	 * suppressing or terminating exception. However, since we came here
4724	 * by a DAT access exception, the PSW still points to the faulting
4725	 * instruction since DAT exceptions are nullifying. So we've got
4726	 * to look up the current opcode to get the length of the instruction
4727	 * to be able to forward the PSW.
4728	 */
4729	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4730	ilen = insn_length(opcode);
4731	if (rc < 0) {
4732		return rc;
4733	} else if (rc) {
4734		/* Instruction-Fetching Exceptions - we can't detect the ilen.
4735		 * Forward by arbitrary ilc, injection will take care of
4736		 * nullification if necessary.
4737		 */
4738		pgm_info = vcpu->arch.pgm;
4739		ilen = 4;
4740	}
4741	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4742	kvm_s390_forward_psw(vcpu, ilen);
4743	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4744}
4745
4746static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4747{
4748	struct mcck_volatile_info *mcck_info;
4749	struct sie_page *sie_page;
4750
4751	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4752		   vcpu->arch.sie_block->icptcode);
4753	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4754
4755	if (guestdbg_enabled(vcpu))
4756		kvm_s390_restore_guest_per_regs(vcpu);
4757
4758	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4759	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4760
4761	if (exit_reason == -EINTR) {
4762		VCPU_EVENT(vcpu, 3, "%s", "machine check");
4763		sie_page = container_of(vcpu->arch.sie_block,
4764					struct sie_page, sie_block);
4765		mcck_info = &sie_page->mcck_info;
4766		kvm_s390_reinject_machine_check(vcpu, mcck_info);
4767		return 0;
4768	}
4769
4770	if (vcpu->arch.sie_block->icptcode > 0) {
4771		int rc = kvm_handle_sie_intercept(vcpu);
4772
4773		if (rc != -EOPNOTSUPP)
4774			return rc;
4775		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4776		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4777		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4778		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4779		return -EREMOTE;
4780	} else if (exit_reason != -EFAULT) {
4781		vcpu->stat.exit_null++;
4782		return 0;
4783	} else if (kvm_is_ucontrol(vcpu->kvm)) {
4784		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4785		vcpu->run->s390_ucontrol.trans_exc_code =
4786						current->thread.gmap_addr;
4787		vcpu->run->s390_ucontrol.pgm_code = 0x10;
4788		return -EREMOTE;
4789	} else if (current->thread.gmap_pfault) {
4790		trace_kvm_s390_major_guest_pfault(vcpu);
4791		current->thread.gmap_pfault = 0;
4792		if (kvm_arch_setup_async_pf(vcpu))
4793			return 0;
4794		vcpu->stat.pfault_sync++;
4795		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
4796	}
4797	return vcpu_post_run_fault_in_sie(vcpu);
4798}
4799
4800#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
4801static int __vcpu_run(struct kvm_vcpu *vcpu)
4802{
4803	int rc, exit_reason;
4804	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4805
4806	/*
4807	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
4808	 * ning the guest), so that memslots (and other stuff) are protected
4809	 */
4810	kvm_vcpu_srcu_read_lock(vcpu);
4811
4812	do {
4813		rc = vcpu_pre_run(vcpu);
4814		if (rc || guestdbg_exit_pending(vcpu))
4815			break;
4816
4817		kvm_vcpu_srcu_read_unlock(vcpu);
4818		/*
4819		 * As PF_VCPU will be used in fault handler, between
4820		 * guest_enter and guest_exit should be no uaccess.
4821		 */
4822		local_irq_disable();
4823		guest_enter_irqoff();
4824		__disable_cpu_timer_accounting(vcpu);
4825		local_irq_enable();
4826		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4827			memcpy(sie_page->pv_grregs,
4828			       vcpu->run->s.regs.gprs,
4829			       sizeof(sie_page->pv_grregs));
4830		}
4831		exit_reason = sie64a(vcpu->arch.sie_block,
4832				     vcpu->run->s.regs.gprs);
4833		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4834			memcpy(vcpu->run->s.regs.gprs,
4835			       sie_page->pv_grregs,
4836			       sizeof(sie_page->pv_grregs));
4837			/*
4838			 * We're not allowed to inject interrupts on intercepts
4839			 * that leave the guest state in an "in-between" state
4840			 * where the next SIE entry will do a continuation.
4841			 * Fence interrupts in our "internal" PSW.
4842			 */
4843			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4844			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4845				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4846			}
4847		}
4848		local_irq_disable();
4849		__enable_cpu_timer_accounting(vcpu);
4850		guest_exit_irqoff();
4851		local_irq_enable();
4852		kvm_vcpu_srcu_read_lock(vcpu);
4853
4854		rc = vcpu_post_run(vcpu, exit_reason);
4855	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
4856
4857	kvm_vcpu_srcu_read_unlock(vcpu);
4858	return rc;
4859}
4860
4861static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4862{
4863	struct kvm_run *kvm_run = vcpu->run;
4864	struct runtime_instr_cb *riccb;
4865	struct gs_cb *gscb;
4866
4867	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4868	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4869	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4870	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4871	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4872		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4873		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4874		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4875	}
4876	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4877		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4878		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4879		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4880		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4881			kvm_clear_async_pf_completion_queue(vcpu);
4882	}
4883	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4884		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4885		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4886		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4887	}
4888	/*
4889	 * If userspace sets the riccb (e.g. after migration) to a valid state,
4890	 * we should enable RI here instead of doing the lazy enablement.
4891	 */
4892	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4893	    test_kvm_facility(vcpu->kvm, 64) &&
4894	    riccb->v &&
4895	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4896		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
4897		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
4898	}
4899	/*
4900	 * If userspace sets the gscb (e.g. after migration) to non-zero,
4901	 * we should enable GS here instead of doing the lazy enablement.
4902	 */
4903	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4904	    test_kvm_facility(vcpu->kvm, 133) &&
4905	    gscb->gssm &&
4906	    !vcpu->arch.gs_enabled) {
4907		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4908		vcpu->arch.sie_block->ecb |= ECB_GS;
4909		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4910		vcpu->arch.gs_enabled = 1;
4911	}
4912	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4913	    test_kvm_facility(vcpu->kvm, 82)) {
4914		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4915		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4916	}
4917	if (MACHINE_HAS_GS) {
4918		preempt_disable();
4919		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
4920		if (current->thread.gs_cb) {
4921			vcpu->arch.host_gscb = current->thread.gs_cb;
4922			save_gs_cb(vcpu->arch.host_gscb);
4923		}
4924		if (vcpu->arch.gs_enabled) {
4925			current->thread.gs_cb = (struct gs_cb *)
4926						&vcpu->run->s.regs.gscb;
4927			restore_gs_cb(current->thread.gs_cb);
4928		}
4929		preempt_enable();
4930	}
4931	/* SIE will load etoken directly from SDNX and therefore kvm_run */
4932}
4933
4934static void sync_regs(struct kvm_vcpu *vcpu)
4935{
4936	struct kvm_run *kvm_run = vcpu->run;
4937
4938	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4939		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4940	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4941		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4942		/* some control register changes require a tlb flush */
4943		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4944	}
4945	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4946		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4947		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4948	}
4949	save_access_regs(vcpu->arch.host_acrs);
4950	restore_access_regs(vcpu->run->s.regs.acrs);
4951	vcpu->arch.acrs_loaded = true;
4952	kvm_s390_fpu_load(vcpu->run);
4953	/* Sync fmt2 only data */
4954	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
4955		sync_regs_fmt2(vcpu);
4956	} else {
4957		/*
4958		 * In several places we have to modify our internal view to
4959		 * not do things that are disallowed by the ultravisor. For
4960		 * example we must not inject interrupts after specific exits
4961		 * (e.g. 112 prefix page not secure). We do this by turning
4962		 * off the machine check, external and I/O interrupt bits
4963		 * of our PSW copy. To avoid getting validity intercepts, we
4964		 * do only accept the condition code from userspace.
4965		 */
4966		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4967		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4968						   PSW_MASK_CC;
4969	}
4970
4971	kvm_run->kvm_dirty_regs = 0;
4972}
4973
4974static void store_regs_fmt2(struct kvm_vcpu *vcpu)
4975{
4976	struct kvm_run *kvm_run = vcpu->run;
4977
4978	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4979	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4980	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
4981	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
4982	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
4983	if (MACHINE_HAS_GS) {
4984		preempt_disable();
4985		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
4986		if (vcpu->arch.gs_enabled)
4987			save_gs_cb(current->thread.gs_cb);
4988		current->thread.gs_cb = vcpu->arch.host_gscb;
4989		restore_gs_cb(vcpu->arch.host_gscb);
4990		if (!vcpu->arch.host_gscb)
4991			local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
4992		vcpu->arch.host_gscb = NULL;
4993		preempt_enable();
4994	}
4995	/* SIE will save etoken directly into SDNX and therefore kvm_run */
4996}
4997
4998static void store_regs(struct kvm_vcpu *vcpu)
4999{
5000	struct kvm_run *kvm_run = vcpu->run;
5001
5002	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
5003	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
5004	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
5005	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
5006	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
5007	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
5008	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
5009	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
5010	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
5011	save_access_regs(vcpu->run->s.regs.acrs);
5012	restore_access_regs(vcpu->arch.host_acrs);
5013	vcpu->arch.acrs_loaded = false;
5014	kvm_s390_fpu_store(vcpu->run);
5015	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
5016		store_regs_fmt2(vcpu);
5017}
5018
5019int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
5020{
5021	struct kvm_run *kvm_run = vcpu->run;
5022	DECLARE_KERNEL_FPU_ONSTACK32(fpu);
5023	int rc;
5024
5025	/*
5026	 * Running a VM while dumping always has the potential to
5027	 * produce inconsistent dump data. But for PV vcpus a SIE
5028	 * entry while dumping could also lead to a fatal validity
5029	 * intercept which we absolutely want to avoid.
5030	 */
5031	if (vcpu->kvm->arch.pv.dumping)
5032		return -EINVAL;
5033
5034	if (kvm_run->immediate_exit)
5035		return -EINTR;
5036
5037	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
5038	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
5039		return -EINVAL;
5040
5041	vcpu_load(vcpu);
5042
5043	if (guestdbg_exit_pending(vcpu)) {
5044		kvm_s390_prepare_debug_exit(vcpu);
5045		rc = 0;
5046		goto out;
5047	}
5048
5049	kvm_sigset_activate(vcpu);
5050
5051	/*
5052	 * no need to check the return value of vcpu_start as it can only have
5053	 * an error for protvirt, but protvirt means user cpu state
5054	 */
5055	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
5056		kvm_s390_vcpu_start(vcpu);
5057	} else if (is_vcpu_stopped(vcpu)) {
5058		pr_err_ratelimited("can't run stopped vcpu %d\n",
5059				   vcpu->vcpu_id);
5060		rc = -EINVAL;
5061		goto out;
5062	}
5063
5064	kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
5065	sync_regs(vcpu);
5066	enable_cpu_timer_accounting(vcpu);
5067
5068	might_fault();
5069	rc = __vcpu_run(vcpu);
5070
5071	if (signal_pending(current) && !rc) {
5072		kvm_run->exit_reason = KVM_EXIT_INTR;
5073		rc = -EINTR;
5074	}
5075
5076	if (guestdbg_exit_pending(vcpu) && !rc)  {
5077		kvm_s390_prepare_debug_exit(vcpu);
5078		rc = 0;
5079	}
5080
5081	if (rc == -EREMOTE) {
5082		/* userspace support is needed, kvm_run has been prepared */
5083		rc = 0;
5084	}
5085
5086	disable_cpu_timer_accounting(vcpu);
5087	store_regs(vcpu);
5088	kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
5089
5090	kvm_sigset_deactivate(vcpu);
5091
5092	vcpu->stat.exit_userspace++;
5093out:
5094	vcpu_put(vcpu);
5095	return rc;
5096}
5097
5098/*
5099 * store status at address
5100 * we use have two special cases:
5101 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5102 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5103 */
5104int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
5105{
5106	unsigned char archmode = 1;
5107	freg_t fprs[NUM_FPRS];
5108	unsigned int px;
5109	u64 clkcomp, cputm;
5110	int rc;
5111
5112	px = kvm_s390_get_prefix(vcpu);
5113	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
5114		if (write_guest_abs(vcpu, 163, &archmode, 1))
5115			return -EFAULT;
5116		gpa = 0;
5117	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
5118		if (write_guest_real(vcpu, 163, &archmode, 1))
5119			return -EFAULT;
5120		gpa = px;
5121	} else
5122		gpa -= __LC_FPREGS_SAVE_AREA;
5123
5124	/* manually convert vector registers if necessary */
5125	if (cpu_has_vx()) {
5126		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
5127		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5128				     fprs, 128);
5129	} else {
5130		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5131				     vcpu->run->s.regs.fprs, 128);
5132	}
5133	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
5134			      vcpu->run->s.regs.gprs, 128);
5135	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
5136			      &vcpu->arch.sie_block->gpsw, 16);
5137	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
5138			      &px, 4);
5139	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
5140			      &vcpu->run->s.regs.fpc, 4);
5141	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
5142			      &vcpu->arch.sie_block->todpr, 4);
5143	cputm = kvm_s390_get_cpu_timer(vcpu);
5144	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
5145			      &cputm, 8);
5146	clkcomp = vcpu->arch.sie_block->ckc >> 8;
5147	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
5148			      &clkcomp, 8);
5149	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
5150			      &vcpu->run->s.regs.acrs, 64);
5151	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
5152			      &vcpu->arch.sie_block->gcr, 128);
5153	return rc ? -EFAULT : 0;
5154}
5155
5156int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5157{
5158	/*
5159	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
5160	 * switch in the run ioctl. Let's update our copies before we save
5161	 * it into the save area
5162	 */
5163	kvm_s390_fpu_store(vcpu->run);
5164	save_access_regs(vcpu->run->s.regs.acrs);
5165
5166	return kvm_s390_store_status_unloaded(vcpu, addr);
5167}
5168
5169static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5170{
5171	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
5172	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
5173}
5174
5175static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
5176{
5177	unsigned long i;
5178	struct kvm_vcpu *vcpu;
5179
5180	kvm_for_each_vcpu(i, vcpu, kvm) {
5181		__disable_ibs_on_vcpu(vcpu);
5182	}
5183}
5184
5185static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5186{
5187	if (!sclp.has_ibs)
5188		return;
5189	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
5190	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
5191}
5192
5193int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
5194{
5195	int i, online_vcpus, r = 0, started_vcpus = 0;
5196
5197	if (!is_vcpu_stopped(vcpu))
5198		return 0;
5199
5200	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
5201	/* Only one cpu at a time may enter/leave the STOPPED state. */
5202	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5203	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5204
5205	/* Let's tell the UV that we want to change into the operating state */
5206	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5207		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
5208		if (r) {
5209			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5210			return r;
5211		}
5212	}
5213
5214	for (i = 0; i < online_vcpus; i++) {
5215		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
5216			started_vcpus++;
5217	}
5218
5219	if (started_vcpus == 0) {
5220		/* we're the only active VCPU -> speed it up */
5221		__enable_ibs_on_vcpu(vcpu);
5222	} else if (started_vcpus == 1) {
5223		/*
5224		 * As we are starting a second VCPU, we have to disable
5225		 * the IBS facility on all VCPUs to remove potentially
5226		 * outstanding ENABLE requests.
5227		 */
5228		__disable_ibs_on_all_vcpus(vcpu->kvm);
5229	}
5230
5231	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
5232	/*
5233	 * The real PSW might have changed due to a RESTART interpreted by the
5234	 * ultravisor. We block all interrupts and let the next sie exit
5235	 * refresh our view.
5236	 */
5237	if (kvm_s390_pv_cpu_is_protected(vcpu))
5238		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5239	/*
5240	 * Another VCPU might have used IBS while we were offline.
5241	 * Let's play safe and flush the VCPU at startup.
5242	 */
5243	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5244	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5245	return 0;
5246}
5247
5248int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
5249{
5250	int i, online_vcpus, r = 0, started_vcpus = 0;
5251	struct kvm_vcpu *started_vcpu = NULL;
5252
5253	if (is_vcpu_stopped(vcpu))
5254		return 0;
5255
5256	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
5257	/* Only one cpu at a time may enter/leave the STOPPED state. */
5258	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5259	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5260
5261	/* Let's tell the UV that we want to change into the stopped state */
5262	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5263		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5264		if (r) {
5265			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5266			return r;
5267		}
5268	}
5269
5270	/*
5271	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5272	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5273	 * have been fully processed. This will ensure that the VCPU
5274	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5275	 */
5276	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
5277	kvm_s390_clear_stop_irq(vcpu);
5278
5279	__disable_ibs_on_vcpu(vcpu);
5280
5281	for (i = 0; i < online_vcpus; i++) {
5282		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5283
5284		if (!is_vcpu_stopped(tmp)) {
5285			started_vcpus++;
5286			started_vcpu = tmp;
5287		}
5288	}
5289
5290	if (started_vcpus == 1) {
5291		/*
5292		 * As we only have one VCPU left, we want to enable the
5293		 * IBS facility for that VCPU to speed it up.
5294		 */
5295		__enable_ibs_on_vcpu(started_vcpu);
5296	}
5297
5298	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5299	return 0;
5300}
5301
5302static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5303				     struct kvm_enable_cap *cap)
5304{
5305	int r;
5306
5307	if (cap->flags)
5308		return -EINVAL;
5309
5310	switch (cap->cap) {
5311	case KVM_CAP_S390_CSS_SUPPORT:
5312		if (!vcpu->kvm->arch.css_support) {
5313			vcpu->kvm->arch.css_support = 1;
5314			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5315			trace_kvm_s390_enable_css(vcpu->kvm);
5316		}
5317		r = 0;
5318		break;
5319	default:
5320		r = -EINVAL;
5321		break;
5322	}
5323	return r;
5324}
5325
5326static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
5327				  struct kvm_s390_mem_op *mop)
5328{
5329	void __user *uaddr = (void __user *)mop->buf;
5330	void *sida_addr;
5331	int r = 0;
5332
5333	if (mop->flags || !mop->size)
5334		return -EINVAL;
5335	if (mop->size + mop->sida_offset < mop->size)
5336		return -EINVAL;
5337	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
5338		return -E2BIG;
5339	if (!kvm_s390_pv_cpu_is_protected(vcpu))
5340		return -EINVAL;
5341
5342	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5343
5344	switch (mop->op) {
5345	case KVM_S390_MEMOP_SIDA_READ:
5346		if (copy_to_user(uaddr, sida_addr, mop->size))
5347			r = -EFAULT;
5348
5349		break;
5350	case KVM_S390_MEMOP_SIDA_WRITE:
5351		if (copy_from_user(sida_addr, uaddr, mop->size))
5352			r = -EFAULT;
5353		break;
5354	}
5355	return r;
5356}
5357
5358static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
5359				 struct kvm_s390_mem_op *mop)
5360{
5361	void __user *uaddr = (void __user *)mop->buf;
5362	enum gacc_mode acc_mode;
5363	void *tmpbuf = NULL;
5364	int r;
5365
5366	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
5367					KVM_S390_MEMOP_F_CHECK_ONLY |
5368					KVM_S390_MEMOP_F_SKEY_PROTECTION);
5369	if (r)
5370		return r;
5371	if (mop->ar >= NUM_ACRS)
5372		return -EINVAL;
5373	if (kvm_s390_pv_cpu_is_protected(vcpu))
5374		return -EINVAL;
5375	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
5376		tmpbuf = vmalloc(mop->size);
5377		if (!tmpbuf)
5378			return -ENOMEM;
5379	}
5380
5381	acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
5382	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5383		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5384				    acc_mode, mop->key);
5385		goto out_inject;
5386	}
5387	if (acc_mode == GACC_FETCH) {
5388		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5389					mop->size, mop->key);
5390		if (r)
5391			goto out_inject;
5392		if (copy_to_user(uaddr, tmpbuf, mop->size)) {
5393			r = -EFAULT;
5394			goto out_free;
5395		}
5396	} else {
5397		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
5398			r = -EFAULT;
5399			goto out_free;
5400		}
5401		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5402					 mop->size, mop->key);
5403	}
5404
5405out_inject:
5406	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
5407		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5408
5409out_free:
5410	vfree(tmpbuf);
5411	return r;
5412}
5413
5414static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
5415				     struct kvm_s390_mem_op *mop)
5416{
5417	int r, srcu_idx;
5418
5419	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5420
5421	switch (mop->op) {
5422	case KVM_S390_MEMOP_LOGICAL_READ:
5423	case KVM_S390_MEMOP_LOGICAL_WRITE:
5424		r = kvm_s390_vcpu_mem_op(vcpu, mop);
5425		break;
5426	case KVM_S390_MEMOP_SIDA_READ:
5427	case KVM_S390_MEMOP_SIDA_WRITE:
5428		/* we are locked against sida going away by the vcpu->mutex */
5429		r = kvm_s390_vcpu_sida_op(vcpu, mop);
5430		break;
5431	default:
5432		r = -EINVAL;
5433	}
5434
5435	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
5436	return r;
5437}
5438
5439long kvm_arch_vcpu_async_ioctl(struct file *filp,
5440			       unsigned int ioctl, unsigned long arg)
5441{
5442	struct kvm_vcpu *vcpu = filp->private_data;
5443	void __user *argp = (void __user *)arg;
5444	int rc;
5445
5446	switch (ioctl) {
5447	case KVM_S390_IRQ: {
5448		struct kvm_s390_irq s390irq;
5449
5450		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
5451			return -EFAULT;
5452		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5453		break;
5454	}
5455	case KVM_S390_INTERRUPT: {
5456		struct kvm_s390_interrupt s390int;
5457		struct kvm_s390_irq s390irq = {};
5458
5459		if (copy_from_user(&s390int, argp, sizeof(s390int)))
5460			return -EFAULT;
5461		if (s390int_to_s390irq(&s390int, &s390irq))
5462			return -EINVAL;
5463		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5464		break;
5465	}
5466	default:
5467		rc = -ENOIOCTLCMD;
5468		break;
5469	}
5470
5471	/*
5472	 * To simplify single stepping of userspace-emulated instructions,
5473	 * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
5474	 * should_handle_per_ifetch()). However, if userspace emulation injects
5475	 * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
5476	 * after (and not before) the interrupt delivery.
5477	 */
5478	if (!rc)
5479		vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
5480
5481	return rc;
5482}
5483
5484static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
5485					struct kvm_pv_cmd *cmd)
5486{
5487	struct kvm_s390_pv_dmp dmp;
5488	void *data;
5489	int ret;
5490
5491	/* Dump initialization is a prerequisite */
5492	if (!vcpu->kvm->arch.pv.dumping)
5493		return -EINVAL;
5494
5495	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
5496		return -EFAULT;
5497
5498	/* We only handle this subcmd right now */
5499	if (dmp.subcmd != KVM_PV_DUMP_CPU)
5500		return -EINVAL;
5501
5502	/* CPU dump length is the same as create cpu storage donation. */
5503	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
5504		return -EINVAL;
5505
5506	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
5507	if (!data)
5508		return -ENOMEM;
5509
5510	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
5511
5512	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
5513		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
5514
5515	if (ret)
5516		ret = -EINVAL;
5517
5518	/* On success copy over the dump data */
5519	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
5520		ret = -EFAULT;
5521
5522	kvfree(data);
5523	return ret;
5524}
5525
5526long kvm_arch_vcpu_ioctl(struct file *filp,
5527			 unsigned int ioctl, unsigned long arg)
5528{
5529	struct kvm_vcpu *vcpu = filp->private_data;
5530	void __user *argp = (void __user *)arg;
5531	int idx;
5532	long r;
5533	u16 rc, rrc;
5534
5535	vcpu_load(vcpu);
5536
5537	switch (ioctl) {
5538	case KVM_S390_STORE_STATUS:
5539		idx = srcu_read_lock(&vcpu->kvm->srcu);
5540		r = kvm_s390_store_status_unloaded(vcpu, arg);
5541		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5542		break;
5543	case KVM_S390_SET_INITIAL_PSW: {
5544		psw_t psw;
5545
5546		r = -EFAULT;
5547		if (copy_from_user(&psw, argp, sizeof(psw)))
5548			break;
5549		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
5550		break;
5551	}
5552	case KVM_S390_CLEAR_RESET:
5553		r = 0;
5554		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
5555		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5556			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5557					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
5558			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
5559				   rc, rrc);
5560		}
5561		break;
5562	case KVM_S390_INITIAL_RESET:
5563		r = 0;
5564		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
5565		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5566			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5567					  UVC_CMD_CPU_RESET_INITIAL,
5568					  &rc, &rrc);
5569			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
5570				   rc, rrc);
5571		}
5572		break;
5573	case KVM_S390_NORMAL_RESET:
5574		r = 0;
5575		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
5576		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5577			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5578					  UVC_CMD_CPU_RESET, &rc, &rrc);
5579			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
5580				   rc, rrc);
5581		}
5582		break;
5583	case KVM_SET_ONE_REG:
5584	case KVM_GET_ONE_REG: {
5585		struct kvm_one_reg reg;
5586		r = -EINVAL;
5587		if (kvm_s390_pv_cpu_is_protected(vcpu))
5588			break;
5589		r = -EFAULT;
5590		if (copy_from_user(&reg, argp, sizeof(reg)))
5591			break;
5592		if (ioctl == KVM_SET_ONE_REG)
5593			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
5594		else
5595			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
5596		break;
5597	}
5598#ifdef CONFIG_KVM_S390_UCONTROL
5599	case KVM_S390_UCAS_MAP: {
5600		struct kvm_s390_ucas_mapping ucasmap;
5601
5602		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5603			r = -EFAULT;
5604			break;
5605		}
5606
5607		if (!kvm_is_ucontrol(vcpu->kvm)) {
5608			r = -EINVAL;
5609			break;
5610		}
5611
5612		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
5613				     ucasmap.vcpu_addr, ucasmap.length);
5614		break;
5615	}
5616	case KVM_S390_UCAS_UNMAP: {
5617		struct kvm_s390_ucas_mapping ucasmap;
5618
5619		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5620			r = -EFAULT;
5621			break;
5622		}
5623
5624		if (!kvm_is_ucontrol(vcpu->kvm)) {
5625			r = -EINVAL;
5626			break;
5627		}
5628
5629		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
5630			ucasmap.length);
5631		break;
5632	}
5633#endif
5634	case KVM_S390_VCPU_FAULT: {
5635		r = gmap_fault(vcpu->arch.gmap, arg, 0);
5636		break;
5637	}
5638	case KVM_ENABLE_CAP:
5639	{
5640		struct kvm_enable_cap cap;
5641		r = -EFAULT;
5642		if (copy_from_user(&cap, argp, sizeof(cap)))
5643			break;
5644		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5645		break;
5646	}
5647	case KVM_S390_MEM_OP: {
5648		struct kvm_s390_mem_op mem_op;
5649
5650		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
5651			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
5652		else
5653			r = -EFAULT;
5654		break;
5655	}
5656	case KVM_S390_SET_IRQ_STATE: {
5657		struct kvm_s390_irq_state irq_state;
5658
5659		r = -EFAULT;
5660		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5661			break;
5662		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5663		    irq_state.len == 0 ||
5664		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5665			r = -EINVAL;
5666			break;
5667		}
5668		/* do not use irq_state.flags, it will break old QEMUs */
5669		r = kvm_s390_set_irq_state(vcpu,
5670					   (void __user *) irq_state.buf,
5671					   irq_state.len);
5672		break;
5673	}
5674	case KVM_S390_GET_IRQ_STATE: {
5675		struct kvm_s390_irq_state irq_state;
5676
5677		r = -EFAULT;
5678		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5679			break;
5680		if (irq_state.len == 0) {
5681			r = -EINVAL;
5682			break;
5683		}
5684		/* do not use irq_state.flags, it will break old QEMUs */
5685		r = kvm_s390_get_irq_state(vcpu,
5686					   (__u8 __user *)  irq_state.buf,
5687					   irq_state.len);
5688		break;
5689	}
5690	case KVM_S390_PV_CPU_COMMAND: {
5691		struct kvm_pv_cmd cmd;
5692
5693		r = -EINVAL;
5694		if (!is_prot_virt_host())
5695			break;
5696
5697		r = -EFAULT;
5698		if (copy_from_user(&cmd, argp, sizeof(cmd)))
5699			break;
5700
5701		r = -EINVAL;
5702		if (cmd.flags)
5703			break;
5704
5705		/* We only handle this cmd right now */
5706		if (cmd.cmd != KVM_PV_DUMP)
5707			break;
5708
5709		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
5710
5711		/* Always copy over UV rc / rrc data */
5712		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
5713				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
5714			r = -EFAULT;
5715		break;
5716	}
5717	default:
5718		r = -ENOTTY;
5719	}
5720
5721	vcpu_put(vcpu);
5722	return r;
5723}
5724
5725vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5726{
5727#ifdef CONFIG_KVM_S390_UCONTROL
5728	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
5729		 && (kvm_is_ucontrol(vcpu->kvm))) {
5730		vmf->page = virt_to_page(vcpu->arch.sie_block);
5731		get_page(vmf->page);
5732		return 0;
5733	}
5734#endif
5735	return VM_FAULT_SIGBUS;
5736}
5737
5738bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
5739{
5740	return true;
5741}
5742
5743/* Section: memory related */
5744int kvm_arch_prepare_memory_region(struct kvm *kvm,
5745				   const struct kvm_memory_slot *old,
5746				   struct kvm_memory_slot *new,
5747				   enum kvm_mr_change change)
5748{
5749	gpa_t size;
5750
5751	/* When we are protected, we should not change the memory slots */
5752	if (kvm_s390_pv_get_handle(kvm))
5753		return -EINVAL;
5754
5755	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
5756		/*
5757		 * A few sanity checks. We can have memory slots which have to be
5758		 * located/ended at a segment boundary (1MB). The memory in userland is
5759		 * ok to be fragmented into various different vmas. It is okay to mmap()
5760		 * and munmap() stuff in this slot after doing this call at any time
5761		 */
5762
5763		if (new->userspace_addr & 0xffffful)
5764			return -EINVAL;
5765
5766		size = new->npages * PAGE_SIZE;
5767		if (size & 0xffffful)
5768			return -EINVAL;
5769
5770		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5771			return -EINVAL;
5772	}
5773
5774	if (!kvm->arch.migration_mode)
5775		return 0;
5776
5777	/*
5778	 * Turn off migration mode when:
5779	 * - userspace creates a new memslot with dirty logging off,
5780	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
5781	 *   dirty logging is turned off.
5782	 * Migration mode expects dirty page logging being enabled to store
5783	 * its dirty bitmap.
5784	 */
5785	if (change != KVM_MR_DELETE &&
5786	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
5787		WARN(kvm_s390_vm_stop_migration(kvm),
5788		     "Failed to stop migration mode");
5789
5790	return 0;
5791}
5792
5793void kvm_arch_commit_memory_region(struct kvm *kvm,
5794				struct kvm_memory_slot *old,
5795				const struct kvm_memory_slot *new,
5796				enum kvm_mr_change change)
5797{
5798	int rc = 0;
5799
5800	switch (change) {
5801	case KVM_MR_DELETE:
5802		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5803					old->npages * PAGE_SIZE);
5804		break;
5805	case KVM_MR_MOVE:
5806		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5807					old->npages * PAGE_SIZE);
5808		if (rc)
5809			break;
5810		fallthrough;
5811	case KVM_MR_CREATE:
5812		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
5813				      new->base_gfn * PAGE_SIZE,
5814				      new->npages * PAGE_SIZE);
5815		break;
5816	case KVM_MR_FLAGS_ONLY:
5817		break;
5818	default:
5819		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5820	}
5821	if (rc)
5822		pr_warn("failed to commit memory region\n");
5823	return;
5824}
5825
5826static inline unsigned long nonhyp_mask(int i)
5827{
5828	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
5829
5830	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
5831}
5832
5833static int __init kvm_s390_init(void)
5834{
5835	int i, r;
5836
5837	if (!sclp.has_sief2) {
5838		pr_info("SIE is not available\n");
5839		return -ENODEV;
5840	}
5841
5842	if (nested && hpage) {
5843		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5844		return -EINVAL;
5845	}
5846
5847	for (i = 0; i < 16; i++)
5848		kvm_s390_fac_base[i] |=
5849			stfle_fac_list[i] & nonhyp_mask(i);
5850
5851	r = __kvm_s390_init();
5852	if (r)
5853		return r;
5854
5855	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5856	if (r) {
5857		__kvm_s390_exit();
5858		return r;
5859	}
5860	return 0;
5861}
5862
5863static void __exit kvm_s390_exit(void)
5864{
5865	kvm_exit();
5866
5867	__kvm_s390_exit();
5868}
5869
5870module_init(kvm_s390_init);
5871module_exit(kvm_s390_exit);
5872
5873/*
5874 * Enable autoloading of the kvm module.
5875 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5876 * since x86 takes a different approach.
5877 */
5878#include <linux/miscdevice.h>
5879MODULE_ALIAS_MISCDEV(KVM_MINOR);
5880MODULE_ALIAS("devname:kvm");
5881