// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x KVM_S390_MEM_OP
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>

#include <linux/bits.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"

enum mop_target {
	LOGICAL,
	SIDA,
	ABSOLUTE,
	INVALID,
};

enum mop_access_mode {
	READ,
	WRITE,
	CMPXCHG,
};

struct mop_desc {
	uintptr_t gaddr;
	uintptr_t gaddr_v;
	uint64_t set_flags;
	unsigned int f_check : 1;
	unsigned int f_inject : 1;
	unsigned int f_key : 1;
	unsigned int _gaddr_v : 1;
	unsigned int _set_flags : 1;
	unsigned int _sida_offset : 1;
	unsigned int _ar : 1;
	uint32_t size;
	enum mop_target target;
	enum mop_access_mode mode;
	void *buf;
	uint32_t sida_offset;
	void *old;
	uint8_t old_value[16];
	bool *cmpxchg_success;
	uint8_t ar;
	uint8_t key;
};

const uint8_t NO_KEY = 0xff;

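/*
 * Convert the test's high-level mop_desc into the struct kvm_s390_mem_op
 * that the KVM_S390_MEM_OP ioctl expects.
 */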
static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = (uintptr_t)desc->gaddr,
		.size = desc->size,
		.buf = ((uintptr_t)desc->buf),
		.reserved = "ignored_ignored_ignored_ignored"
	};

	switch (desc->target) {
	case LOGICAL:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
		break;
	case SIDA:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_SIDA_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
		break;
	case ABSOLUTE:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
		if (desc->mode == CMPXCHG) {
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG;
			ksmo.old_addr = (uint64_t)desc->old;
			memcpy(desc->old_value, desc->old, desc->size);
		}
		break;
	case INVALID:
		ksmo.op = -1;
	}
	if (desc->f_check)
		ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
	if (desc->f_inject)
		ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
	if (desc->_set_flags)
		ksmo.flags = desc->set_flags;
	if (desc->f_key && desc->key != NO_KEY) {
		ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
		ksmo.key = desc->key;
	}
	if (desc->_ar)
		ksmo.ar = desc->ar;
	else
		ksmo.ar = 0;
	if (desc->_sida_offset)
		ksmo.sida_offset = desc->sida_offset;

	return ksmo;
}

struct test_info {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
};

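/* Set PRINT_MEMOP to true to log every memop before it is issued. */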
#define PRINT_MEMOP false
static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksmo)
{
	if (!PRINT_MEMOP)
		return;

	if (!vcpu)
		printf("vm memop(");
	else
		printf("vcpu memop(");
	switch (ksmo->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		printf("LOGICAL, READ, ");
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		printf("LOGICAL, WRITE, ");
		break;
	case KVM_S390_MEMOP_SIDA_READ:
		printf("SIDA, READ, ");
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		printf("SIDA, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_READ:
		printf("ABSOLUTE, READ, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
		printf("ABSOLUTE, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
		printf("ABSOLUTE, CMPXCHG, ");
		break;
	}
	printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u, old_addr=%llx",
	       ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key,
	       ksmo->old_addr);
	if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
		printf(", CHECK_ONLY");
	if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
		printf(", INJECT_EXCEPTION");
	if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
		printf(", SKEY_PROTECTION");
	puts(")");
}

static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			   struct mop_desc *desc)
{
	struct kvm_vcpu *vcpu = info.vcpu;

	if (!vcpu)
		return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
	else
		return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}

static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			struct mop_desc *desc)
{
	int r;

	r = err_memop_ioctl(info, ksmo, desc);
	if (ksmo->op == KVM_S390_MEMOP_ABSOLUTE_CMPXCHG) {
		if (desc->cmpxchg_success) {
			int diff = memcmp(desc->old_value, desc->old, desc->size);
			*desc->cmpxchg_success = !diff;
		}
	}
	TEST_ASSERT(!r, __KVM_IOCTL_ERROR("KVM_S390_MEM_OP", r));
}

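/*
 * MEMOP() builds a mop_desc from the designated initializers passed as
 * varargs, translates a guest virtual address to a guest physical one for
 * ABSOLUTE targets, and issues the ioctl via memop_ioctl() (asserting
 * success) or err_memop_ioctl() (returning the raw result).
 */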
#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...)	\
({										\
	struct test_info __info = (info_p);					\
	struct mop_desc __desc = {						\
		.target = (mop_target_p),					\
		.mode = (access_mode_p),					\
		.buf = (buf_p),							\
		.size = (size_p),						\
		__VA_ARGS__							\
	};									\
	struct kvm_s390_mem_op __ksmo;						\
										\
	if (__desc._gaddr_v) {							\
		if (__desc.target == ABSOLUTE)					\
			__desc.gaddr = addr_gva2gpa(__info.vm, __desc.gaddr_v);	\
		else								\
			__desc.gaddr = __desc.gaddr_v;				\
	}									\
	__ksmo = ksmo_from_desc(&__desc);					\
	print_memop(__info.vcpu, &__ksmo);					\
	err##memop_ioctl(__info, &__ksmo, &__desc);				\
})

#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)

#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
#define INJECT .f_inject = 1
#define CMPXCHG_OLD(o) .old = (o)
#define CMPXCHG_SUCCESS(s) .cmpxchg_success = (s)

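/*
 * Run the operation twice: first with CHECK_ONLY set (access check without
 * side effects), then for real.
 */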
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))

static uint8_t __aligned(PAGE_SIZE) mem1[65536];
static uint8_t __aligned(PAGE_SIZE) mem2[65536];

struct test_default {
	struct kvm_vm *kvm_vm;
	struct test_info vm;
	struct test_info vcpu;
	struct kvm_run *run;
	int size;
};

static struct test_default test_default_init(void *guest_code)
{
	struct kvm_vcpu *vcpu;
	struct test_default t;

	t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
	t.kvm_vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	t.vm = (struct test_info) { t.kvm_vm, NULL };
	t.vcpu = (struct test_info) { t.kvm_vm, vcpu };
	t.run = vcpu->run;
	return t;
}

enum stage {
	/* Synced state set by host, e.g. DAT */
	STAGE_INITED,
	/* Guest did nothing */
	STAGE_IDLED,
	/* Guest set storage keys (specifics up to test case) */
	STAGE_SKEYS_SET,
	/* Guest copied memory (locations up to test case) */
	STAGE_COPIED,
	/* End of guest code reached */
	STAGE_DONE,
};

#define HOST_SYNC(info_p, stage)					\
({									\
	struct test_info __info = (info_p);				\
	struct kvm_vcpu *__vcpu = __info.vcpu;				\
	struct ucall uc;						\
	int __stage = (stage);						\
									\
	vcpu_run(__vcpu);						\
	get_ucall(__vcpu, &uc);						\
	if (uc.cmd == UCALL_ABORT) {					\
		REPORT_GUEST_ASSERT(uc);				\
	}								\
	TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC);				\
	TEST_ASSERT_EQ(uc.args[1], __stage);				\
})									\

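/* Fill mem1 with pseudo-random data and reset mem2 to a fixed pattern. */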
static void prepare_mem12(void)
{
	int i;

	for (i = 0; i < sizeof(mem1); i++)
		mem1[i] = rand();
	memset(mem2, 0xaa, sizeof(mem2));
}

#define ASSERT_MEM_EQ(p1, p2, size) \
	TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

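/*
 * Write mem1 into guest memory through the memop under test, let the guest
 * copy the data, then read it back into mem2 and verify the round trip.
 */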
static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu,
			       enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size,
		   GADDR_V(mem1), KEY(key));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
			 enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

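/*
 * Exercise the cmpxchg memop for every supported size and aligned offset
 * within a 16 byte block: once with a matching old value (must succeed and
 * store the new value) and once with a mismatching old value (must fail and
 * leave memory unchanged).
 */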
static void default_cmpxchg(struct test_default *test, uint8_t key)
{
	for (int size = 1; size <= 16; size *= 2) {
		for (int offset = 0; offset < 16; offset += size) {
			uint8_t __aligned(16) new[16] = {};
			uint8_t __aligned(16) old[16];
			bool succ;

			prepare_mem12();
			default_write_read(test->vcpu, test->vcpu, LOGICAL, 16, NO_KEY);

			memcpy(&old, mem1, 16);
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(succ, "exchange of values should succeed");
			memcpy(mem1 + offset, new + offset, size);
			ASSERT_MEM_EQ(mem1, mem2, 16);

			memcpy(&old, mem1, 16);
			new[offset]++;
			old[offset]++;
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(!succ, "exchange of values should not succeed");
			ASSERT_MEM_EQ(mem1, mem2, 16);
			ASSERT_MEM_EQ(&old, mem1, 16);
		}
	}
}

static void guest_copy(void)
{
	GUEST_SYNC(STAGE_INITED);
	memcpy(&mem2, &mem1, sizeof(mem2));
	GUEST_SYNC(STAGE_COPIED);
}

static void test_copy(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, NO_KEY);

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_access_register(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	prepare_mem12();
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17);  /* Enable AR mode */

	/*
	 * Primary address space gets used if an access register
	 * contains zero. The host makes use of AR[1] so is a good
	 * candidate to ensure the guest AR (of zero) is used.
	 */
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size,
		   GADDR_V(mem1), AR(1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);

	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, t.size,
		   GADDR_V(mem2), AR(1));
	ASSERT_MEM_EQ(mem1, mem2, t.size);

	kvm_vm_free(t.kvm_vm);
}

static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
	uintptr_t _addr, abs, i;
	int not_mapped = 0;

	_addr = (uintptr_t)addr;
	for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
		abs = i;
		asm volatile (
			       "lra	%[abs], 0(0,%[abs])\n"
			"	jz	0f\n"
			"	llill	%[not_mapped],1\n"
			"	j	1f\n"
			"0:	sske	%[key], %[abs]\n"
			"1:"
			: [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
			: [key] "r" (key)
			: "cc"
		);
		GUEST_ASSERT_EQ(not_mapped, 0);
	}
}

static void guest_copy_key(void)
{
	set_storage_key_range(mem1, sizeof(mem1), 0x90);
	set_storage_key_range(mem2, sizeof(mem2), 0x90);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, no key */
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, NO_KEY);

	/* vm/vcpu, matching key or key 0 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);
	/*
	 * There used to be different code paths for key handling depending on
	 * whether the region crossed a page boundary.
	 * There currently are not, but the more tests the merrier.
	 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 9);

	/* vm/vcpu, mismatching keys on read, but no fetch protection */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	default_cmpxchg(&t, NO_KEY);
	default_cmpxchg(&t, 0);
	default_cmpxchg(&t, 9);

	kvm_vm_free(t.kvm_vm);
}

static __uint128_t cut_to_size(int size, __uint128_t val)
{
	switch (size) {
	case 1:
		return (uint8_t)val;
	case 2:
		return (uint16_t)val;
	case 4:
		return (uint32_t)val;
	case 8:
		return (uint64_t)val;
	case 16:
		return val;
	}
	GUEST_FAIL("Invalid size = %u", size);
	return 0;
}

static bool popcount_eq(__uint128_t a, __uint128_t b)
{
	unsigned int count_a, count_b;

	count_a = __builtin_popcountl((uint64_t)(a >> 64)) +
		  __builtin_popcountl((uint64_t)a);
	count_b = __builtin_popcountl((uint64_t)(b >> 64)) +
		  __builtin_popcountl((uint64_t)b);
	return count_a == count_b;
}

static __uint128_t rotate(int size, __uint128_t val, int amount)
{
	unsigned int bits = size * 8;

	amount = (amount + bits) % bits;
	val = cut_to_size(size, val);
	if (!amount)
		return val;
	return (val << (bits - amount)) | (val >> amount);
}

const unsigned int max_block = 16;

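/*
 * Derive a block size and a size-aligned offset within max_block from the
 * iteration counter; guest and host use different multipliers so they tend
 * to pick different blocks.
 */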
static void choose_block(bool guest, int i, int *size, int *offset)
{
	unsigned int rand;

	rand = i;
	if (guest) {
		rand = rand * 19 + 11;
		*size = 1 << ((rand % 3) + 2);
		rand = rand * 19 + 11;
		*offset = (rand % max_block) & ~(*size - 1);
	} else {
		rand = rand * 17 + 5;
		*size = 1 << (rand % 5);
		rand = rand * 17 + 5;
		*offset = (rand % max_block) & ~(*size - 1);
	}
}

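/*
 * Derive a new value from old that has the same number of set bits, either
 * by swapping two bytes or by rotating within the low "size" bytes. Keeping
 * the popcount invariant is what popcount_eq() checks at the end of the
 * concurrent cmpxchg test.
 */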
static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old)
{
	unsigned int rand;
	int amount;
	bool swap;

	rand = i;
	rand = rand * 3 + 1;
	if (guest)
		rand = rand * 3 + 1;
	swap = rand % 2 == 0;
	if (swap) {
		int i, j;
		__uint128_t new;
		uint8_t byte0, byte1;

		rand = rand * 3 + 1;
		i = rand % size;
		rand = rand * 3 + 1;
		j = rand % size;
		if (i == j)
			return old;
		new = rotate(16, old, i * 8);
		byte0 = new & 0xff;
		new &= ~0xff;
		new = rotate(16, new, -i * 8);
		new = rotate(16, new, j * 8);
		byte1 = new & 0xff;
		new = (new & ~0xff) | byte0;
		new = rotate(16, new, -j * 8);
		new = rotate(16, new, i * 8);
		new = new | byte1;
		new = rotate(16, new, -i * 8);
		return new;
	}
	rand = rand * 3 + 1;
	amount = rand % (size * 8);
	return rotate(size, old, amount);
}

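/*
 * Native compare-and-swap on the guest side (CS/CSG/CDSG depending on the
 * operand size). Returns true if the exchange took place; *old_addr is
 * updated with the value found in memory.
 */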
static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t new)
{
	bool ret;

	switch (size) {
	case 4: {
			uint32_t old = *old_addr;

			asm volatile ("cs %[old],%[new],%[address]"
			    : [old] "+d" (old),
			      [address] "+Q" (*(uint32_t *)(target))
			    : [new] "d" ((uint32_t)new)
			    : "cc"
			);
			ret = old == (uint32_t)*old_addr;
			*old_addr = old;
			return ret;
		}
	case 8: {
			uint64_t old = *old_addr;

			asm volatile ("csg %[old],%[new],%[address]"
			    : [old] "+d" (old),
			      [address] "+Q" (*(uint64_t *)(target))
			    : [new] "d" ((uint64_t)new)
			    : "cc"
			);
			ret = old == (uint64_t)*old_addr;
			*old_addr = old;
			return ret;
		}
	case 16: {
			__uint128_t old = *old_addr;

			asm volatile ("cdsg %[old],%[new],%[address]"
			    : [old] "+d" (old),
			      [address] "+Q" (*(__uint128_t *)(target))
			    : [new] "d" (new)
			    : "cc"
			);
			ret = old == *old_addr;
			*old_addr = old;
			return ret;
		}
	}
	GUEST_FAIL("Invalid size = %u", size);
	return 0;
}

const unsigned int cmpxchg_iter_outer = 100, cmpxchg_iter_inner = 10000;

static void guest_cmpxchg_key(void)
{
	int size, offset;
	__uint128_t old, new;

	set_storage_key_range(mem1, max_block, 0x10);
	set_storage_key_range(mem2, max_block, 0x10);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 1;
		} while (!_cmpxchg(16, mem1, &old, 0));
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(true, i + j, &size, &offset);
			do {
				new = permutate_bits(true, i + j, size, old);
			} while (!_cmpxchg(size, mem2 + offset, &old, new));
		}
	}

	GUEST_SYNC(STAGE_DONE);
}

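/* Thread function: keep running the vCPU until the guest reaches STAGE_DONE. */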
static void *run_guest(void *data)
{
	struct test_info *info = data;

	HOST_SYNC(*info, STAGE_DONE);
	return NULL;
}

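/* Return a pointer to the "size" least significant (big-endian) bytes of *quad. */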
static char *quad_to_char(__uint128_t *quad, int size)
{
	return ((char *)quad) + (sizeof(*quad) - size);
}

static void test_cmpxchg_key_concurrent(void)
{
	struct test_default t = test_default_init(guest_cmpxchg_key);
	int size, offset;
	__uint128_t old, new;
	bool success;
	pthread_t thread;

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, max_block, GADDR_V(mem2));
	pthread_create(&thread, NULL, run_guest, &t.vcpu);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 0;
			new = 1;
			MOP(t.vm, ABSOLUTE, CMPXCHG, &new,
			    sizeof(new), GADDR_V(mem1),
			    CMPXCHG_OLD(&old),
			    CMPXCHG_SUCCESS(&success), KEY(1));
		} while (!success);
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(false, i + j, &size, &offset);
			do {
				new = permutate_bits(false, i + j, size, old);
				MOP(t.vm, ABSOLUTE, CMPXCHG, quad_to_char(&new, size),
				    size, GADDR_V(mem2 + offset),
				    CMPXCHG_OLD(quad_to_char(&old, size)),
				    CMPXCHG_SUCCESS(&success), KEY(1));
			} while (!success);
		}
	}

	pthread_join(thread, NULL);

	MOP(t.vcpu, LOGICAL, READ, mem2, max_block, GADDR_V(mem2));
	TEST_ASSERT(popcount_eq(*(__uint128_t *)mem1, *(__uint128_t *)mem2),
		    "Must retain number of set bits");

	kvm_vm_free(t.kvm_vm);
}

static void guest_copy_key_fetch_prot(void)
{
	/*
	 * For some reason combining the first sync with override enablement
	 * results in an exception when calling HOST_SYNC.
	 */
	GUEST_SYNC(STAGE_INITED);
	/* Storage protection override applies to both store and fetch. */
	set_storage_key_range(mem1, sizeof(mem1), 0x98);
	set_storage_key_range(mem2, sizeof(mem2), 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys, storage protection override in effect */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_key_fetch_prot(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, matching key, fetch protection in effect */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);

	kvm_vm_free(t.kvm_vm);
}

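/*
 * Issue a memop that must fail with a protection exception; the positive
 * return value 4 is the s390 program interruption code for a protection
 * exception.
 */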
#define ERR_PROT_MOP(...)							\
({										\
	int rv;									\
										\
	rv = ERR_MOP(__VA_ARGS__);						\
	TEST_ASSERT(rv == 4, "Should result in protection exception");		\
})

static void guest_error_key(void)
{
	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(mem1, PAGE_SIZE, 0x18);
	set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);
	GUEST_SYNC(STAGE_IDLED);
}

static void test_errors_key(void)
{
	struct test_default t = test_default_init(guest_error_key);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, mismatching keys, fetch protection in effect */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem1), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);
	int i;

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	for (i = 1; i <= 16; i *= 2) {
		__uint128_t old = 0;

		ERR_PROT_MOP(t.vm, ABSOLUTE, CMPXCHG, mem2, i, GADDR_V(mem2),
			     CMPXCHG_OLD(&old), KEY(2));
	}

	kvm_vm_free(t.kvm_vm);
}

static void test_termination(void)
{
	struct test_default t = test_default_init(guest_error_key);
	uint64_t prefix;
	uint64_t teid;
	uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
	uint64_t psw[2];

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys after first page */
	ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
	/*
	 * The memop injected a program exception and the test needs to check the
	 * Translation-Exception Identification (TEID). It is necessary to run
	 * the guest in order to be able to read the TEID from guest memory.
	 * Set the guest program new PSW, so the guest state is not clobbered.
	 */
	prefix = t.run->s.regs.prefix;
	psw[0] = t.run->psw_mask;
	psw[1] = t.run->psw_addr;
	MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
	HOST_SYNC(t.vcpu, STAGE_IDLED);
	MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
	/* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
	TEST_ASSERT_EQ(teid & teid_mask, 0);

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, mismatching keys, storage protection override not applicable to vm */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

const uint64_t last_page_addr = -PAGE_SIZE;

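/*
 * Guest side of the fetch protection override tests: page 0 gets a fetch
 * protected key, the last page of the address space a default key, and the
 * guest copies mem1 to page 0 in a loop.
 */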
static void guest_copy_key_fetch_prot_override(void)
{
	int i;
	char *page_0 = 0;

	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(0, PAGE_SIZE, 0x18);
	set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
	asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0L), [key] "r"(0x18) : "cc");
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		for (i = 0; i < PAGE_SIZE; i++)
			page_0[i] = mem1[i];
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_fetch_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override applies */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

	/*
	 * vcpu, mismatching keys on fetch, fetch protection override applies,
	 * wraparound
	 */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
		   GADDR_V(guest_last_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_not_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override not enabled */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/*
	 * vcpu, mismatching keys on fetch,
	 * fetch protection override does not apply because memory range exceeded
	 */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
		   GADDR_V(guest_last_page), KEY(2));
	/* vm, fetch protection override does not apply */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void guest_idle(void)
{
	GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
	for (;;)
		GUEST_SYNC(STAGE_IDLED);
}

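/*
 * Error cases that behave the same for vm and vcpu scoped memops: oversized,
 * zero-sized, bad flags, bad guest/host addresses and invalid keys.
 */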
static void _test_errors_common(struct test_info info, enum mop_target target, int size)
{
	int rv;

	/* Bad size: */
	rv = ERR_MOP(info, target, WRITE, mem1, -1, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

	/* Zero size: */
	rv = ERR_MOP(info, target, WRITE, mem1, 0, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
		    "ioctl allows 0 as size");

	/* Bad flags: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

	/* Bad guest address: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address with CHECK_ONLY");
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL));
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address on write");

	/* Bad host address: */
	rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EFAULT,
		    "ioctl does not report bad host memory address");

	/* Bad key: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}

static void test_errors(void)
{
	struct test_default t = test_default_init(guest_idle);
	int rv;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	_test_errors_common(t.vcpu, LOGICAL, t.size);
	_test_errors_common(t.vm, ABSOLUTE, t.size);

	/* Bad operation: */
	rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
	/* virtual addresses are not translated when passing INVALID */
	rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

	/* Bad access register: */
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17);  /* Enable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* To sync new state to SIE block */
	rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
	t.run->psw_mask &= ~(3UL << (63 - 17));   /* Disable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* Run to sync new state */

	/* Check that the SIDA calls are rejected for non-protected guests */
	rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_READ in non-protected mode");
	rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_WRITE in non-protected mode");

	kvm_vm_free(t.kvm_vm);
}

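/*
 * Error cases specific to cmpxchg: sizes that are not a power of two
 * (the loop skips the powers of two), bad guest addresses and operands
 * that are not naturally aligned must all be rejected.
 */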
static void test_errors_cmpxchg(void)
{
	struct test_default t = test_default_init(guest_idle);
	__uint128_t old;
	int rv, i, power = 1;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	for (i = 0; i < 32; i++) {
		if (i == power) {
			power *= 2;
			continue;
		}
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad size for cmpxchg");
	}
	for (i = 1; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR((void *)~0xfffUL),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv > 0, "ioctl allows bad guest address for cmpxchg");
	}
	for (i = 2; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1 + 1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad alignment for cmpxchg");
	}

	kvm_vm_free(t.kvm_vm);
}

int main(int argc, char *argv[])
{
	int extension_cap, idx;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
	extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);

	struct testdef {
		const char *name;
		void (*test)(void);
		bool requirements_met;
	} testlist[] = {
		{
			.name = "simple copy",
			.test = test_copy,
			.requirements_met = true,
		},
		{
			.name = "generic error checks",
			.test = test_errors,
			.requirements_met = true,
		},
		{
			.name = "copy with storage keys",
			.test = test_copy_key,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "cmpxchg with storage keys",
			.test = test_cmpxchg_key,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "concurrently cmpxchg with storage keys",
			.test = test_cmpxchg_key_concurrent,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "copy with key storage protection override",
			.test = test_copy_key_storage_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with key fetch protection",
			.test = test_copy_key_fetch_prot,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with key fetch protection override",
			.test = test_copy_key_fetch_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with access register mode",
			.test = test_copy_access_register,
			.requirements_met = true,
		},
		{
			.name = "error checks with key",
			.test = test_errors_key,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks for cmpxchg with key",
			.test = test_errors_cmpxchg_key,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "error checks for cmpxchg",
			.test = test_errors_cmpxchg,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "termination",
			.test = test_termination,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key storage protection override",
			.test = test_errors_key_storage_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks without key fetch prot override",
			.test = test_errors_key_fetch_prot_override_not_enabled,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key fetch prot override",
			.test = test_errors_key_fetch_prot_override_enabled,
			.requirements_met = extension_cap > 0,
		},
	};

	ksft_print_header();
	ksft_set_plan(ARRAY_SIZE(testlist));

	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
		if (testlist[idx].requirements_met) {
			testlist[idx].test();
			ksft_test_result_pass("%s\n", testlist[idx].name);
		} else {
			ksft_test_result_skip("%s - requirements not met (kernel has extension cap %#x)\n",
					      testlist[idx].name, extension_cap);
		}
	}

	ksft_finished();	/* Print results and exit() accordingly */
}