/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/*
 * Even with __builtin_, the compiler may still decide to use the
 * out-of-line function.
 */

#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#include <linux/kmsan_string.h>
#endif

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

/*
 * KMSAN needs to instrument as much code as possible. Use C versions of
 * memsetXX() from lib/string.c under KMSAN.
 */
#if !defined(CONFIG_KMSAN)
#define __HAVE_ARCH_MEMSET16
/*
 * memset16/32/64() fill n elements (not bytes) using "rep stos{w,l,q}".
 * The "+D" and "+c" constraints hand the destination and count to the
 * string instruction, which advances %rdi and decrements %rcx in place,
 * so the original pointer is saved in s0 for the return value.
 */
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	const __auto_type s0 = s;
	asm volatile (
		"rep stosw"
		: "+D" (s), "+c" (n)
		: "a" (v)
		: "memory"
	);
	return s0;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	const __auto_type s0 = s;
	asm volatile (
		"rep stosl"
		: "+D" (s), "+c" (n)
		: "a" (v)
		: "memory"
	);
	return s0;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	const __auto_type s0 = s;
	asm volatile (
		"rep stosq"
		: "+D" (s), "+c" (n)
		: "a" (v)
		: "memory"
	);
	return s0;
}
#endif

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
/*
 * For small constant sizes, inline non-temporal (cache-bypassing) movnti
 * stores; all other sizes fall through to the out-of-line implementation.
 */
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */
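
/*
 * Usage sketch (illustrative only; "buf", "rec" and "pmem_dst" are
 * hypothetical names, not part of this header). The memsetXX() count is
 * in elements, not bytes, and memcpy_flushcache() inlines movnti stores
 * only for constant sizes 4, 8 and 16; anything else takes the
 * out-of-line __memcpy_flushcache() path:
 *
 *	u32 buf[64];
 *	u64 rec, *pmem_dst;
 *
 *	memset32(buf, 0xdeadbeef, ARRAY_SIZE(buf));	// 64 dwords, not bytes
 *	memcpy_flushcache(pmem_dst, &rec, sizeof(rec));	// constant 8: movntiq
 */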