/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cpu-features.h>
#include <asm/global_data.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>
#include <spaces.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

#ifdef CONFIG_DYNAMIC_IO_PORT_BASE

static inline ulong mips_io_port_base(void)
{
	DECLARE_GLOBAL_DATA_PTR;

	return gd->arch.io_port_base;
}

static inline void set_io_port_base(unsigned long base)
{
	DECLARE_GLOBAL_DATA_PTR;

	gd->arch.io_port_base = base;
	barrier();
}

#else /* !CONFIG_DYNAMIC_IO_PORT_BASE */

static inline ulong mips_io_port_base(void)
{
	return 0;
}

static inline void set_io_port_base(unsigned long base)
{
	BUG_ON(base);
}

#endif /* !CONFIG_DYNAMIC_IO_PORT_BASE */

/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	unsigned long addr = (unsigned long)address;

	/* this corresponds to kernel implementation of __pa() */
#ifdef CONFIG_64BIT
	if (addr < CKSEG0)
		return XPHYSADDR(addr);
#endif
	return CPHYSADDR(addr);
}
#define virt_to_phys virt_to_phys

/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
#define phys_to_virt phys_to_virt

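/*
 * Usage sketch added for illustration (not part of the original header):
 * for a buffer living in the directly mapped KSEG0 segment, virt_to_phys()
 * strips the segment offset and phys_to_virt() maps the physical address
 * back through PAGE_OFFSET, so the round trip returns the original pointer.
 * The helper name and its argument are hypothetical.
 */
static inline int __example_virt_phys_round_trip(void *buf)
{
	unsigned long pa = virt_to_phys(buf);	/* physical address of buf */
	void *va = phys_to_virt(pa);		/* back to a virtual address */

	return va == buf;	/* holds for KSEG0 addresses with PHYS_OFFSET == 0 */
}
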
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size,
					   unsigned long flags)
{
	void __iomem *addr;
	phys_addr_t phys_addr;

	addr = plat_ioremap(offset, size, flags);
	if (addr)
		return addr;

	phys_addr = fixup_bigphys_addr(offset, size);
	return (void __iomem *)(unsigned long)CKSEG1ADDR(phys_addr);
}

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cachable - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)

/*
 * These two are MIPS specific ioremap variants.  ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size)				\
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size)			\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

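/*
 * Usage sketch added for illustration (not part of the original header):
 * map a device register window uncached and return the __iomem cookie
 * that the readl()/writel() style accessors generated further down in
 * this file expect; the mapping is undone with iounmap(), defined just
 * below.  The base address and window size are hypothetical.
 */
static inline void __iomem *__example_map_device_regs(void)
{
	/* 0x1f000000 / 0x1000: made-up register base and window size */
	return ioremap(0x1f000000, 0x1000);
}
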
static inline void iounmap(const volatile void __iomem *addr)
{
	plat_iounmap(addr);
}

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define war_octeon_io_reorder_wmb()	wmb()
#else
#define war_octeon_io_reorder_wmb()	do { } while (0)
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	war_octeon_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		type __tmp;						\
									\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __writeq""\n\t"	\
			"dsll32	%L0, %L0, 0"	"\n\t"			\
			"dsrl32	%L0, %L0, 0"	"\n\t"			\
			"dsll32	%M0, %M0, 0"	"\n\t"			\
			"or	%L0, %L0, %M0"	"\n\t"			\
			"sd	%L0, %2"	"\n\t"			\
			".set	mips0"		"\n"			\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __readq"	"\n\t"	\
			"ld	%L0, %1"	"\n\t"			\
			"dsra32	%M0, %L0, 0"	"\n\t"			\
			"sll	%L0, %L0, 0"	"\n\t"			\
			".set	mips0"		"\n"			\
			: "=r" (__val)					\
			: "m" (*__mem));				\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	war_octeon_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base() + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base() + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
									\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

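/*
 * Editor's note (illustrative sketch, not literal output of the builder
 * macros above): for the 32-bit case, __BUILD_MEMORY_SINGLE(, l, u32, 1)
 * generates accessors roughly equivalent to
 *
 *	static inline void writel(u32 val, volatile void __iomem *mem)
 *	{
 *		volatile u32 *__mem;
 *
 *		war_octeon_io_reorder_wmb();
 *		__mem = (void *)__swizzle_addr_l((unsigned long)mem);
 *		*__mem = ioswabl(__mem, val);
 *	}
 *
 *	static inline u32 readl(const volatile void __iomem *mem)
 *	{
 *		volatile u32 *__mem;
 *
 *		__mem = (void *)__swizzle_addr_l((unsigned long)mem);
 *		return ioswabl(__mem, *__mem);
 *	}
 *
 * i.e. address swizzling and byte-order mangling happen here; only the
 * 64-bit (q) accessors need the inline assembly path on 32-bit CPUs.
 */
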
#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type)					\
__BUILD_MEMORY_PFX(, bwlq, type)					\
__BUILD_MEMORY_PFX(__mem_, bwlq, type)					\

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
#define __raw_readb __raw_readb
#define __raw_readw __raw_readw
#define __raw_readl __raw_readl
#define __raw_readq __raw_readq
#define __raw_writeb __raw_writeb
#define __raw_writew __raw_writew
#define __raw_writel __raw_writel
#define __raw_writeq __raw_writeq
#define readb readb
#define readw readw
#define readl readl
#define readq readq
#define writeb writeb
#define writew writew
#define writel writel
#define writeq writeq

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, )			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq

#define writeb_relaxed writeb
#define writew_relaxed writew
#define writel_relaxed writel
#define writeq_relaxed writeq

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))

/*
 * Some code tests for these symbols
 */
#define readq readq
#define writeq writeq

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(const volatile void __iomem *mem,	\
			       void *addr, unsigned int count)		\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#define readsb readsb
#define readsw readsw
#define readsl readsl
#define writesb writesb
#define writesw writesw
#define writesl writesl
#define outsb outsb
#define outsw outsw
#define outsl outsl
#define insb insb
#define insw insw
#define insl insl
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#define readsq readsq
#define writesq writesq
#define insq insq
#define outsq outsq
#endif

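/*
 * Usage sketch added for illustration (not part of the original header):
 * the string accessors generated above hit the *same* MMIO location
 * repeatedly, which is what FIFO-style data registers expect.  The
 * register offset, buffer and word count are hypothetical.
 */
static inline void __example_drain_fifo(void __iomem *regs, u32 *buf,
					unsigned int words)
{
	/* read 'words' consecutive 32-bit values from one FIFO register */
	readsl(regs + 0x20, buf, words);	/* 0x20: made-up FIFO offset */
}
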
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *)addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *)src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *)dst, src, count);
}

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)     (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

/*
 * U-Boot specific
 */
#define sync() mmiowb()

#define MAP_NOCACHE 1

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
	if (flags == MAP_NOCACHE)
		return ioremap(paddr, len);

	return (void *)CKSEG0ADDR(paddr);
}
#define map_physmem map_physmem

#define __BUILD_CLRBITS(bwlq, sfx, end, type)				\
									\
static inline void clrbits_##sfx(volatile void __iomem *mem, type clr)	\
{									\
	type __val = __raw_read##bwlq(mem);				\
	__val = end##_to_cpu(__val);					\
	__val &= ~clr;							\
	__val = cpu_to_##end(__val);					\
	__raw_write##bwlq(__val, mem);					\
}

#define __BUILD_SETBITS(bwlq, sfx, end, type)				\
									\
static inline void setbits_##sfx(volatile void __iomem *mem, type set)	\
{									\
	type __val = __raw_read##bwlq(mem);				\
	__val = end##_to_cpu(__val);					\
	__val |= set;							\
	__val = cpu_to_##end(__val);					\
	__raw_write##bwlq(__val, mem);					\
}

#define __BUILD_CLRSETBITS(bwlq, sfx, end, type)			\
									\
static inline void clrsetbits_##sfx(volatile void __iomem *mem,		\
				    type clr, type set)			\
{									\
	type __val = __raw_read##bwlq(mem);				\
	__val = end##_to_cpu(__val);					\
	__val &= ~clr;							\
	__val |= set;							\
	__val = cpu_to_##end(__val);					\
	__raw_write##bwlq(__val, mem);					\
}

#define BUILD_CLRSETBITS(bwlq, sfx, end, type)				\
									\
__BUILD_CLRBITS(bwlq, sfx, end, type)					\
__BUILD_SETBITS(bwlq, sfx, end, type)					\
__BUILD_CLRSETBITS(bwlq, sfx, end, type)

#define __to_cpu(v)	(v)
#define cpu_to__(v)	(v)

#define out_arch(type, endian, a, v)	__raw_write##type(cpu_to_##endian(v), a)
#define in_arch(type, endian, a)	endian##_to_cpu(__raw_read##type(a))

#define out_le64(a, v)	out_arch(q, le64, a, v)
#define out_le32(a, v)	out_arch(l, le32, a, v)
#define out_le16(a, v)	out_arch(w, le16, a, v)

#define in_le64(a)	in_arch(q, le64, a)
#define in_le32(a)	in_arch(l, le32, a)
#define in_le16(a)	in_arch(w, le16, a)

#define out_be64(a, v)	out_arch(q, be64, a, v)
#define out_be32(a, v)	out_arch(l, be32, a, v)
#define out_be16(a, v)	out_arch(w, be16, a, v)

#define in_be64(a)	in_arch(q, be64, a)
#define in_be32(a)	in_arch(l, be32, a)
#define in_be16(a)	in_arch(w, be16, a)

#define out_8(a, v)	__raw_writeb(v, a)
#define in_8(a)		__raw_readb(a)

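/*
 * Usage sketch added for illustration (not part of the original header):
 * the in_/out_ helpers take the register address first and encode the
 * register's endianness in their name; the clrbits/setbits/clrsetbits
 * helpers generated just below follow the same naming.  The register
 * offset and enable bit are hypothetical.
 */
static inline void __example_enable_le_device(void __iomem *regs)
{
	u32 ctrl = in_le32(regs + 0x08);	/* 0x08: made-up control register */

	out_le32(regs + 0x08, ctrl | 0x1);	/* set a made-up enable bit */
}
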
BUILD_CLRSETBITS(b, 8, _, u8)
BUILD_CLRSETBITS(w, le16, le16, u16)
BUILD_CLRSETBITS(w, be16, be16, u16)
BUILD_CLRSETBITS(w, 16, _, u16)
BUILD_CLRSETBITS(l, le32, le32, u32)
BUILD_CLRSETBITS(l, be32, be32, u32)
BUILD_CLRSETBITS(l, 32, _, u32)
BUILD_CLRSETBITS(q, le64, le64, u64)
BUILD_CLRSETBITS(q, be64, be64, u64)
BUILD_CLRSETBITS(q, 64, _, u64)

#include <asm-generic/io.h>

#endif /* _ASM_IO_H */