/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Slow down I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  This should be true
 * on any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call, which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need the barrier() to fight side effects of the aliasing cheat.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
	* (unsigned long *) &mips_io_port_base = base;
	barrier();
}
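
/*
 * A minimal sketch of the intended use from platform setup code; the
 * board hook name and physical base below are illustrative only, not
 * taken from any in-tree platform:
 *
 *	void __init my_board_setup(void)
 *	{
 *		set_io_port_base(CKSEG1ADDR(0x14000000));
 *	}
 *
 * The chosen base must have its lower 16 bits clear, per the single-lui
 * assumption documented above.
 */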
/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 *
 */

#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif

/*
 * virt_to_phys	- map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
}

/*
 * phys_to_virt	- map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
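
/*
 * A sketch of the (rare) legitimate use, assuming a kmalloc'ed buffer;
 * vmalloc or highmem addresses must not be passed to these helpers:
 *
 *	void *buf = kmalloc(4096, GFP_KERNEL);
 *	unsigned long phys = virt_to_phys(buf);
 *
 *	BUG_ON(phys_to_virt(phys) != buf);
 *
 * The two functions are exact inverses for directly mapped addresses.
 */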
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.  This implies the assumption that there is only
 * one of these busses.
 */
extern unsigned long isa_slot_offset;

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem *__ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem *__ioremap_mode(phys_t offset, unsigned long size,
	unsigned long flags)
{
#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap     -	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_nocache     -	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_cachable -	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), PAGE_CACHABLE_DEFAULT)

/*
 * These two are MIPS specific ioremap variants.  ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size)				\
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size)			\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(const volatile void __iomem *addr)
{
#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}
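
/*
 * A sketch of typical driver usage of the mapping interfaces above;
 * the MY_DEV_* constants and the register offset are illustrative only:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(MY_DEV_PHYS_BASE, MY_DEV_REG_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);		(always go through the mmio
 *					 accessors defined below, never
 *					 plain pointer dereferences)
 *	iounmap(regs);
 */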
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	mips3"		"\t\t# __writeq""\n\t"	\
			"dsll32	%L0, %L0, 0"			"\n\t"	\
			"dsrl32	%L0, %L0, 0"			"\n\t"	\
			"dsll32	%M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	mips3"		"\t\t# __readq"	"\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32	%M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	slow;								\
									\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type)					\
__BUILD_MEMORY_PFX(, bwlq, type)					\
__BUILD_MEMORY_PFX(__mem_, bwlq, type)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
#define readq_relaxed			readq

/*
 * Some code tests for these symbols
 */
#define readq				readq
#define writeq				writeq
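
/*
 * For reference, each BUILDIO_MEM() above expands to three families of
 * accessors; e.g. BUILDIO_MEM(l, u32) yields:
 *
 *	__raw_readl/__raw_writel	no byte swapping at all
 *	readl/writel			swapped through ioswabl()
 *	__mem_readl/__mem_writel	swapped through __mem_ioswabl()
 *
 * so a driver statement such as
 *
 *	u32 status = readl(regs + 0x04);
 *
 * ("regs" and the offset being illustrative) resolves to the generated
 * inline, including the address swizzling done by __swizzle_addr_l().
 */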
#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif


/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}

/*
 * ISA space is 'always mapped' on currently supported MIPS systems, no need
 * to explicitly ioremap() it.  The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses.  The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char *)(isa_slot_offset))
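
/*
 * The string accessors generated above move whole buffers through a
 * single mmio location or port, e.g. draining a device FIFO (the
 * "fifo" pointer and the length are illustrative only):
 *
 *	u16 buf[64];
 *
 *	readsw(fifo, buf, 64);		(64 halfword reads from one
 *					 mmio location into buf)
 */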
/*
 * The caches on some architectures aren't dma-coherent, so this has to
 * be handled in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent
 *    by writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary;
 *    unlike dma_cache_wback_inv the affected lines need not be
 *    invalidated, which is sufficient before DMA transfers from memory
 *    to a device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start, size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start, size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start, size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* _ASM_IO_H */