#ifndef _ASM_IA64_DELAY_H
#define _ASM_IA64_DELAY_H

/*
 * Delay routines using a pre-computed "cycles/usec" value.
 *
 * Copyright (C) 1998, 1999 Hewlett-Packard Co
 * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>

#include <asm/processor.h>

/*
 * Write VAL to the interval timer match register (cr.itm), followed by
 * a data serialization (srlz.d) so the write takes effect before any
 * subsequent dependent access.
 */
static __inline__ void
ia64_set_itm (unsigned long val)
{
	__asm__ __volatile__("mov cr.itm=%0;; srlz.d;;" :: "r"(val) : "memory");
}

/*
 * Read and return the current value of the interval timer match
 * register (cr.itm).
 */
static __inline__ unsigned long
ia64_get_itm (void)
{
	unsigned long result;

	__asm__ __volatile__("mov %0=cr.itm;; srlz.d;;" : "=r"(result) :: "memory");
	return result;
}

/*
 * Write VAL to the interval timer vector register (cr.itv), with a
 * data serialization to make the update visible.
 */
static __inline__ void
ia64_set_itv (unsigned long val)
{
	__asm__ __volatile__("mov cr.itv=%0;; srlz.d;;" :: "r"(val) : "memory");
}

/*
 * Write VAL to the interval time counter (ar.itc).
 */
static __inline__ void
ia64_set_itc (unsigned long val)
{
	__asm__ __volatile__("mov ar.itc=%0;; srlz.d;;" :: "r"(val) : "memory");
}

/*
 * Read and return the current value of the interval time counter
 * (ar.itc), the free-running cycle counter used for timing.
 *
 * On Itanium (CONFIG_ITANIUM), re-read while the low 32 bits of the
 * returned value are all ones — presumably a workaround for an ITC
 * read anomaly on that processor where a read can transiently return
 * an all-ones pattern (NOTE(review): confirm against the Itanium
 * erratum this targets).  __builtin_expect marks the retry as the
 * unlikely path.
 */
static __inline__ unsigned long
ia64_get_itc (void)
{
	unsigned long result;

	__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
#ifdef CONFIG_ITANIUM
	while (__builtin_expect ((__s32) result == -1, 0))
		__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
#endif
	return result;
}

/*
 * Spin for LOOPS iterations of an empty counted loop.
 *
 * Uses the loop-count application register (ar.lc) with a br.cloop
 * branch: ar.lc is loaded with LOOPS - 1 and the single-instruction
 * loop executes until it counts down.  The caller's ar.lc value is
 * saved first and restored afterwards, since ar.lc may be live in the
 * surrounding code.  LOOPS == 0 returns immediately (loops is
 * unsigned, so "< 1" only matches zero), which also avoids loading
 * ar.lc with (unsigned)-1.
 */
static __inline__ void
__delay (unsigned long loops)
{
	unsigned long saved_ar_lc;

	if (loops < 1)
		return;

	__asm__ __volatile__("mov %0=ar.lc;;" : "=r"(saved_ar_lc));
	__asm__ __volatile__("mov ar.lc=%0;;" :: "r"(loops - 1));
	__asm__ __volatile__("1:\tbr.cloop.sptk.few 1b;;");
	__asm__ __volatile__("mov ar.lc=%0" :: "r"(saved_ar_lc));
}

/*
 * Busy-wait for approximately USECS microseconds.
 *
 * Converts the requested delay to ITC cycles using this CPU's
 * pre-computed cyc_per_usec calibration, then polls ar.itc until the
 * elapsed cycle count reaches the target.  The comparison is done as
 * "now - start < cycles" on unsigned values, so it remains correct
 * even if the ITC wraps around during the wait.
 */
static __inline__ void
udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long cycles = usecs*local_cpu_data->cyc_per_usec;

	while (ia64_get_itc() - start < cycles)
		/* skip */;
}

#endif /* _ASM_IA64_DELAY_H */