/******************************************************************************
 * amd64/xen/xen-os.h
 *
 * Random collection of macros and definitions
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_XEN_XEN_OS_H_
#define _MACHINE_XEN_XEN_OS_H_

#ifdef PAE
#define CONFIG_X86_PAE
#endif

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()
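
/*
 * Illustrative sketch, not part of this header's API: cpu_relax() is meant
 * to sit in the body of a busy-wait loop so the CPU is hinted that the
 * thread is spinning.  Assuming a hypothetical shared flag `ready' set by
 * another context, a typical loop would look like:
 *
 *	static volatile int ready;
 *	...
 *	while (!ready)
 *		cpu_relax();
 *
 * The "memory" clobber in rep_nop() also keeps the compiler from hoisting
 * the load of `ready' out of the loop.
 */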

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")
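
/*
 * Illustrative sketch: barrier() constrains only the compiler's ordering of
 * memory accesses, not the processor's.  Assuming two hypothetical shared
 * variables `data' and `flag' (and an illustrative producer function),
 * placing barrier() between the stores keeps the compiler from reordering
 * them; a lock-free protocol on SMP would still need a hardware barrier:
 *
 *	data = compute_value();
 *	barrier();
 *	flag = 1;
 */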

/*
 * LOCK_PREFIX/LOCK are prepended to the bit-operation instructions below;
 * both expand to nothing here, so those instructions are emitted without a
 * lock prefix.  ADDR is shorthand used by the bit operations to reference
 * the caller's `addr' argument as a volatile memory operand.
 */
#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}
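
/*
 * Illustrative sketch of test_and_clear_bit() usage; the names below
 * (`pending', handle_event()) are assumptions for the example, not part of
 * this header.  Because the routine returns the previous value of the bit,
 * a consumer can claim and clear a pending bit in a single step:
 *
 *	static volatile unsigned long pending;
 *	...
 *	for (int i = 0; i < 32; i++) {
 *		if (test_and_clear_bit(i, &pending))
 *			handle_event(i);
 *	}
 */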

/* Non-atomic bit test used when the bit number is a compile-time constant. */
static __inline int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
        (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

/* Bit test via BT for bit numbers that are only known at run time. */
static __inline int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
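
/*
 * Illustrative sketch of test_bit() combined with cpu_relax(); the variable
 * name `status' is an assumption for the example.  Spinning until another
 * context sets bit 0 of a shared status word:
 *
 *	static volatile unsigned long status;
 *	...
 *	while (!test_bit(0, &status))
 *		cpu_relax();
 *
 * Because 0 is a compile-time constant, __builtin_constant_p() routes this
 * call to constant_test_bit(); a bit number only known at run time would go
 * through variable_test_bit() instead.
 */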

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
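
/*
 * Illustrative sketch of set_bit()/clear_bit(); the names `event_mask' and
 * `port' are assumptions for the example.  Publishing and later retiring a
 * per-port flag in a shared mask:
 *
 *	static volatile unsigned long event_mask;
 *	...
 *	set_bit(port, &event_mask);
 *	...
 *	if (test_bit(port, &event_mask))
 *		clear_bit(port, &event_mask);
 *
 * As the comment above notes, clear_bit() is not a memory barrier; a caller
 * using it to release a lock-like construct must add its own barrier.
 */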

#endif /* !__ASSEMBLY__ */

#endif /* _MACHINE_XEN_XEN_OS_H_ */