/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	mb_write(void);
#endif
201553Srgrimes#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
211553Srgrimes#ifdef __i386__
221553Srgrimes/*
231553Srgrimes * According to the Intel Architecture Software Developer's Manual, current
241553Srgrimes * processors execute instructions in order from the perspective of other
251553Srgrimes * processors in a multiprocessor system, but 1) Intel reserves the right to
261553Srgrimes * change that, and 2) the compiler's optimizer could re-order instructions if
271553Srgrimes * there weren't some form of barrier.  Therefore, even if running on an
281553Srgrimes * architecture that does not need memory barriers (everything through at least
291553Srgrimes * i686), an "optimizer barrier" is necessary.
301553Srgrimes */
311553SrgrimesJEMALLOC_INLINE void
321553Srgrimesmb_write(void)
33114601Sobrien{
341553Srgrimes
3529529Scharnier#  if 0
361553Srgrimes	/* This is a true memory barrier. */
371553Srgrimes	asm volatile ("pusha;"
381553Srgrimes	    "xor  %%eax,%%eax;"
391553Srgrimes	    "cpuid;"
401553Srgrimes	    "popa;"
411553Srgrimes	    : /* Outputs. */
42114601Sobrien	    : /* Inputs. */
4329529Scharnier	    : "memory" /* Clobbers. */
44162295Scharnier	    );
45114601Sobrien#else
46114601Sobrien	/*
471553Srgrimes	 * This is hopefully enough to keep the compiler from reordering
481553Srgrimes	 * instructions around this one.
491553Srgrimes	 */
501553Srgrimes	asm volatile ("nop;"
51162295Scharnier	    : /* Outputs. */
521553Srgrimes	    : /* Inputs. */
531553Srgrimes	    : "memory" /* Clobbers. */
541553Srgrimes	    );
55166485Smpp#endif
561553Srgrimes}
571553Srgrimes#elif (defined(__amd64__) || defined(__x86_64__))
5829529ScharnierJEMALLOC_INLINE void
5929529Scharniermb_write(void)
601553Srgrimes{
611553Srgrimes
6229529Scharnier	asm volatile ("sfence"
631553Srgrimes	    : /* Outputs. */
6429529Scharnier	    : /* Inputs. */
651553Srgrimes	    : "memory" /* Clobbers. */
6629529Scharnier	    );
671553Srgrimes}
681553Srgrimes#elif defined(__powerpc__)
691553SrgrimesJEMALLOC_INLINE void
701553Srgrimesmb_write(void)
7187596Smikeh{
7287596Smikeh
7387596Smikeh	asm volatile ("eieio"
741553Srgrimes	    : /* Outputs. */
751553Srgrimes	    : /* Inputs. */
761553Srgrimes	    : "memory" /* Clobbers. */
771553Srgrimes	    );
781553Srgrimes}
791553Srgrimes#elif defined(__sparc64__)
801553SrgrimesJEMALLOC_INLINE void
811553Srgrimesmb_write(void)
8229529Scharnier{
831553Srgrimes
841553Srgrimes	asm volatile ("membar #StoreStore"
8599800Salfred	    : /* Outputs. */
8699800Salfred	    : /* Inputs. */
8799800Salfred	    : "memory" /* Clobbers. */
8899800Salfred	    );
8999800Salfred}
9099800Salfred#elif defined(__tile__)
9199800SalfredJEMALLOC_INLINE void
9299800Salfredmb_write(void)
9399800Salfred{
9499800Salfred
9599800Salfred	__sync_synchronize();
9699800Salfred}
9799800Salfred#else
9899800Salfred/*
9929529Scharnier * This is much slower than a simple memory barrier, but the semantics of mutex
10029529Scharnier * unlock make this work.
101180187Sdes */
1021553SrgrimesJEMALLOC_INLINE void
103103071Ssobomaxmb_write(void)
104103071Ssobomax{
105103071Ssobomax	malloc_mutex_t mtx;
106103071Ssobomax
107103071Ssobomax	malloc_mutex_init(&mtx);
108103071Ssobomax	malloc_mutex_lock(&mtx);
109124830Sgrehan	malloc_mutex_unlock(&mtx);
110124830Sgrehan}
11184081Syar#endif
112126201Sceri#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
