/***********************license start***************
 *  Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
 *  reserved.
 *
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are
 *  met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *
 *      * Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials provided
 *        with the distribution.
 *
 *      * Neither the name of Cavium Networks nor the names of
 *        its contributors may be used to endorse or promote products
 *        derived from this software without specific prior written
 *        permission.
 *
 *  TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 *  AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 *  OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 *  RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 *  REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 *  DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 *  OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 *  PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 *  POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK ARISING OUT
 *  OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 *
 *  For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/


/**
 * @file
 *
 * Implementation of spinlocks.
 *
 * <hr>$Revision: 41586 $<hr>
 */

#ifndef __CVMX_SPINLOCK_H__
#define __CVMX_SPINLOCK_H__

#include "cvmx-asm.h"

#ifdef	__cplusplus
extern "C" {
#endif

/* Spinlocks for Octeon */

// define this to enable recursive spinlock debugging
//#define CVMX_SPINLOCK_DEBUG


/**
 * Spinlocks for Octeon
 */
typedef struct {
    volatile uint32_t value;
} cvmx_spinlock_t;

// Note: macros are not expanded inside the inline asm below, so these
// values are hardcoded there.
#define  CVMX_SPINLOCK_UNLOCKED_VAL  0
#define  CVMX_SPINLOCK_LOCKED_VAL    1


#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER  {CVMX_SPINLOCK_UNLOCKED_VAL}

/**
 * Initialize a spinlock
 *
 * @param lock   Lock to initialize
 */
static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
{
    lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
}

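/*
 * Usage sketch (illustrative only; "packet_lock", "struct node", and
 * "node_setup" are hypothetical names, not part of this header):
 * statically allocated locks can be initialized at compile time with
 * CVMX_SPINLOCK_UNLOCKED_INITIALIZER, while locks living in dynamically
 * allocated or reused memory should be set up with cvmx_spinlock_init()
 * before first use.
 *
 *    cvmx_spinlock_t packet_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
 *
 *    struct node {
 *        cvmx_spinlock_t lock;
 *        uint32_t        data;
 *    };
 *
 *    static void node_setup(struct node *n)
 *    {
 *        cvmx_spinlock_init(&n->lock);
 *        n->data = 0;
 *    }
 */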

/**
 * Return non-zero if the spinlock is currently locked
 *
 * @param lock   Lock to check
 * @return Non-zero if locked
 */
static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
{
    return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
}

/**
 * Releases lock
 *
 * @param lock   pointer to lock structure
 */
static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
{
    CVMX_SYNCWS;
    lock->value = 0;
    CVMX_SYNCWS;
}

/**
 * Attempts to take the lock, but does not spin if the lock is not available.
 * Even when the lock is available, acquisition may take several internal
 * retries if the ll/sc sequence fails (e.g. due to contention).
 *
 * @param lock   pointer to lock structure
 *
 * @return 0: lock successfully taken
 *         1: lock not taken, held by someone else
 * These return values match the Linux semantics.
 */
static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
{
    unsigned int tmp;

    __asm__ __volatile__(
    ".set noreorder         \n"
    "1: ll   %[tmp], %[val] \n"
    "   bnez %[tmp], 2f     \n"  // if lock held, fail immediately
    "   li   %[tmp], 1      \n"
    "   sc   %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"
    "   li   %[tmp], 0      \n"
    "2:                     \n"
    ".set reorder           \n"
    : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
    :
    : "memory");

    return (!!tmp);  /* normalize to 0 or 1 */
}
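
/*
 * Usage sketch (illustrative only; "stats_lock", "stat_count", and
 * "try_bump_stat" are hypothetical): because cvmx_spinlock_trylock()
 * returns 0 on success (Linux semantics), a caller can attempt the lock
 * and do other work instead of spinning when it is busy.
 *
 *    static cvmx_spinlock_t stats_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
 *    static uint64_t stat_count;
 *
 *    static int try_bump_stat(void)
 *    {
 *        if (cvmx_spinlock_trylock(&stats_lock))
 *            return 0;                      // busy: caller can retry later
 *        stat_count++;
 *        cvmx_spinlock_unlock(&stats_lock);
 *        return 1;
 *    }
 */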

/**
 * Gets lock, spins until lock is taken
 *
 * @param lock   pointer to lock structure
 */
static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
{
    unsigned int tmp;

    __asm__ __volatile__(
    ".set noreorder         \n"
    "1: ll   %[tmp], %[val] \n"
    "   bnez %[tmp], 1b     \n"
    "   li   %[tmp], 1      \n"
    "   sc   %[tmp], %[val] \n"
    "   beqz %[tmp], 1b     \n"
    "   nop                 \n"
    ".set reorder           \n"
    : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
    :
    : "memory");
}
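
/*
 * Usage sketch (illustrative only; the "queue_*" names are hypothetical):
 * the common pattern brackets a short critical section with
 * cvmx_spinlock_lock()/cvmx_spinlock_unlock(); the CVMX_SYNCWS barriers in
 * unlock make the protected stores visible before the lock is released.
 *
 *    static cvmx_spinlock_t queue_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
 *    static uint32_t queue_head;
 *
 *    static void queue_advance(void)
 *    {
 *        cvmx_spinlock_lock(&queue_lock);
 *        queue_head++;                      // protected by queue_lock
 *        cvmx_spinlock_unlock(&queue_lock);
 *    }
 */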


/** ********************************************************************
 * Bit spinlocks
 *
 * These spinlocks use a single bit (bit 31) of a 32-bit word for locking.
 * The rest of the bits in the word are left undisturbed.  This enables more
 * compact data structures, as only 1 bit is consumed for the lock.
 */

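/*
 * Layout sketch (illustrative only; "ref_entry" is a hypothetical
 * structure): with only bit 31 reserved for locking, one 32-bit word can
 * hold both the lock and up to 31 bits of the data it protects.
 *
 *    struct ref_entry {
 *        uint32_t lock_and_count;   // bit 31: lock, bits 0..30: refcount
 *    };
 */
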
/**
 * Gets lock, spins until lock is taken.
 * Preserves the low 31 bits of the 32-bit word used for the lock.
 *
 * @param word  word to lock bit 31 of
 */
static inline void cvmx_spinlock_bit_lock(uint32_t *word)
{
    unsigned int tmp;

    __asm__ __volatile__(
    ".set noreorder               \n"
    ".set noat                    \n"
    "1: ll    %[tmp], %[val]      \n"
    "   bbit1 %[tmp], 31, 1b      \n"  // spin while the lock bit is set
    "   li    $at, 1              \n"
    "   ins   %[tmp], $at, 31, 1  \n"
    "   sc    %[tmp], %[val]      \n"
    "   beqz  %[tmp], 1b          \n"
    "   nop                       \n"
    ".set at                      \n"
    ".set reorder                 \n"
    : [val] "+m" (*word), [tmp] "=&r" (tmp)
    :
    : "memory");
}

/**
 * Attempts to get lock, returns immediately with success/failure.
 * Preserves the low 31 bits of the 32-bit word used for the lock.
 *
 * @param word  word to lock bit 31 of
 * @return 0: lock successfully taken
 *         1: lock not taken, held by someone else
 * These return values match the Linux semantics.
 */
static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
{
    unsigned int tmp;

    __asm__ __volatile__(
    ".set noreorder               \n"
    ".set noat                    \n"
    "1: ll    %[tmp], %[val]      \n"
    "   bbit1 %[tmp], 31, 2f      \n"  // if lock held, fail immediately
    "   li    $at, 1              \n"
    "   ins   %[tmp], $at, 31, 1  \n"
    "   sc    %[tmp], %[val]      \n"
    "   beqz  %[tmp], 1b          \n"
    "   li    %[tmp], 0           \n"
    "2:                           \n"
    ".set at                      \n"
    ".set reorder                 \n"
    : [val] "+m" (*word), [tmp] "=&r" (tmp)
    :
    : "memory");

    return (!!tmp);  /* normalize to 0 or 1 */
}

/**
 * Releases bit lock
 *
 * Unconditionally clears bit 31 of the lock word.  Note that this is
 * done non-atomically, as this implementation assumes that the rest
 * of the bits in the word are protected by the lock.
 *
 * @param word  word to unlock bit 31 in
 */
static inline void cvmx_spinlock_bit_unlock(uint32_t *word)
{
    CVMX_SYNCWS;
    *word &= ~(1U << 31);
    CVMX_SYNCWS;
}
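
/*
 * Usage sketch (illustrative only, continuing the hypothetical "ref_entry"
 * layout above): while the lock bit is held, the low 31 bits may be read
 * and written with plain accesses; cvmx_spinlock_bit_unlock() then clears
 * only bit 31.
 *
 *    static void ref_entry_incr(struct ref_entry *e)
 *    {
 *        cvmx_spinlock_bit_lock(&e->lock_and_count);
 *        // keep bit 31 (still held) and bump the low 31-bit count
 *        e->lock_and_count = (e->lock_and_count & 0x80000000u) |
 *                            ((e->lock_and_count + 1) & 0x7FFFFFFFu);
 *        cvmx_spinlock_bit_unlock(&e->lock_and_count);
 *    }
 */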


/** ********************************************************************
 * Recursive spinlocks
 */
typedef struct {
    volatile unsigned int value;
    volatile unsigned int core_num;
} cvmx_spinlock_rec_t;

/**
 * Initialize a recursive spinlock
 *
 * @param lock   Lock to initialize
 */
static inline void cvmx_spinlock_rec_init(cvmx_spinlock_rec_t *lock)
{
    lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
}

/**
 * Return non-zero if the recursive spinlock is currently locked
 *
 * @param lock   Lock to check
 * @return Non-zero if locked
 */
static inline int cvmx_spinlock_rec_locked(cvmx_spinlock_rec_t *lock)
{
    return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
}

/**
 * Unlocks one level of recursive spinlock.  The lock is not released
 * unless this is the final unlock call for that spinlock.
 *
 * @param lock   pointer to recursive spinlock structure
 */
static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock);

#ifdef CVMX_SPINLOCK_DEBUG
#define cvmx_spinlock_rec_unlock(x)  _int_cvmx_spinlock_rec_unlock((x), __FILE__, __LINE__)
static inline void _int_cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock, const char *filename, int linenum)
#else
static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock)
#endif
{
    unsigned int temp, result;
    int core_num = cvmx_get_core_num();

#ifdef CVMX_SPINLOCK_DEBUG
    if (lock->core_num != core_num)
    {
        cvmx_dprintf("ERROR: Recursive spinlock release attempted by non-owner! file: %s, line: %d\n", filename, linenum);
        return;
    }
#endif

    __asm__ __volatile__(
        ".set  noreorder                  \n"
        "      addi  %[tmp], %[pid], 0x80 \n"
        "      sw    %[tmp], %[lid]       # invalidate the owner id while the count changes\n"
        CVMX_SYNCWS_STR
        "1:    ll    %[tmp], %[val]       \n"
        "      addu  %[res], %[tmp], -1   # decrement lock count\n"
        "      sc    %[res], %[val]       \n"
        "      beqz  %[res], 1b           \n"
        "      nop                        \n"
        "      beq   %[tmp], %[res], 2f   # %[res] is 1 after a successful sc; if the\n"
        "      nop                        # old count was also 1 the lock is now free\n"
        "      sw    %[pid], %[lid]       # still held: restore the owner id\n"
        "2:                               \n"
        CVMX_SYNCWS_STR
        ".set  reorder                    \n"
        : [res] "=&r" (result), [tmp] "=&r" (temp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
        : [pid] "r" (core_num)
        : "memory");

#ifdef CVMX_SPINLOCK_DEBUG
    if (lock->value == ~0U)
    {
        cvmx_dprintf("ERROR: Recursive spinlock released too many times! file: %s, line: %d\n", filename, linenum);
    }
#endif
}

/**
 * Takes a recursive spinlock for a given core.  A core can take the lock
 * multiple times, and the lock is released only when the corresponding
 * number of unlocks have taken place.
 *
 * NOTE: This assumes only one thread per core, and that the core ID is used as
 * the lock 'key'.  (This implementation cannot be generalized to allow
 * multiple threads to use the same key (core id).)
 *
 * @param lock   address of recursive spinlock structure.  Note that this is
 *               distinct from the standard spinlock.
 */
static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock);

#ifdef CVMX_SPINLOCK_DEBUG
#define cvmx_spinlock_rec_lock(x)  _int_cvmx_spinlock_rec_lock((x), __FILE__, __LINE__)
static inline void _int_cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock, const char *filename, int linenum)
#else
static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock)
#endif
{
    unsigned int tmp;
    int core_num = cvmx_get_core_num();

    __asm__ __volatile__(
        ".set  noreorder              \n"
        "1: ll   %[tmp], %[val]       # load the count\n"
        "   bnez %[tmp], 2f           # if count != zero, check the owner\n"
        "   addu %[tmp], %[tmp], 1    \n"
        "   sc   %[tmp], %[val]       \n"
        "   beqz %[tmp], 1b           # retry if sc failed\n"
        "   nop                       \n"
        "   j    3f                   # go record the owning core\n"
        "2: lw   %[tmp], %[lid]       # load the owner core_num\n"
        "   bne  %[tmp], %[pid], 1b   # not the owner: spin\n"
        "   nop                       \n"
        "   lw   %[tmp], %[val]       \n"
        "   addu %[tmp], %[tmp], 1    \n"
        "   sw   %[tmp], %[val]       # owner recursing: plain increment is safe\n"
        "3: sw   %[pid], %[lid]       # store the core_num\n"
        CVMX_SYNCWS_STR
        ".set  reorder                \n"
        : [tmp] "=&r" (tmp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
        : [pid] "r" (core_num)
        : "memory");

#ifdef CVMX_SPINLOCK_DEBUG
    if (lock->core_num != core_num)
    {
        cvmx_dprintf("cvmx_spinlock_rec_lock: lock taken, but core_num is incorrect. file: %s, line: %d\n", filename, linenum);
    }
#endif
}
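
/*
 * Usage sketch (illustrative only; the "table_*" helpers are hypothetical):
 * the owning core may re-take a recursive lock it already holds, so a
 * function that acquires the lock can call another that acquires it again;
 * the lock is released only when every lock call has been matched by an
 * unlock.
 *
 *    static cvmx_spinlock_rec_t table_lock;   // cvmx_spinlock_rec_init() at startup
 *
 *    static void table_update_one(void)
 *    {
 *        cvmx_spinlock_rec_lock(&table_lock);
 *        // ... modify one entry ...
 *        cvmx_spinlock_rec_unlock(&table_lock);
 *    }
 *
 *    static void table_update_all(void)
 *    {
 *        cvmx_spinlock_rec_lock(&table_lock);
 *        table_update_one();                    // nested acquire by the owner is safe
 *        cvmx_spinlock_rec_unlock(&table_lock); // lock actually released here
 *    }
 */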

#ifdef	__cplusplus
}
#endif

#endif /* __CVMX_SPINLOCK_H__ */