/***********************license start***************
 *  Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
 *  reserved.
 *
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are
 *  met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *
 *      * Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials provided
 *        with the distribution.
 *
 *      * Neither the name of Cavium Networks nor the names of
 *        its contributors may be used to endorse or promote products
 *        derived from this software without specific prior written
 *        permission.
 *
 *  TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 *  AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 *  OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 *  RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 *  REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 *  DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 *  OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 *  PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 *  POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK ARISING OUT
 *  OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 *
 *  For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/


/**
 * @file
 *
 * Support functions for managing command queues used for
 * various hardware blocks.
 *
 * The common command queue infrastructure abstracts out the
 * software necessary for adding to Octeon's chained queue
 * structures. These structures are used for commands to the
 * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
 * hardware unit takes commands and CSRs of different types,
 * they all use basic linked command buffers to store the
 * pending requests. In general, users of the CVMX API don't
 * call cvmx-cmd-queue functions directly. Instead the hardware
 * unit specific wrapper should be used. The wrappers perform
 * unit specific validation and CSR writes to submit the
 * commands.
 *
 * Even though most software will never directly interact with
 * cvmx-cmd-queue, knowledge of its internal workings can help
 * in diagnosing performance problems and help with debugging.
 *
 * Command queue pointers are stored in a global named block
 * called "cvmx_cmd_queues". Except for the PKO queues, each
 * hardware queue is stored in its own cache line to reduce SMP
 * contention on spin locks. The PKO queues are stored such that
 * every 16th queue is next to each other in memory. This scheme
 * allows for queues being in separate cache lines when there
 * is a low number of queues per port. With 16 queues per port,
 * the first queue for each port is in the same cache area. The
 * second queues for each port are in another area, etc. This
 * allows software to implement very efficient lockless PKO with
 * 16 queues per port using a minimum of cache lines per core.
 * All queues for a given core will be isolated in the same
 * cache area.
 *
 * In addition to the memory pointer layout, cvmx-cmd-queue
 * provides an optimized fair ll/sc locking mechanism for the
 * queues. The lock uses a "ticket / now serving" model to
 * maintain fair order on contended locks. In addition, it uses
 * predicted locking time to limit cache contention. When a core
 * knows it must wait in line for a lock, it spins on the
 * internal cycle counter to completely eliminate any causes of
 * bus traffic.
 *
 * <hr> $Revision: 42150 $ <hr>
 */

#ifndef __CVMX_CMD_QUEUE_H__
#define __CVMX_CMD_QUEUE_H__

#include "executive-config.h"
#include "cvmx-config.h"
#include "cvmx-fpa.h"

#ifdef	__cplusplus
extern "C" {
#endif

/**
 * By default we disable the max depth support. Most programs
 * don't use it and it slows down the command queue processing
 * significantly.
 */
#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
#endif
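
/*
 * Illustrative note (not part of the original header): because the default
 * above is guarded by #ifndef, max depth support can be enabled at build time
 * without editing this file, e.g. by adding
 *
 *     -DCVMX_CMD_QUEUE_ENABLE_MAX_DEPTH=1
 *
 * to CFLAGS. The max_depth value given to cvmx_cmd_queue_initialize() below
 * is only enforced by the write functions when this macro is nonzero.
 */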

/**
 * Enumeration representing all hardware blocks that use command
 * queues. Each hardware block has up to 65536 sub identifiers for
 * multiple command queues. Not all chips support all hardware
 * units.
 */
typedef enum
{
    CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
#define CVMX_CMD_QUEUE_PKO(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
    CVMX_CMD_QUEUE_ZIP      = 0x10000,
    CVMX_CMD_QUEUE_DFA      = 0x20000,
    CVMX_CMD_QUEUE_RAID     = 0x30000,
    CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
#define CVMX_CMD_QUEUE_DMA(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
    CVMX_CMD_QUEUE_END      = 0x50000,
} cvmx_cmd_queue_id_t;
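
/*
 * Worked example (illustrative, not from the original header): a queue ID
 * encodes the hardware unit in bits [18:16] and the sub-queue in bits [15:0],
 * so the helper macros simply add the two together:
 *
 *     CVMX_CMD_QUEUE_PKO(5) == (cvmx_cmd_queue_id_t)0x00005
 *     CVMX_CMD_QUEUE_DMA(1) == (cvmx_cmd_queue_id_t)0x40001
 *
 * Note, however, that __cvmx_cmd_queue_get_index() below only supports
 * sub-queue numbers 0-255.
 */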

/**
 * Command write operations can fail if the command queue needs
 * a new buffer and the associated FPA pool is empty. It can also
 * fail if the number of queued command words reaches the maximum
 * set at initialization.
 */
typedef enum
{
    CVMX_CMD_QUEUE_SUCCESS = 0,
    CVMX_CMD_QUEUE_NO_MEMORY = -1,
    CVMX_CMD_QUEUE_FULL = -2,
    CVMX_CMD_QUEUE_INVALID_PARAM = -3,
    CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
} cvmx_cmd_queue_result_t;

typedef struct
{
    uint8_t  now_serving;           /**< You have the lock when this is your ticket */
    uint64_t unused1        : 24;
    uint32_t max_depth;             /**< Maximum outstanding command words */
    uint64_t fpa_pool       : 3;    /**< FPA pool the buffers come from */
    uint64_t base_ptr_div128: 29;   /**< Top of command buffer pointer shifted right by 7 */
    uint64_t unused2        : 6;
    uint64_t pool_size_m1   : 13;   /**< FPA buffer size in 64bit words minus 1 */
    uint64_t index          : 13;   /**< Number of command words already used in buffer */
} __cvmx_cmd_queue_state_t;
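
/*
 * Illustrative note (not from the original header): the write functions below
 * reconstruct the current write position from this state as
 *
 *     uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128 << 7);
 *     ptr += qptr->index;    // next free command word in the buffer
 *
 * Since base_ptr_div128 drops the low 7 bits of the physical address, this
 * assumes command buffers are at least 128 byte aligned.
 */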

/**
 * This structure contains the global state of all command queues.
 * It is stored in a bootmem named block and shared by all
 * applications running on Octeon. Tickets are stored in a different
 * cache line than the queue information to reduce the contention on
 * the ll/sc used to get a ticket. If this were not the case, the
 * update of queue state would cause the ll/sc to fail quite often.
 */
typedef struct
{
    uint64_t                 ticket[(CVMX_CMD_QUEUE_END>>16) * 256];
    __cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END>>16) * 256];
} __cvmx_cmd_queue_all_state_t;

/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @param queue_id  Hardware command queue to initialize.
 * @param max_depth Maximum outstanding commands that can be queued.
 * @param fpa_pool  FPA pool the command queue buffers should come from.
 * @param pool_size Size of each buffer in the FPA pool (bytes)
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size);
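
/*
 * Usage sketch (illustrative only; the pool number, buffer size, and command
 * words below are assumptions, not values from this header). A hypothetical
 * setup of PKO queue 0 from FPA pool 3 with 1024 byte buffers, followed by a
 * two word command:
 *
 *     cvmx_cmd_queue_result_t r;
 *     r = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0), 0, 3, 1024);
 *     if (r != CVMX_CMD_QUEUE_SUCCESS)
 *         return -1;
 *     r = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(0), 1, cmd_word0, cmd_word1);
 *
 * Passing 0 for max_depth leaves the queue depth unchecked. In practice the
 * unit specific wrappers (cvmx-pko, etc.) perform this setup and the CSR
 * doorbell writes; applications rarely call these functions directly.
 */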

/**
 * Shut down a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @param queue_id Queue to shutdown
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);

/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @param queue_id Hardware command queue to query
 *
 * @return Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);

/**
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routines access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @param queue_id Command queue to query
 *
 * @return Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);

/**
 * @INTERNAL
 * Get the index into the state arrays for the supplied queue id.
 *
 * @param queue_id Queue ID to get an index for
 *
 * @return Index into the state arrays
 */
static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
{
    /* Warning: This code currently only works with devices that have 256 queues
        or less. Devices with more than 16 queues are laid out in memory to allow
        cores quick access to every 16th queue. This reduces cache thrashing
        when you are running 16 queues per port to support lockless operation */
    int unit = queue_id>>16;
    int q = (queue_id >> 4) & 0xf;
    int core = queue_id & 0xf;
    return unit*256 + core*16 + q;
}
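
/*
 * Worked example (illustrative, not from the original header): for queue ID
 * 0x23 the fields decode as unit = 0, q = 2 (bits [7:4]) and core = 3
 * (bits [3:0]), giving index 0*256 + 3*16 + 2 = 50. Queues 0x03, 0x13 and
 * 0x23 therefore map to the adjacent indices 48, 49 and 50, which is how
 * every 16th queue ends up next to each other in memory as described at the
 * top of this file.
 */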


/**
 * @INTERNAL
 * Lock the supplied queue so nobody else is updating it at the same
 * time as us.
 *
 * @param queue_id Queue ID to lock
 * @param qptr     Pointer to the queue's global state
 */
static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id, __cvmx_cmd_queue_state_t *qptr)
{
    extern CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
    int tmp;
    int my_ticket;
    CVMX_PREFETCH(qptr, 0);
    asm volatile (
        ".set push\n"
        ".set noreorder\n"
        "1:\n"
        "ll     %[my_ticket], %[ticket_ptr]\n"          /* Atomically add one to ticket_ptr */
        "li     %[ticket], 1\n"                         /*    and store the original value */
        "baddu  %[ticket], %[my_ticket]\n"              /*    in my_ticket */
        "sc     %[ticket], %[ticket_ptr]\n"
        "beqz   %[ticket], 1b\n"
        " nop\n"
        "lbu    %[ticket], %[now_serving]\n"            /* Load the current now_serving ticket */
        "2:\n"
        "beq    %[ticket], %[my_ticket], 4f\n"          /* Jump out if now_serving == my_ticket */
        " subu   %[ticket], %[my_ticket], %[ticket]\n"  /* Find out how many tickets are in front of me */
        "subu  %[ticket], 1\n"                          /* Use tickets in front of me minus one to delay */
        "cins   %[ticket], %[ticket], 5, 7\n"           /* Delay will be ((tickets in front)-1)*32 loops */
        "3:\n"
        "bnez   %[ticket], 3b\n"                        /* Loop here until our ticket might be up */
        " subu  %[ticket], 1\n"
        "b      2b\n"                                   /* Jump back up to check our ticket again */
        " lbu   %[ticket], %[now_serving]\n"            /* Load the current now_serving ticket */
        "4:\n"
        ".set pop\n"
        : [ticket_ptr] "=m" (__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
          [now_serving] "=m" (qptr->now_serving),
          [ticket] "=r" (tmp),
          [my_ticket] "=r" (my_ticket)
    );
}
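
/*
 * The assembly above is roughly equivalent to the following C (an
 * illustrative sketch only; the real code relies on ll/sc, represented here
 * by a hypothetical atomic_fetch_add_u8(), to make the ticket grab atomic):
 *
 *     uint64_t *tp = &__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)];
 *     uint8_t my_ticket = atomic_fetch_add_u8(tp, 1);   // take the next ticket
 *     uint8_t serving = qptr->now_serving;
 *     while (serving != my_ticket)
 *     {
 *         // Predicted wait: spin ((tickets ahead of us) - 1) * 32 loops
 *         // without touching the bus before re-reading now_serving.
 *         int delay = (int)(uint8_t)(my_ticket - serving - 1) << 5;
 *         while (delay-- > 0)
 *             ;
 *         serving = qptr->now_serving;
 *     }
 */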


/**
 * @INTERNAL
 * Unlock the queue, flushing all writes.
 *
 * @param qptr   Queue to unlock
 */
static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
{
    qptr->now_serving++;
    CVMX_SYNCWS;
}


/**
 * @INTERNAL
 * Get the queue state structure for the given queue id
 *
 * @param queue_id Queue id to get
 *
 * @return Queue structure or NULL on failure
 */
static inline __cvmx_cmd_queue_state_t *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
{
    extern CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(queue_id >= CVMX_CMD_QUEUE_END))
            return NULL;
        if (cvmx_unlikely((queue_id & 0xffff) >= 256))
            return NULL;
    }
    return &__cvmx_cmd_queue_state_ptr->state[__cvmx_cmd_queue_get_index(queue_id)];
}


/**
 * Write an arbitrary number of command words to a command queue.
 * This is a generic function; the fixed number of command word
 * functions yield higher performance.
 *
 * @param queue_id  Hardware command queue to write to
 * @param use_locking
 *                  Use internal locking to ensure exclusive access for queue
 *                  updates. If you don't use this locking you must ensure
 *                  exclusivity some other way. Locking is strongly recommended.
 * @param cmd_count Number of command words to write
 * @param cmds      Array of commands to write
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t queue_id, int use_locking, int cmd_count, uint64_t *cmds)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        if (cvmx_unlikely((cmd_count < 1) || (cmd_count > 32)))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        if (cvmx_unlikely(cmds == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
        exceed it. If any part of the command would be below the limit
        we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + cmd_count < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += cmd_count;
        while (cmd_count--)
            *ptr++ = *cmds++;
    }
    else
    {
        uint64_t *ptr;
        int count;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        /* Figure out how many command words will fit in this buffer. One
            location will be needed for the next buffer pointer */
        count = qptr->pool_size_m1 - qptr->index;
        ptr += qptr->index;
        cmd_count -= count;
        while (count--)
            *ptr++ = *cmds++;
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
            to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = cmd_count;
        ptr = new_buffer;
        while (cmd_count--)
            *ptr++ = *cmds++;
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}
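
/*
 * Usage sketch (illustrative only; the command word contents are assumptions,
 * since the real encoding depends on the hardware unit). Writing a four word
 * command with the internal lock held:
 *
 *     uint64_t cmd[4] = { header_word, addr_word0, addr_word1, len_word };
 *     if (cvmx_cmd_queue_write(CVMX_CMD_QUEUE_RAID, 1, 4, cmd) != CVMX_CMD_QUEUE_SUCCESS)
 *         return -1;
 *
 * Note that the unit specific doorbell CSR must still be written to tell the
 * hardware more words are available; that is normally done by the per-unit
 * wrapper as described at the top of this file.
 */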


/**
 * Simple function to write two command words to a command
 * queue.
 *
 * @param queue_id Hardware command queue to write to
 * @param use_locking
 *                 Use internal locking to ensure exclusive access for queue
 *                 updates. If you don't use this locking you must ensure
 *                 exclusivity some other way. Locking is strongly recommended.
 * @param cmd1     Command
 * @param cmd2     Command
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
        exceed it. If any part of the command would be below the limit
        we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + 2 < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += 2;
        ptr[0] = cmd1;
        ptr[1] = cmd2;
    }
    else
    {
        uint64_t *ptr;
        /* Figure out how many command words will fit in this buffer. One
            location will be needed for the next buffer pointer */
        int count = qptr->pool_size_m1 - qptr->index;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        count--;
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        *ptr++ = cmd1;
        if (cvmx_likely(count))
            *ptr++ = cmd2;
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
            to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = 0;
        if (cvmx_unlikely(count == 0))
        {
            qptr->index = 1;
            new_buffer[0] = cmd2;
        }
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}


/**
 * Simple function to write three command words to a command
 * queue.
 *
 * @param queue_id Hardware command queue to write to
 * @param use_locking
 *                 Use internal locking to ensure exclusive access for queue
 *                 updates. If you don't use this locking you must ensure
 *                 exclusivity some other way. Locking is strongly recommended.
 * @param cmd1     Command
 * @param cmd2     Command
 * @param cmd3     Command
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2, uint64_t cmd3)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);

    /* If a max queue length was specified then make sure we don't
        exceed it. If any part of the command would be below the limit
        we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }

    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + 3 < qptr->pool_size_m1))
    {
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += 3;
        ptr[0] = cmd1;
        ptr[1] = cmd2;
        ptr[2] = cmd3;
    }
    else
    {
        uint64_t *ptr;
        /* Figure out how many command words will fit in this buffer. One
            location will be needed for the next buffer pointer */
        int count = qptr->pool_size_m1 - qptr->index;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        count--;
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        *ptr++ = cmd1;
        if (count)
        {
            *ptr++ = cmd2;
            if (count > 1)
                *ptr++ = cmd3;
        }
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
            to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = 0;
        ptr = new_buffer;
        if (count == 0)
        {
            *ptr++ = cmd2;
            qptr->index++;
        }
        if (count < 2)
        {
            *ptr++ = cmd3;
            qptr->index++;
        }
    }

    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}

#ifdef	__cplusplus
}
#endif

#endif /* __CVMX_CMD_QUEUE_H__ */