/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 *
 * Support functions for managing command queues used for
 * various hardware blocks.
 *
 * <hr>$Revision: 70030 $<hr>
 */
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-bootmem.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-dpi-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-cmd-queue.h>
#else
#include "cvmx.h"
#include "cvmx-bootmem.h"
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "cvmx-config.h"
#endif
#include "cvmx-fpa.h"
#include "cvmx-cmd-queue.h"
#endif


/**
 * Pointer used by the application to access the global queue
 * state. It points to a bootmem named block, so the state is shared
 * across cores and applications.
 */
CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(__cvmx_cmd_queue_state_ptr);
#endif

/**
 * @INTERNAL
 * Initialize the global queue state pointer.
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
    char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
    extern uint64_t octeon_reserve32_memory;
#endif

    if (cvmx_likely(__cvmx_cmd_queue_state_ptr))
        return CVMX_CMD_QUEUE_SUCCESS;

#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
    if (octeon_reserve32_memory)
        __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
                                                              octeon_reserve32_memory,
                                                              octeon_reserve32_memory + (CONFIG_CAVIUM_RESERVE32<<20) - 1,
                                                              128, alloc_name);
    else
#endif
        __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), 128, alloc_name);
#else
    __cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), 128, alloc_name);
#endif
    if (__cvmx_cmd_queue_state_ptr)
        memset(__cvmx_cmd_queue_state_ptr, 0, sizeof(*__cvmx_cmd_queue_state_ptr));
    else
    {
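        /* The allocation can fail because another application has already
            created the named block. In that case, look up the existing
            block and share its state. */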
        const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(alloc_name);
        if (block_desc)
            __cvmx_cmd_queue_state_ptr = cvmx_phys_to_ptr(block_desc->base_addr);
        else
        {
            cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n", alloc_name);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
    }
    return CVMX_CMD_QUEUE_SUCCESS;
}


/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated, and the hardware unit is configured to point to the
 * new command queue.
 *
 * @param queue_id  Hardware command queue to initialize.
 * @param max_depth Maximum number of outstanding commands that can be
 *                  queued; must be zero unless
 *                  CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH is set.
 * @param fpa_pool  FPA pool to allocate command buffers from.
 * @param pool_size Size of each buffer in the FPA pool, in bytes.
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
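 *
 * A minimal usage sketch (the queue choice, pool number, and buffer
 * size below are illustrative values, not requirements; the
 * CVMX_CMD_QUEUE_PKO() helper comes from cvmx-cmd-queue.h):
 *
 * @code
 * cvmx_cmd_queue_result_t rc;
 * // PKO queue 0; max_depth 0; 2KB buffers drawn from FPA pool 3
 * rc = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0), 0, 3, 2048);
 * if ((rc != CVMX_CMD_QUEUE_SUCCESS) && (rc != CVMX_CMD_QUEUE_ALREADY_SETUP))
 *     cvmx_dprintf("cmd queue init failed: %d\n", (int)rc);
 * @endcode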
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size)
{
    __cvmx_cmd_queue_state_t *qstate;
    cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
    if (result != CVMX_CMD_QUEUE_SUCCESS)
        return result;

    qstate = __cvmx_cmd_queue_get_state(queue_id);
    if (qstate == NULL)
        return CVMX_CMD_QUEUE_INVALID_PARAM;

    /* We artificially limit max_depth to 1<<20 words; the limit is arbitrary */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH)
    {
        if ((max_depth < 0) || (max_depth > 1<<20))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }
    else if (max_depth != 0)
        return CVMX_CMD_QUEUE_INVALID_PARAM;

    if ((fpa_pool < 0) || (fpa_pool > 7))
        return CVMX_CMD_QUEUE_INVALID_PARAM;
    if ((pool_size < 128) || (pool_size > 65536))
        return CVMX_CMD_QUEUE_INVALID_PARAM;

    /* See if someone else has already initialized the queue */
    if (qstate->base_ptr_div128)
    {
        if (max_depth != (int)qstate->max_depth)
        {
            cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different max_depth (%d).\n", (int)qstate->max_depth);
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        }
        if (fpa_pool != qstate->fpa_pool)
        {
            cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different FPA pool (%u).\n", qstate->fpa_pool);
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        }
        if ((pool_size>>3)-1 != qstate->pool_size_m1)
        {
            cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different FPA pool size (%u).\n", (qstate->pool_size_m1+1)<<3);
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        }
        CVMX_SYNCWS;
        return CVMX_CMD_QUEUE_ALREADY_SETUP;
    }
    else
    {
        cvmx_fpa_ctl_status_t status;
        void *buffer;

        status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
        if (!status.s.enb)
        {
            cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: FPA is not enabled.\n");
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        buffer = cvmx_fpa_alloc(fpa_pool);
        if (buffer == NULL)
        {
            cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to allocate initial buffer.\n");
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }

        memset(qstate, 0, sizeof(*qstate));
        qstate->max_depth = max_depth;
        qstate->fpa_pool = fpa_pool;
        qstate->pool_size_m1 = (pool_size>>3)-1;
        qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
        /* The memset above zeroed the queue's "now serving" field, so the
            matching ticket counter must be zeroed as well */
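        /* (__cvmx_cmd_queue_lock() implements a ticket lock: a writer
            atomically takes a ticket and spins until "now serving" reaches
            it, so the two counters must stay in step.) */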
        __cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
        CVMX_SYNCWS;
        return CVMX_CMD_QUEUE_SUCCESS;
    }
}


/**
 * Shut down a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @param queue_id Queue to shut down
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
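 *
 * A minimal sketch, assuming the hardware feeding the queue has already
 * been stopped and drained:
 *
 * @code
 * cvmx_cmd_queue_result_t rc = cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(0));
 * if (rc != CVMX_CMD_QUEUE_SUCCESS)
 *     cvmx_dprintf("queue shutdown failed: %d\n", (int)rc);
 * @endcode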
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
    if (qptr == NULL)
    {
        cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to get queue information.\n");
        return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    if (cvmx_cmd_queue_length(queue_id) > 0)
    {
        cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still has data in it.\n");
        return CVMX_CMD_QUEUE_FULL;
    }

    __cvmx_cmd_queue_lock(queue_id, qptr);
    if (qptr->base_ptr_div128)
    {
        cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7), qptr->fpa_pool, 0);
        qptr->base_ptr_div128 = 0;
    }
    __cvmx_cmd_queue_unlock(qptr);

    return CVMX_CMD_QUEUE_SUCCESS;
}


/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @param queue_id Hardware command queue to query
 *
 * @return Number of outstanding commands
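 *
 * For example, a caller might poll for a queue to drain before shutting
 * it down. This is an illustrative sketch; production code would bound
 * the wait with a timeout:
 *
 * @code
 * // Busy-wait until all pending command words have been consumed
 * while (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(0)) > 0)
 *     cvmx_wait(100);
 * @endcode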
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* The cast is here so gcc will check that all values in the
        cvmx_cmd_queue_id_t enumeration are covered by this switch */
    switch ((cvmx_cmd_queue_id_t)(queue_id & 0xff0000))
    {
        case CVMX_CMD_QUEUE_PKO_BASE:
            /* FIXME: Need an atomic lock on CVMX_PKO_REG_READ_IDX. Right now
                we are normally called with the queue lock held, which provides
                a SLIGHT amount of protection */
            cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
            if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
            {
                cvmx_pko_mem_debug9_t debug9;
                debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
                return debug9.cn38xx.doorbell;
            }
            else
            {
                cvmx_pko_mem_debug8_t debug8;
                debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
                if (octeon_has_feature(OCTEON_FEATURE_PKND))
                    return debug8.cn68xx.doorbell;
                else
                    return debug8.cn58xx.doorbell;
            }
        case CVMX_CMD_QUEUE_ZIP:
        case CVMX_CMD_QUEUE_DFA:
        case CVMX_CMD_QUEUE_RAID:
            /* FIXME: Implement other lengths */
            return 0;
        case CVMX_CMD_QUEUE_DMA_BASE:
            if (octeon_has_feature(OCTEON_FEATURE_NPEI))
            {
                cvmx_npei_dmax_counts_t dmax_counts;
                dmax_counts.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS(queue_id & 0x7));
                return dmax_counts.s.dbell;
            }
            else
            {
                cvmx_dpi_dmax_counts_t dmax_counts;
                dmax_counts.u64 = cvmx_read_csr(CVMX_DPI_DMAX_COUNTS(queue_id & 0x7));
                return dmax_counts.s.dbell;
            }
        case CVMX_CMD_QUEUE_END:
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }
    return CVMX_CMD_QUEUE_INVALID_PARAM;
}


/**
 * Return the command buffer to be written to. The purpose of this
 * function is to give CVMX routines access to the low-level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @param queue_id Command queue to query
 *
 * @return Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
    if (qptr && qptr->base_ptr_div128)
        return cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
    else
        return NULL;
}