1232809Sjmallett/***********************license start***************
2232809Sjmallett * Copyright (c) 2003-2012  Cavium Inc. (support@cavium.com). All rights
3232809Sjmallett * reserved.
4232809Sjmallett *
5232809Sjmallett *
6232809Sjmallett * Redistribution and use in source and binary forms, with or without
7232809Sjmallett * modification, are permitted provided that the following conditions are
8232809Sjmallett * met:
9232809Sjmallett *
10232809Sjmallett *   * Redistributions of source code must retain the above copyright
11232809Sjmallett *     notice, this list of conditions and the following disclaimer.
12232809Sjmallett *
13232809Sjmallett *   * Redistributions in binary form must reproduce the above
14232809Sjmallett *     copyright notice, this list of conditions and the following
15232809Sjmallett *     disclaimer in the documentation and/or other materials provided
16232809Sjmallett *     with the distribution.
17232809Sjmallett
18232809Sjmallett *   * Neither the name of Cavium Inc. nor the names of
19232809Sjmallett *     its contributors may be used to endorse or promote products
20232809Sjmallett *     derived from this software without specific prior written
21232809Sjmallett *     permission.
22232809Sjmallett
23232809Sjmallett * This Software, including technical data, may be subject to U.S. export  control
24232809Sjmallett * laws, including the U.S. Export Administration Act and its  associated
25232809Sjmallett * regulations, and may be subject to export or import  regulations in other
26232809Sjmallett * countries.
27232809Sjmallett
28232809Sjmallett * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29232809Sjmallett * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30232809Sjmallett * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31232809Sjmallett * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32232809Sjmallett * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33232809Sjmallett * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34232809Sjmallett * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35232809Sjmallett * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36232809Sjmallett * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37232809Sjmallett * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38232809Sjmallett ***********************license end**************************************/
39232809Sjmallett
40232809Sjmallett
41232809Sjmallett/**
42232809Sjmallett * cvmx-sso-defs.h
43232809Sjmallett *
44232809Sjmallett * Configuration and status register (CSR) type definitions for
45232809Sjmallett * Octeon sso.
46232809Sjmallett *
47232809Sjmallett * This file is auto generated. Do not edit.
48232809Sjmallett *
49232809Sjmallett * <hr>$Revision$<hr>
50232809Sjmallett *
51232809Sjmallett */
52232809Sjmallett#ifndef __CVMX_SSO_DEFS_H__
53232809Sjmallett#define __CVMX_SSO_DEFS_H__
54232809Sjmallett
55232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
56232809Sjmallett#define CVMX_SSO_ACTIVE_CYCLES CVMX_SSO_ACTIVE_CYCLES_FUNC()
57232809Sjmallettstatic inline uint64_t CVMX_SSO_ACTIVE_CYCLES_FUNC(void)
58232809Sjmallett{
59232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
60232809Sjmallett		cvmx_warn("CVMX_SSO_ACTIVE_CYCLES not supported on this chip\n");
61232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010E8ull);
62232809Sjmallett}
63232809Sjmallett#else
64232809Sjmallett#define CVMX_SSO_ACTIVE_CYCLES (CVMX_ADD_IO_SEG(0x00016700000010E8ull))
65232809Sjmallett#endif
66232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
67232809Sjmallett#define CVMX_SSO_BIST_STAT CVMX_SSO_BIST_STAT_FUNC()
68232809Sjmallettstatic inline uint64_t CVMX_SSO_BIST_STAT_FUNC(void)
69232809Sjmallett{
70232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
71232809Sjmallett		cvmx_warn("CVMX_SSO_BIST_STAT not supported on this chip\n");
72232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001078ull);
73232809Sjmallett}
74232809Sjmallett#else
75232809Sjmallett#define CVMX_SSO_BIST_STAT (CVMX_ADD_IO_SEG(0x0001670000001078ull))
76232809Sjmallett#endif
77232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
78232809Sjmallett#define CVMX_SSO_CFG CVMX_SSO_CFG_FUNC()
79232809Sjmallettstatic inline uint64_t CVMX_SSO_CFG_FUNC(void)
80232809Sjmallett{
81232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
82232809Sjmallett		cvmx_warn("CVMX_SSO_CFG not supported on this chip\n");
83232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001088ull);
84232809Sjmallett}
85232809Sjmallett#else
86232809Sjmallett#define CVMX_SSO_CFG (CVMX_ADD_IO_SEG(0x0001670000001088ull))
87232809Sjmallett#endif
88232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
89232809Sjmallett#define CVMX_SSO_DS_PC CVMX_SSO_DS_PC_FUNC()
90232809Sjmallettstatic inline uint64_t CVMX_SSO_DS_PC_FUNC(void)
91232809Sjmallett{
92232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
93232809Sjmallett		cvmx_warn("CVMX_SSO_DS_PC not supported on this chip\n");
94232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001070ull);
95232809Sjmallett}
96232809Sjmallett#else
97232809Sjmallett#define CVMX_SSO_DS_PC (CVMX_ADD_IO_SEG(0x0001670000001070ull))
98232809Sjmallett#endif
99232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
100232809Sjmallett#define CVMX_SSO_ERR CVMX_SSO_ERR_FUNC()
101232809Sjmallettstatic inline uint64_t CVMX_SSO_ERR_FUNC(void)
102232809Sjmallett{
103232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
104232809Sjmallett		cvmx_warn("CVMX_SSO_ERR not supported on this chip\n");
105232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001038ull);
106232809Sjmallett}
107232809Sjmallett#else
108232809Sjmallett#define CVMX_SSO_ERR (CVMX_ADD_IO_SEG(0x0001670000001038ull))
109232809Sjmallett#endif
110232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
111232809Sjmallett#define CVMX_SSO_ERR_ENB CVMX_SSO_ERR_ENB_FUNC()
112232809Sjmallettstatic inline uint64_t CVMX_SSO_ERR_ENB_FUNC(void)
113232809Sjmallett{
114232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
115232809Sjmallett		cvmx_warn("CVMX_SSO_ERR_ENB not supported on this chip\n");
116232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001030ull);
117232809Sjmallett}
118232809Sjmallett#else
119232809Sjmallett#define CVMX_SSO_ERR_ENB (CVMX_ADD_IO_SEG(0x0001670000001030ull))
120232809Sjmallett#endif
121232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
122232809Sjmallett#define CVMX_SSO_FIDX_ECC_CTL CVMX_SSO_FIDX_ECC_CTL_FUNC()
123232809Sjmallettstatic inline uint64_t CVMX_SSO_FIDX_ECC_CTL_FUNC(void)
124232809Sjmallett{
125232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
126232809Sjmallett		cvmx_warn("CVMX_SSO_FIDX_ECC_CTL not supported on this chip\n");
127232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010D0ull);
128232809Sjmallett}
129232809Sjmallett#else
130232809Sjmallett#define CVMX_SSO_FIDX_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010D0ull))
131232809Sjmallett#endif
132232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
133232809Sjmallett#define CVMX_SSO_FIDX_ECC_ST CVMX_SSO_FIDX_ECC_ST_FUNC()
134232809Sjmallettstatic inline uint64_t CVMX_SSO_FIDX_ECC_ST_FUNC(void)
135232809Sjmallett{
136232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
137232809Sjmallett		cvmx_warn("CVMX_SSO_FIDX_ECC_ST not supported on this chip\n");
138232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010D8ull);
139232809Sjmallett}
140232809Sjmallett#else
141232809Sjmallett#define CVMX_SSO_FIDX_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010D8ull))
142232809Sjmallett#endif
143232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
144232809Sjmallett#define CVMX_SSO_FPAGE_CNT CVMX_SSO_FPAGE_CNT_FUNC()
145232809Sjmallettstatic inline uint64_t CVMX_SSO_FPAGE_CNT_FUNC(void)
146232809Sjmallett{
147232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
148232809Sjmallett		cvmx_warn("CVMX_SSO_FPAGE_CNT not supported on this chip\n");
149232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001090ull);
150232809Sjmallett}
151232809Sjmallett#else
152232809Sjmallett#define CVMX_SSO_FPAGE_CNT (CVMX_ADD_IO_SEG(0x0001670000001090ull))
153232809Sjmallett#endif
154232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
155232809Sjmallett#define CVMX_SSO_GWE_CFG CVMX_SSO_GWE_CFG_FUNC()
156232809Sjmallettstatic inline uint64_t CVMX_SSO_GWE_CFG_FUNC(void)
157232809Sjmallett{
158232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
159232809Sjmallett		cvmx_warn("CVMX_SSO_GWE_CFG not supported on this chip\n");
160232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001098ull);
161232809Sjmallett}
162232809Sjmallett#else
163232809Sjmallett#define CVMX_SSO_GWE_CFG (CVMX_ADD_IO_SEG(0x0001670000001098ull))
164232809Sjmallett#endif
165232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
166232809Sjmallett#define CVMX_SSO_IDX_ECC_CTL CVMX_SSO_IDX_ECC_CTL_FUNC()
167232809Sjmallettstatic inline uint64_t CVMX_SSO_IDX_ECC_CTL_FUNC(void)
168232809Sjmallett{
169232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
170232809Sjmallett		cvmx_warn("CVMX_SSO_IDX_ECC_CTL not supported on this chip\n");
171232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010C0ull);
172232809Sjmallett}
173232809Sjmallett#else
174232809Sjmallett#define CVMX_SSO_IDX_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010C0ull))
175232809Sjmallett#endif
176232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
177232809Sjmallett#define CVMX_SSO_IDX_ECC_ST CVMX_SSO_IDX_ECC_ST_FUNC()
178232809Sjmallettstatic inline uint64_t CVMX_SSO_IDX_ECC_ST_FUNC(void)
179232809Sjmallett{
180232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
181232809Sjmallett		cvmx_warn("CVMX_SSO_IDX_ECC_ST not supported on this chip\n");
182232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010C8ull);
183232809Sjmallett}
184232809Sjmallett#else
185232809Sjmallett#define CVMX_SSO_IDX_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010C8ull))
186232809Sjmallett#endif
187232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
188232809Sjmallettstatic inline uint64_t CVMX_SSO_IQ_CNTX(unsigned long offset)
189232809Sjmallett{
190232809Sjmallett	if (!(
191232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
192232809Sjmallett		cvmx_warn("CVMX_SSO_IQ_CNTX(%lu) is invalid on this chip\n", offset);
193232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000009000ull) + ((offset) & 7) * 8;
194232809Sjmallett}
195232809Sjmallett#else
196232809Sjmallett#define CVMX_SSO_IQ_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000009000ull) + ((offset) & 7) * 8)
197232809Sjmallett#endif
198232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
199232809Sjmallett#define CVMX_SSO_IQ_COM_CNT CVMX_SSO_IQ_COM_CNT_FUNC()
200232809Sjmallettstatic inline uint64_t CVMX_SSO_IQ_COM_CNT_FUNC(void)
201232809Sjmallett{
202232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
203232809Sjmallett		cvmx_warn("CVMX_SSO_IQ_COM_CNT not supported on this chip\n");
204232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001058ull);
205232809Sjmallett}
206232809Sjmallett#else
207232809Sjmallett#define CVMX_SSO_IQ_COM_CNT (CVMX_ADD_IO_SEG(0x0001670000001058ull))
208232809Sjmallett#endif
209232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
210232809Sjmallett#define CVMX_SSO_IQ_INT CVMX_SSO_IQ_INT_FUNC()
211232809Sjmallettstatic inline uint64_t CVMX_SSO_IQ_INT_FUNC(void)
212232809Sjmallett{
213232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
214232809Sjmallett		cvmx_warn("CVMX_SSO_IQ_INT not supported on this chip\n");
215232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001048ull);
216232809Sjmallett}
217232809Sjmallett#else
218232809Sjmallett#define CVMX_SSO_IQ_INT (CVMX_ADD_IO_SEG(0x0001670000001048ull))
219232809Sjmallett#endif
220232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
221232809Sjmallett#define CVMX_SSO_IQ_INT_EN CVMX_SSO_IQ_INT_EN_FUNC()
222232809Sjmallettstatic inline uint64_t CVMX_SSO_IQ_INT_EN_FUNC(void)
223232809Sjmallett{
224232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
225232809Sjmallett		cvmx_warn("CVMX_SSO_IQ_INT_EN not supported on this chip\n");
226232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001050ull);
227232809Sjmallett}
228232809Sjmallett#else
229232809Sjmallett#define CVMX_SSO_IQ_INT_EN (CVMX_ADD_IO_SEG(0x0001670000001050ull))
230232809Sjmallett#endif
231232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
232232809Sjmallettstatic inline uint64_t CVMX_SSO_IQ_THRX(unsigned long offset)
233232809Sjmallett{
234232809Sjmallett	if (!(
235232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
236232809Sjmallett		cvmx_warn("CVMX_SSO_IQ_THRX(%lu) is invalid on this chip\n", offset);
237232809Sjmallett	return CVMX_ADD_IO_SEG(0x000167000000A000ull) + ((offset) & 7) * 8;
238232809Sjmallett}
239232809Sjmallett#else
240232809Sjmallett#define CVMX_SSO_IQ_THRX(offset) (CVMX_ADD_IO_SEG(0x000167000000A000ull) + ((offset) & 7) * 8)
241232809Sjmallett#endif
242232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
243232809Sjmallett#define CVMX_SSO_NOS_CNT CVMX_SSO_NOS_CNT_FUNC()
244232809Sjmallettstatic inline uint64_t CVMX_SSO_NOS_CNT_FUNC(void)
245232809Sjmallett{
246232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
247232809Sjmallett		cvmx_warn("CVMX_SSO_NOS_CNT not supported on this chip\n");
248232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001040ull);
249232809Sjmallett}
250232809Sjmallett#else
251232809Sjmallett#define CVMX_SSO_NOS_CNT (CVMX_ADD_IO_SEG(0x0001670000001040ull))
252232809Sjmallett#endif
253232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
254232809Sjmallett#define CVMX_SSO_NW_TIM CVMX_SSO_NW_TIM_FUNC()
255232809Sjmallettstatic inline uint64_t CVMX_SSO_NW_TIM_FUNC(void)
256232809Sjmallett{
257232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
258232809Sjmallett		cvmx_warn("CVMX_SSO_NW_TIM not supported on this chip\n");
259232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001028ull);
260232809Sjmallett}
261232809Sjmallett#else
262232809Sjmallett#define CVMX_SSO_NW_TIM (CVMX_ADD_IO_SEG(0x0001670000001028ull))
263232809Sjmallett#endif
264232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
265232809Sjmallett#define CVMX_SSO_OTH_ECC_CTL CVMX_SSO_OTH_ECC_CTL_FUNC()
266232809Sjmallettstatic inline uint64_t CVMX_SSO_OTH_ECC_CTL_FUNC(void)
267232809Sjmallett{
268232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
269232809Sjmallett		cvmx_warn("CVMX_SSO_OTH_ECC_CTL not supported on this chip\n");
270232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010B0ull);
271232809Sjmallett}
272232809Sjmallett#else
273232809Sjmallett#define CVMX_SSO_OTH_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010B0ull))
274232809Sjmallett#endif
275232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
276232809Sjmallett#define CVMX_SSO_OTH_ECC_ST CVMX_SSO_OTH_ECC_ST_FUNC()
277232809Sjmallettstatic inline uint64_t CVMX_SSO_OTH_ECC_ST_FUNC(void)
278232809Sjmallett{
279232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
280232809Sjmallett		cvmx_warn("CVMX_SSO_OTH_ECC_ST not supported on this chip\n");
281232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010B8ull);
282232809Sjmallett}
283232809Sjmallett#else
284232809Sjmallett#define CVMX_SSO_OTH_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010B8ull))
285232809Sjmallett#endif
286232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
287232809Sjmallett#define CVMX_SSO_PND_ECC_CTL CVMX_SSO_PND_ECC_CTL_FUNC()
288232809Sjmallettstatic inline uint64_t CVMX_SSO_PND_ECC_CTL_FUNC(void)
289232809Sjmallett{
290232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
291232809Sjmallett		cvmx_warn("CVMX_SSO_PND_ECC_CTL not supported on this chip\n");
292232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010A0ull);
293232809Sjmallett}
294232809Sjmallett#else
295232809Sjmallett#define CVMX_SSO_PND_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010A0ull))
296232809Sjmallett#endif
297232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
298232809Sjmallett#define CVMX_SSO_PND_ECC_ST CVMX_SSO_PND_ECC_ST_FUNC()
299232809Sjmallettstatic inline uint64_t CVMX_SSO_PND_ECC_ST_FUNC(void)
300232809Sjmallett{
301232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
302232809Sjmallett		cvmx_warn("CVMX_SSO_PND_ECC_ST not supported on this chip\n");
303232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010A8ull);
304232809Sjmallett}
305232809Sjmallett#else
306232809Sjmallett#define CVMX_SSO_PND_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010A8ull))
307232809Sjmallett#endif
308232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
309232809Sjmallettstatic inline uint64_t CVMX_SSO_PPX_GRP_MSK(unsigned long offset)
310232809Sjmallett{
311232809Sjmallett	if (!(
312232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
313232809Sjmallett		cvmx_warn("CVMX_SSO_PPX_GRP_MSK(%lu) is invalid on this chip\n", offset);
314232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8;
315232809Sjmallett}
316232809Sjmallett#else
317232809Sjmallett#define CVMX_SSO_PPX_GRP_MSK(offset) (CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8)
318232809Sjmallett#endif
319232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
320232809Sjmallettstatic inline uint64_t CVMX_SSO_PPX_QOS_PRI(unsigned long offset)
321232809Sjmallett{
322232809Sjmallett	if (!(
323232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31)))))
324232809Sjmallett		cvmx_warn("CVMX_SSO_PPX_QOS_PRI(%lu) is invalid on this chip\n", offset);
325232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000003000ull) + ((offset) & 31) * 8;
326232809Sjmallett}
327232809Sjmallett#else
328232809Sjmallett#define CVMX_SSO_PPX_QOS_PRI(offset) (CVMX_ADD_IO_SEG(0x0001670000003000ull) + ((offset) & 31) * 8)
329232809Sjmallett#endif
330232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
331232809Sjmallett#define CVMX_SSO_PP_STRICT CVMX_SSO_PP_STRICT_FUNC()
332232809Sjmallettstatic inline uint64_t CVMX_SSO_PP_STRICT_FUNC(void)
333232809Sjmallett{
334232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
335232809Sjmallett		cvmx_warn("CVMX_SSO_PP_STRICT not supported on this chip\n");
336232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010E0ull);
337232809Sjmallett}
338232809Sjmallett#else
339232809Sjmallett#define CVMX_SSO_PP_STRICT (CVMX_ADD_IO_SEG(0x00016700000010E0ull))
340232809Sjmallett#endif
341232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
342232809Sjmallettstatic inline uint64_t CVMX_SSO_QOSX_RND(unsigned long offset)
343232809Sjmallett{
344232809Sjmallett	if (!(
345232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
346232809Sjmallett		cvmx_warn("CVMX_SSO_QOSX_RND(%lu) is invalid on this chip\n", offset);
347232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000002000ull) + ((offset) & 7) * 8;
348232809Sjmallett}
349232809Sjmallett#else
350232809Sjmallett#define CVMX_SSO_QOSX_RND(offset) (CVMX_ADD_IO_SEG(0x0001670000002000ull) + ((offset) & 7) * 8)
351232809Sjmallett#endif
352232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
353232809Sjmallettstatic inline uint64_t CVMX_SSO_QOS_THRX(unsigned long offset)
354232809Sjmallett{
355232809Sjmallett	if (!(
356232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
357232809Sjmallett		cvmx_warn("CVMX_SSO_QOS_THRX(%lu) is invalid on this chip\n", offset);
358232809Sjmallett	return CVMX_ADD_IO_SEG(0x000167000000B000ull) + ((offset) & 7) * 8;
359232809Sjmallett}
360232809Sjmallett#else
361232809Sjmallett#define CVMX_SSO_QOS_THRX(offset) (CVMX_ADD_IO_SEG(0x000167000000B000ull) + ((offset) & 7) * 8)
362232809Sjmallett#endif
363232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
364232809Sjmallett#define CVMX_SSO_QOS_WE CVMX_SSO_QOS_WE_FUNC()
365232809Sjmallettstatic inline uint64_t CVMX_SSO_QOS_WE_FUNC(void)
366232809Sjmallett{
367232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
368232809Sjmallett		cvmx_warn("CVMX_SSO_QOS_WE not supported on this chip\n");
369232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001080ull);
370232809Sjmallett}
371232809Sjmallett#else
372232809Sjmallett#define CVMX_SSO_QOS_WE (CVMX_ADD_IO_SEG(0x0001670000001080ull))
373232809Sjmallett#endif
374232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
375232809Sjmallett#define CVMX_SSO_RESET CVMX_SSO_RESET_FUNC()
376232809Sjmallettstatic inline uint64_t CVMX_SSO_RESET_FUNC(void)
377232809Sjmallett{
378232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
379232809Sjmallett		cvmx_warn("CVMX_SSO_RESET not supported on this chip\n");
380232809Sjmallett	return CVMX_ADD_IO_SEG(0x00016700000010F0ull);
381232809Sjmallett}
382232809Sjmallett#else
383232809Sjmallett#define CVMX_SSO_RESET (CVMX_ADD_IO_SEG(0x00016700000010F0ull))
384232809Sjmallett#endif
385232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
386232809Sjmallettstatic inline uint64_t CVMX_SSO_RWQ_HEAD_PTRX(unsigned long offset)
387232809Sjmallett{
388232809Sjmallett	if (!(
389232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
390232809Sjmallett		cvmx_warn("CVMX_SSO_RWQ_HEAD_PTRX(%lu) is invalid on this chip\n", offset);
391232809Sjmallett	return CVMX_ADD_IO_SEG(0x000167000000C000ull) + ((offset) & 7) * 8;
392232809Sjmallett}
393232809Sjmallett#else
394232809Sjmallett#define CVMX_SSO_RWQ_HEAD_PTRX(offset) (CVMX_ADD_IO_SEG(0x000167000000C000ull) + ((offset) & 7) * 8)
395232809Sjmallett#endif
396232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
397232809Sjmallett#define CVMX_SSO_RWQ_POP_FPTR CVMX_SSO_RWQ_POP_FPTR_FUNC()
398232809Sjmallettstatic inline uint64_t CVMX_SSO_RWQ_POP_FPTR_FUNC(void)
399232809Sjmallett{
400232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
401232809Sjmallett		cvmx_warn("CVMX_SSO_RWQ_POP_FPTR not supported on this chip\n");
402232809Sjmallett	return CVMX_ADD_IO_SEG(0x000167000000C408ull);
403232809Sjmallett}
404232809Sjmallett#else
405232809Sjmallett#define CVMX_SSO_RWQ_POP_FPTR (CVMX_ADD_IO_SEG(0x000167000000C408ull))
406232809Sjmallett#endif
407232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
408232809Sjmallett#define CVMX_SSO_RWQ_PSH_FPTR CVMX_SSO_RWQ_PSH_FPTR_FUNC()
409232809Sjmallettstatic inline uint64_t CVMX_SSO_RWQ_PSH_FPTR_FUNC(void)
410232809Sjmallett{
411232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
412232809Sjmallett		cvmx_warn("CVMX_SSO_RWQ_PSH_FPTR not supported on this chip\n");
413232809Sjmallett	return CVMX_ADD_IO_SEG(0x000167000000C400ull);
414232809Sjmallett}
415232809Sjmallett#else
416232809Sjmallett#define CVMX_SSO_RWQ_PSH_FPTR (CVMX_ADD_IO_SEG(0x000167000000C400ull))
417232809Sjmallett#endif
418232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
419232809Sjmallettstatic inline uint64_t CVMX_SSO_RWQ_TAIL_PTRX(unsigned long offset)
420232809Sjmallett{
421232809Sjmallett	if (!(
422232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
423232809Sjmallett		cvmx_warn("CVMX_SSO_RWQ_TAIL_PTRX(%lu) is invalid on this chip\n", offset);
424232809Sjmallett	return CVMX_ADD_IO_SEG(0x000167000000C200ull) + ((offset) & 7) * 8;
425232809Sjmallett}
426232809Sjmallett#else
427232809Sjmallett#define CVMX_SSO_RWQ_TAIL_PTRX(offset) (CVMX_ADD_IO_SEG(0x000167000000C200ull) + ((offset) & 7) * 8)
428232809Sjmallett#endif
429232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
430232809Sjmallett#define CVMX_SSO_TS_PC CVMX_SSO_TS_PC_FUNC()
431232809Sjmallettstatic inline uint64_t CVMX_SSO_TS_PC_FUNC(void)
432232809Sjmallett{
433232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
434232809Sjmallett		cvmx_warn("CVMX_SSO_TS_PC not supported on this chip\n");
435232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001068ull);
436232809Sjmallett}
437232809Sjmallett#else
438232809Sjmallett#define CVMX_SSO_TS_PC (CVMX_ADD_IO_SEG(0x0001670000001068ull))
439232809Sjmallett#endif
440232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
441232809Sjmallett#define CVMX_SSO_WA_COM_PC CVMX_SSO_WA_COM_PC_FUNC()
442232809Sjmallettstatic inline uint64_t CVMX_SSO_WA_COM_PC_FUNC(void)
443232809Sjmallett{
444232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
445232809Sjmallett		cvmx_warn("CVMX_SSO_WA_COM_PC not supported on this chip\n");
446232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001060ull);
447232809Sjmallett}
448232809Sjmallett#else
449232809Sjmallett#define CVMX_SSO_WA_COM_PC (CVMX_ADD_IO_SEG(0x0001670000001060ull))
450232809Sjmallett#endif
451232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
452232809Sjmallettstatic inline uint64_t CVMX_SSO_WA_PCX(unsigned long offset)
453232809Sjmallett{
454232809Sjmallett	if (!(
455232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 7)))))
456232809Sjmallett		cvmx_warn("CVMX_SSO_WA_PCX(%lu) is invalid on this chip\n", offset);
457232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000005000ull) + ((offset) & 7) * 8;
458232809Sjmallett}
459232809Sjmallett#else
460232809Sjmallett#define CVMX_SSO_WA_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000005000ull) + ((offset) & 7) * 8)
461232809Sjmallett#endif
462232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
463232809Sjmallett#define CVMX_SSO_WQ_INT CVMX_SSO_WQ_INT_FUNC()
464232809Sjmallettstatic inline uint64_t CVMX_SSO_WQ_INT_FUNC(void)
465232809Sjmallett{
466232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
467232809Sjmallett		cvmx_warn("CVMX_SSO_WQ_INT not supported on this chip\n");
468232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001000ull);
469232809Sjmallett}
470232809Sjmallett#else
471232809Sjmallett#define CVMX_SSO_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000001000ull))
472232809Sjmallett#endif
473232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
474232809Sjmallettstatic inline uint64_t CVMX_SSO_WQ_INT_CNTX(unsigned long offset)
475232809Sjmallett{
476232809Sjmallett	if (!(
477232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
478232809Sjmallett		cvmx_warn("CVMX_SSO_WQ_INT_CNTX(%lu) is invalid on this chip\n", offset);
479232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000008000ull) + ((offset) & 63) * 8;
480232809Sjmallett}
481232809Sjmallett#else
482232809Sjmallett#define CVMX_SSO_WQ_INT_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000008000ull) + ((offset) & 63) * 8)
483232809Sjmallett#endif
484232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
485232809Sjmallett#define CVMX_SSO_WQ_INT_PC CVMX_SSO_WQ_INT_PC_FUNC()
486232809Sjmallettstatic inline uint64_t CVMX_SSO_WQ_INT_PC_FUNC(void)
487232809Sjmallett{
488232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
489232809Sjmallett		cvmx_warn("CVMX_SSO_WQ_INT_PC not supported on this chip\n");
490232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001020ull);
491232809Sjmallett}
492232809Sjmallett#else
493232809Sjmallett#define CVMX_SSO_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000001020ull))
494232809Sjmallett#endif
495232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
496232809Sjmallettstatic inline uint64_t CVMX_SSO_WQ_INT_THRX(unsigned long offset)
497232809Sjmallett{
498232809Sjmallett	if (!(
499232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
500232809Sjmallett		cvmx_warn("CVMX_SSO_WQ_INT_THRX(%lu) is invalid on this chip\n", offset);
501232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8;
502232809Sjmallett}
503232809Sjmallett#else
504232809Sjmallett#define CVMX_SSO_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8)
505232809Sjmallett#endif
506232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
507232809Sjmallett#define CVMX_SSO_WQ_IQ_DIS CVMX_SSO_WQ_IQ_DIS_FUNC()
508232809Sjmallettstatic inline uint64_t CVMX_SSO_WQ_IQ_DIS_FUNC(void)
509232809Sjmallett{
510232809Sjmallett	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
511232809Sjmallett		cvmx_warn("CVMX_SSO_WQ_IQ_DIS not supported on this chip\n");
512232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000001010ull);
513232809Sjmallett}
514232809Sjmallett#else
515232809Sjmallett#define CVMX_SSO_WQ_IQ_DIS (CVMX_ADD_IO_SEG(0x0001670000001010ull))
516232809Sjmallett#endif
517232809Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
518232809Sjmallettstatic inline uint64_t CVMX_SSO_WS_PCX(unsigned long offset)
519232809Sjmallett{
520232809Sjmallett	if (!(
521232809Sjmallett	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 63)))))
522232809Sjmallett		cvmx_warn("CVMX_SSO_WS_PCX(%lu) is invalid on this chip\n", offset);
523232809Sjmallett	return CVMX_ADD_IO_SEG(0x0001670000004000ull) + ((offset) & 63) * 8;
524232809Sjmallett}
525232809Sjmallett#else
526232809Sjmallett#define CVMX_SSO_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000004000ull) + ((offset) & 63) * 8)
527232809Sjmallett#endif
528232809Sjmallett
529232809Sjmallett/**
530232809Sjmallett * cvmx_sso_active_cycles
531232809Sjmallett *
532232809Sjmallett * SSO_ACTIVE_CYCLES = SSO cycles SSO active
533232809Sjmallett *
534232809Sjmallett * This register counts every sclk cycle that the SSO clocks are active.
535232809Sjmallett * **NOTE: Added in pass 2.0
536232809Sjmallett */
537232809Sjmallettunion cvmx_sso_active_cycles {
538232809Sjmallett	uint64_t u64;
539232809Sjmallett	struct cvmx_sso_active_cycles_s {
540232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
541232809Sjmallett	uint64_t act_cyc                      : 64; /**< Counts number of active cycles. */
542232809Sjmallett#else
543232809Sjmallett	uint64_t act_cyc                      : 64;
544232809Sjmallett#endif
545232809Sjmallett	} s;
546232809Sjmallett	struct cvmx_sso_active_cycles_s       cn68xx;
547232809Sjmallett};
548232809Sjmalletttypedef union cvmx_sso_active_cycles cvmx_sso_active_cycles_t;
549232809Sjmallett
550232809Sjmallett/**
551232809Sjmallett * cvmx_sso_bist_stat
552232809Sjmallett *
553232809Sjmallett * SSO_BIST_STAT = SSO BIST Status Register
554232809Sjmallett *
555232809Sjmallett * Contains the BIST status for the SSO memories ('0' = pass, '1' = fail).
556232809Sjmallett * Note that PP BIST status is not reported here as it was in previous designs.
557232809Sjmallett *
558232809Sjmallett *   There may be more for DDR interface buffers.
559232809Sjmallett *   It's possible that a RAM will be used for SSO_PP_QOS_RND.
560232809Sjmallett */
561232809Sjmallettunion cvmx_sso_bist_stat {
562232809Sjmallett	uint64_t u64;
563232809Sjmallett	struct cvmx_sso_bist_stat_s {
564232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
565232809Sjmallett	uint64_t reserved_62_63               : 2;
566232809Sjmallett	uint64_t odu_pref                     : 2;  /**< ODU Prefetch memory BIST status */
567232809Sjmallett	uint64_t reserved_54_59               : 6;
568232809Sjmallett	uint64_t fptr                         : 2;  /**< FPTR memory BIST status */
569232809Sjmallett	uint64_t reserved_45_51               : 7;
570232809Sjmallett	uint64_t rwo_dat                      : 1;  /**< RWO_DAT memory BIST status */
571232809Sjmallett	uint64_t rwo                          : 2;  /**< RWO memory BIST status */
572232809Sjmallett	uint64_t reserved_35_41               : 7;
573232809Sjmallett	uint64_t rwi_dat                      : 1;  /**< RWI_DAT memory BIST status */
574232809Sjmallett	uint64_t reserved_32_33               : 2;
575232809Sjmallett	uint64_t soc                          : 1;  /**< SSO CAM BIST status */
576232809Sjmallett	uint64_t reserved_28_30               : 3;
577232809Sjmallett	uint64_t ncbo                         : 4;  /**< NCBO transmitter memory BIST status */
578232809Sjmallett	uint64_t reserved_21_23               : 3;
579232809Sjmallett	uint64_t index                        : 1;  /**< Index memory BIST status */
580232809Sjmallett	uint64_t reserved_17_19               : 3;
581232809Sjmallett	uint64_t fidx                         : 1;  /**< Forward index memory BIST status */
582232809Sjmallett	uint64_t reserved_10_15               : 6;
583232809Sjmallett	uint64_t pend                         : 2;  /**< Pending switch memory BIST status */
584232809Sjmallett	uint64_t reserved_2_7                 : 6;
585232809Sjmallett	uint64_t oth                          : 2;  /**< WQP, GRP memory BIST status */
586232809Sjmallett#else
587232809Sjmallett	uint64_t oth                          : 2;
588232809Sjmallett	uint64_t reserved_2_7                 : 6;
589232809Sjmallett	uint64_t pend                         : 2;
590232809Sjmallett	uint64_t reserved_10_15               : 6;
591232809Sjmallett	uint64_t fidx                         : 1;
592232809Sjmallett	uint64_t reserved_17_19               : 3;
593232809Sjmallett	uint64_t index                        : 1;
594232809Sjmallett	uint64_t reserved_21_23               : 3;
595232809Sjmallett	uint64_t ncbo                         : 4;
596232809Sjmallett	uint64_t reserved_28_30               : 3;
597232809Sjmallett	uint64_t soc                          : 1;
598232809Sjmallett	uint64_t reserved_32_33               : 2;
599232809Sjmallett	uint64_t rwi_dat                      : 1;
600232809Sjmallett	uint64_t reserved_35_41               : 7;
601232809Sjmallett	uint64_t rwo                          : 2;
602232809Sjmallett	uint64_t rwo_dat                      : 1;
603232809Sjmallett	uint64_t reserved_45_51               : 7;
604232809Sjmallett	uint64_t fptr                         : 2;
605232809Sjmallett	uint64_t reserved_54_59               : 6;
606232809Sjmallett	uint64_t odu_pref                     : 2;
607232809Sjmallett	uint64_t reserved_62_63               : 2;
608232809Sjmallett#endif
609232809Sjmallett	} s;
610232809Sjmallett	struct cvmx_sso_bist_stat_s           cn68xx;
611232809Sjmallett	struct cvmx_sso_bist_stat_cn68xxp1 {
612232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
613232809Sjmallett	uint64_t reserved_54_63               : 10;
614232809Sjmallett	uint64_t fptr                         : 2;  /**< FPTR memory BIST status */
615232809Sjmallett	uint64_t reserved_45_51               : 7;
616232809Sjmallett	uint64_t rwo_dat                      : 1;  /**< RWO_DAT memory BIST status */
617232809Sjmallett	uint64_t rwo                          : 2;  /**< RWO memory BIST status */
618232809Sjmallett	uint64_t reserved_35_41               : 7;
619232809Sjmallett	uint64_t rwi_dat                      : 1;  /**< RWI_DAT memory BIST status */
620232809Sjmallett	uint64_t reserved_32_33               : 2;
621232809Sjmallett	uint64_t soc                          : 1;  /**< SSO CAM BIST status */
622232809Sjmallett	uint64_t reserved_28_30               : 3;
623232809Sjmallett	uint64_t ncbo                         : 4;  /**< NCBO transmitter memory BIST status */
624232809Sjmallett	uint64_t reserved_21_23               : 3;
625232809Sjmallett	uint64_t index                        : 1;  /**< Index memory BIST status */
626232809Sjmallett	uint64_t reserved_17_19               : 3;
627232809Sjmallett	uint64_t fidx                         : 1;  /**< Forward index memory BIST status */
628232809Sjmallett	uint64_t reserved_10_15               : 6;
629232809Sjmallett	uint64_t pend                         : 2;  /**< Pending switch memory BIST status */
630232809Sjmallett	uint64_t reserved_2_7                 : 6;
631232809Sjmallett	uint64_t oth                          : 2;  /**< WQP, GRP memory BIST status */
632232809Sjmallett#else
633232809Sjmallett	uint64_t oth                          : 2;
634232809Sjmallett	uint64_t reserved_2_7                 : 6;
635232809Sjmallett	uint64_t pend                         : 2;
636232809Sjmallett	uint64_t reserved_10_15               : 6;
637232809Sjmallett	uint64_t fidx                         : 1;
638232809Sjmallett	uint64_t reserved_17_19               : 3;
639232809Sjmallett	uint64_t index                        : 1;
640232809Sjmallett	uint64_t reserved_21_23               : 3;
641232809Sjmallett	uint64_t ncbo                         : 4;
642232809Sjmallett	uint64_t reserved_28_30               : 3;
643232809Sjmallett	uint64_t soc                          : 1;
644232809Sjmallett	uint64_t reserved_32_33               : 2;
645232809Sjmallett	uint64_t rwi_dat                      : 1;
646232809Sjmallett	uint64_t reserved_35_41               : 7;
647232809Sjmallett	uint64_t rwo                          : 2;
648232809Sjmallett	uint64_t rwo_dat                      : 1;
649232809Sjmallett	uint64_t reserved_45_51               : 7;
650232809Sjmallett	uint64_t fptr                         : 2;
651232809Sjmallett	uint64_t reserved_54_63               : 10;
652232809Sjmallett#endif
653232809Sjmallett	} cn68xxp1;
654232809Sjmallett};
655232809Sjmalletttypedef union cvmx_sso_bist_stat cvmx_sso_bist_stat_t;
656232809Sjmallett
657232809Sjmallett/**
658232809Sjmallett * cvmx_sso_cfg
659232809Sjmallett *
660232809Sjmallett * SSO_CFG = SSO Config
661232809Sjmallett *
662232809Sjmallett * This register is an assortment of various SSO configuration bits.
663232809Sjmallett */
664232809Sjmallettunion cvmx_sso_cfg {
665232809Sjmallett	uint64_t u64;
666232809Sjmallett	struct cvmx_sso_cfg_s {
667232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
668232809Sjmallett	uint64_t reserved_16_63               : 48;
669232809Sjmallett	uint64_t qck_gw_rsp_adj               : 3;  /**< Fast GET_WORK response fine adjustment
670232809Sjmallett                                                         Allowed values are 0, 1, and 2 (0 is quickest) */
671232809Sjmallett	uint64_t qck_gw_rsp_dis               : 1;  /**< Disable faster response to GET_WORK */
672232809Sjmallett	uint64_t qck_sw_dis                   : 1;  /**< Disable faster switch to UNSCHEDULED on GET_WORK */
673232809Sjmallett	uint64_t rwq_alloc_dis                : 1;  /**< Disable FPA Alloc Requests when SSO_FPAGE_CNT < 16 */
674232809Sjmallett	uint64_t soc_ccam_dis                 : 1;  /**< Disable power saving SOC conditional CAM
675232809Sjmallett                                                         (**NOTE: Added in pass 2.0) */
676232809Sjmallett	uint64_t sso_cclk_dis                 : 1;  /**< Disable power saving SSO conditional clocking
677232809Sjmallett                                                         (**NOTE: Added in pass 2.0) */
678232809Sjmallett	uint64_t rwo_flush                    : 1;  /**< Flush RWO engine
679232809Sjmallett                                                         Allows outbound NCB entries to go immediately rather
680232809Sjmallett                                                         than waiting for a complete fill packet. This register
681232809Sjmallett                                                         is one-shot and clears itself each time it is set. */
682232809Sjmallett	uint64_t wfe_thr                      : 1;  /**< Use 1 Work-fetch engine (instead of 4) */
683232809Sjmallett	uint64_t rwio_byp_dis                 : 1;  /**< Disable Bypass path in RWI/RWO Engines */
684232809Sjmallett	uint64_t rwq_byp_dis                  : 1;  /**< Disable Bypass path in RWQ Engine */
685232809Sjmallett	uint64_t stt                          : 1;  /**< STT Setting for RW Stores */
686232809Sjmallett	uint64_t ldt                          : 1;  /**< LDT Setting for RW Loads */
687232809Sjmallett	uint64_t dwb                          : 1;  /**< DWB Setting for Return Page Requests
688232809Sjmallett                                                         1 = 2 128B cache pages to issue DWB for
689232809Sjmallett                                                         0 = 0 128B cache pages ro issue DWB for */
690232809Sjmallett	uint64_t rwen                         : 1;  /**< Enable RWI/RWO operations
691232809Sjmallett                                                         This bit should be set after SSO_RWQ_HEAD_PTRX and
692232809Sjmallett                                                         SSO_RWQ_TAIL_PTRX have been programmed. */
693232809Sjmallett#else
694232809Sjmallett	uint64_t rwen                         : 1;
695232809Sjmallett	uint64_t dwb                          : 1;
696232809Sjmallett	uint64_t ldt                          : 1;
697232809Sjmallett	uint64_t stt                          : 1;
698232809Sjmallett	uint64_t rwq_byp_dis                  : 1;
699232809Sjmallett	uint64_t rwio_byp_dis                 : 1;
700232809Sjmallett	uint64_t wfe_thr                      : 1;
701232809Sjmallett	uint64_t rwo_flush                    : 1;
702232809Sjmallett	uint64_t sso_cclk_dis                 : 1;
703232809Sjmallett	uint64_t soc_ccam_dis                 : 1;
704232809Sjmallett	uint64_t rwq_alloc_dis                : 1;
705232809Sjmallett	uint64_t qck_sw_dis                   : 1;
706232809Sjmallett	uint64_t qck_gw_rsp_dis               : 1;
707232809Sjmallett	uint64_t qck_gw_rsp_adj               : 3;
708232809Sjmallett	uint64_t reserved_16_63               : 48;
709232809Sjmallett#endif
710232809Sjmallett	} s;
711232809Sjmallett	struct cvmx_sso_cfg_s                 cn68xx;
712232809Sjmallett	struct cvmx_sso_cfg_cn68xxp1 {
713232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
714232809Sjmallett	uint64_t reserved_8_63                : 56;
715232809Sjmallett	uint64_t rwo_flush                    : 1;  /**< Flush RWO engine
716232809Sjmallett                                                         Allows outbound NCB entries to go immediately rather
717232809Sjmallett                                                         than waiting for a complete fill packet. This register
718232809Sjmallett                                                         is one-shot and clears itself each time it is set. */
719232809Sjmallett	uint64_t wfe_thr                      : 1;  /**< Use 1 Work-fetch engine (instead of 4) */
720232809Sjmallett	uint64_t rwio_byp_dis                 : 1;  /**< Disable Bypass path in RWI/RWO Engines */
721232809Sjmallett	uint64_t rwq_byp_dis                  : 1;  /**< Disable Bypass path in RWQ Engine */
722232809Sjmallett	uint64_t stt                          : 1;  /**< STT Setting for RW Stores */
723232809Sjmallett	uint64_t ldt                          : 1;  /**< LDT Setting for RW Loads */
724232809Sjmallett	uint64_t dwb                          : 1;  /**< DWB Setting for Return Page Requests
725232809Sjmallett                                                         1 = 2 128B cache pages to issue DWB for
726232809Sjmallett                                                         0 = 0 128B cache pages ro issue DWB for */
727232809Sjmallett	uint64_t rwen                         : 1;  /**< Enable RWI/RWO operations
728232809Sjmallett                                                         This bit should be set after SSO_RWQ_HEAD_PTRX and
729232809Sjmallett                                                         SSO_RWQ_TAIL_PTRX have been programmed. */
730232809Sjmallett#else
731232809Sjmallett	uint64_t rwen                         : 1;
732232809Sjmallett	uint64_t dwb                          : 1;
733232809Sjmallett	uint64_t ldt                          : 1;
734232809Sjmallett	uint64_t stt                          : 1;
735232809Sjmallett	uint64_t rwq_byp_dis                  : 1;
736232809Sjmallett	uint64_t rwio_byp_dis                 : 1;
737232809Sjmallett	uint64_t wfe_thr                      : 1;
738232809Sjmallett	uint64_t rwo_flush                    : 1;
739232809Sjmallett	uint64_t reserved_8_63                : 56;
740232809Sjmallett#endif
741232809Sjmallett	} cn68xxp1;
742232809Sjmallett};
743232809Sjmalletttypedef union cvmx_sso_cfg cvmx_sso_cfg_t;
744232809Sjmallett
745232809Sjmallett/**
746232809Sjmallett * cvmx_sso_ds_pc
747232809Sjmallett *
748232809Sjmallett * SSO_DS_PC = SSO De-Schedule Performance Counter
749232809Sjmallett *
750232809Sjmallett * Counts the number of de-schedule requests.
751232809Sjmallett * Counter rolls over through zero when max value exceeded.
752232809Sjmallett */
753232809Sjmallettunion cvmx_sso_ds_pc {
754232809Sjmallett	uint64_t u64;
755232809Sjmallett	struct cvmx_sso_ds_pc_s {
756232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
757232809Sjmallett	uint64_t ds_pc                        : 64; /**< De-schedule performance counter */
758232809Sjmallett#else
759232809Sjmallett	uint64_t ds_pc                        : 64;
760232809Sjmallett#endif
761232809Sjmallett	} s;
762232809Sjmallett	struct cvmx_sso_ds_pc_s               cn68xx;
763232809Sjmallett	struct cvmx_sso_ds_pc_s               cn68xxp1;
764232809Sjmallett};
765232809Sjmalletttypedef union cvmx_sso_ds_pc cvmx_sso_ds_pc_t;
766232809Sjmallett
767232809Sjmallett/**
768232809Sjmallett * cvmx_sso_err
769232809Sjmallett *
770232809Sjmallett * SSO_ERR = SSO Error Register
771232809Sjmallett *
772232809Sjmallett * Contains ECC and other misc error bits.
773232809Sjmallett *
774232809Sjmallett * <45> The free page error bit will assert when SSO_FPAGE_CNT <= 16 and
775232809Sjmallett *      SSO_CFG[RWEN] is 1.  Software will want to disable the interrupt
776232809Sjmallett *      associated with this error when recovering SSO pointers from the
777232809Sjmallett *      FPA and SSO.
778232809Sjmallett *
779232809Sjmallett * This register also contains the illegal operation error bits:
780232809Sjmallett *
781232809Sjmallett * <42> Received ADDWQ with tag specified as EMPTY
782232809Sjmallett * <41> Received illegal opcode
783232809Sjmallett * <40> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE
784232809Sjmallett *      from WS with CLR_NSCHED pending
785232809Sjmallett * <39> Received CLR_NSCHED
786232809Sjmallett *      from WS with SWTAG_DESCH/DESCH/CLR_NSCHED pending
787232809Sjmallett * <38> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE
788232809Sjmallett *      from WS with ALLOC_WE pending
789232809Sjmallett * <37> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE/CLR_NSCHED
790232809Sjmallett *      from WS with GET_WORK pending
791232809Sjmallett * <36> Received SWTAG_FULL/SWTAG_DESCH
792232809Sjmallett *      with tag specified as UNSCHEDULED
793232809Sjmallett * <35> Received SWTAG/SWTAG_FULL/SWTAG_DESCH
794232809Sjmallett *      with tag specified as EMPTY
795232809Sjmallett * <34> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/GET_WORK
796232809Sjmallett *      from WS with pending tag switch to ORDERED or ATOMIC
797232809Sjmallett * <33> Received SWTAG/SWTAG_DESCH/DESCH/UPD_WQP
798232809Sjmallett *      from WS in UNSCHEDULED state
799232809Sjmallett * <32> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP
800232809Sjmallett *      from WS in EMPTY state
801232809Sjmallett */
802232809Sjmallettunion cvmx_sso_err {
803232809Sjmallett	uint64_t u64;
804232809Sjmallett	struct cvmx_sso_err_s {
805232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
806232809Sjmallett	uint64_t reserved_48_63               : 16;
807232809Sjmallett	uint64_t bfp                          : 1;  /**< Bad Fill Packet error
808232809Sjmallett                                                         Last byte of the fill packet did not match 8'h1a */
809232809Sjmallett	uint64_t awe                          : 1;  /**< Out-of-memory error (ADDWQ Request is dropped) */
810232809Sjmallett	uint64_t fpe                          : 1;  /**< Free page error */
811232809Sjmallett	uint64_t reserved_43_44               : 2;
812232809Sjmallett	uint64_t iop                          : 11; /**< Illegal operation errors */
813232809Sjmallett	uint64_t reserved_12_31               : 20;
814232809Sjmallett	uint64_t pnd_dbe0                     : 1;  /**< Double bit error for even PND RAM */
815232809Sjmallett	uint64_t pnd_sbe0                     : 1;  /**< Single bit error for even PND RAM */
816232809Sjmallett	uint64_t pnd_dbe1                     : 1;  /**< Double bit error for odd PND RAM */
817232809Sjmallett	uint64_t pnd_sbe1                     : 1;  /**< Single bit error for odd PND RAM */
818232809Sjmallett	uint64_t oth_dbe0                     : 1;  /**< Double bit error for even OTH RAM */
819232809Sjmallett	uint64_t oth_sbe0                     : 1;  /**< Single bit error for even OTH RAM */
820232809Sjmallett	uint64_t oth_dbe1                     : 1;  /**< Double bit error for odd OTH RAM */
821232809Sjmallett	uint64_t oth_sbe1                     : 1;  /**< Single bit error for odd OTH RAM */
822232809Sjmallett	uint64_t idx_dbe                      : 1;  /**< Double bit error for IDX RAM */
823232809Sjmallett	uint64_t idx_sbe                      : 1;  /**< Single bit error for IDX RAM */
824232809Sjmallett	uint64_t fidx_dbe                     : 1;  /**< Double bit error for FIDX RAM */
825232809Sjmallett	uint64_t fidx_sbe                     : 1;  /**< Single bit error for FIDX RAM */
826232809Sjmallett#else
827232809Sjmallett	uint64_t fidx_sbe                     : 1;
828232809Sjmallett	uint64_t fidx_dbe                     : 1;
829232809Sjmallett	uint64_t idx_sbe                      : 1;
830232809Sjmallett	uint64_t idx_dbe                      : 1;
831232809Sjmallett	uint64_t oth_sbe1                     : 1;
832232809Sjmallett	uint64_t oth_dbe1                     : 1;
833232809Sjmallett	uint64_t oth_sbe0                     : 1;
834232809Sjmallett	uint64_t oth_dbe0                     : 1;
835232809Sjmallett	uint64_t pnd_sbe1                     : 1;
836232809Sjmallett	uint64_t pnd_dbe1                     : 1;
837232809Sjmallett	uint64_t pnd_sbe0                     : 1;
838232809Sjmallett	uint64_t pnd_dbe0                     : 1;
839232809Sjmallett	uint64_t reserved_12_31               : 20;
840232809Sjmallett	uint64_t iop                          : 11;
841232809Sjmallett	uint64_t reserved_43_44               : 2;
842232809Sjmallett	uint64_t fpe                          : 1;
843232809Sjmallett	uint64_t awe                          : 1;
844232809Sjmallett	uint64_t bfp                          : 1;
845232809Sjmallett	uint64_t reserved_48_63               : 16;
846232809Sjmallett#endif
847232809Sjmallett	} s;
848232809Sjmallett	struct cvmx_sso_err_s                 cn68xx;
849232809Sjmallett	struct cvmx_sso_err_s                 cn68xxp1;
850232809Sjmallett};
851232809Sjmalletttypedef union cvmx_sso_err cvmx_sso_err_t;
852232809Sjmallett
853232809Sjmallett/**
854232809Sjmallett * cvmx_sso_err_enb
855232809Sjmallett *
856232809Sjmallett * SSO_ERR_ENB = SSO Error Enable Register
857232809Sjmallett *
858232809Sjmallett * Contains the interrupt enables corresponding to SSO_ERR.
859232809Sjmallett */
860232809Sjmallettunion cvmx_sso_err_enb {
861232809Sjmallett	uint64_t u64;
862232809Sjmallett	struct cvmx_sso_err_enb_s {
863232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
864232809Sjmallett	uint64_t reserved_48_63               : 16;
865232809Sjmallett	uint64_t bfp_ie                       : 1;  /**< Bad Fill Packet error interrupt enable */
866232809Sjmallett	uint64_t awe_ie                       : 1;  /**< Add-work error interrupt enable */
867232809Sjmallett	uint64_t fpe_ie                       : 1;  /**< Free Page error interrupt enable */
868232809Sjmallett	uint64_t reserved_43_44               : 2;
869232809Sjmallett	uint64_t iop_ie                       : 11; /**< Illegal operation interrupt enables */
870232809Sjmallett	uint64_t reserved_12_31               : 20;
871232809Sjmallett	uint64_t pnd_dbe0_ie                  : 1;  /**< Double bit error interrupt enable for even PND RAM */
872232809Sjmallett	uint64_t pnd_sbe0_ie                  : 1;  /**< Single bit error interrupt enable for even PND RAM */
873232809Sjmallett	uint64_t pnd_dbe1_ie                  : 1;  /**< Double bit error interrupt enable for odd PND RAM */
874232809Sjmallett	uint64_t pnd_sbe1_ie                  : 1;  /**< Single bit error interrupt enable for odd PND RAM */
875232809Sjmallett	uint64_t oth_dbe0_ie                  : 1;  /**< Double bit error interrupt enable for even OTH RAM */
876232809Sjmallett	uint64_t oth_sbe0_ie                  : 1;  /**< Single bit error interrupt enable for even OTH RAM */
877232809Sjmallett	uint64_t oth_dbe1_ie                  : 1;  /**< Double bit error interrupt enable for odd OTH RAM */
878232809Sjmallett	uint64_t oth_sbe1_ie                  : 1;  /**< Single bit error interrupt enable for odd OTH RAM */
879232809Sjmallett	uint64_t idx_dbe_ie                   : 1;  /**< Double bit error interrupt enable for IDX RAM */
880232809Sjmallett	uint64_t idx_sbe_ie                   : 1;  /**< Single bit error interrupt enable for IDX RAM */
881232809Sjmallett	uint64_t fidx_dbe_ie                  : 1;  /**< Double bit error interrupt enable for FIDX RAM */
882232809Sjmallett	uint64_t fidx_sbe_ie                  : 1;  /**< Single bit error interrupt enable for FIDX RAM */
883232809Sjmallett#else
884232809Sjmallett	uint64_t fidx_sbe_ie                  : 1;
885232809Sjmallett	uint64_t fidx_dbe_ie                  : 1;
886232809Sjmallett	uint64_t idx_sbe_ie                   : 1;
887232809Sjmallett	uint64_t idx_dbe_ie                   : 1;
888232809Sjmallett	uint64_t oth_sbe1_ie                  : 1;
889232809Sjmallett	uint64_t oth_dbe1_ie                  : 1;
890232809Sjmallett	uint64_t oth_sbe0_ie                  : 1;
891232809Sjmallett	uint64_t oth_dbe0_ie                  : 1;
892232809Sjmallett	uint64_t pnd_sbe1_ie                  : 1;
893232809Sjmallett	uint64_t pnd_dbe1_ie                  : 1;
894232809Sjmallett	uint64_t pnd_sbe0_ie                  : 1;
895232809Sjmallett	uint64_t pnd_dbe0_ie                  : 1;
896232809Sjmallett	uint64_t reserved_12_31               : 20;
897232809Sjmallett	uint64_t iop_ie                       : 11;
898232809Sjmallett	uint64_t reserved_43_44               : 2;
899232809Sjmallett	uint64_t fpe_ie                       : 1;
900232809Sjmallett	uint64_t awe_ie                       : 1;
901232809Sjmallett	uint64_t bfp_ie                       : 1;
902232809Sjmallett	uint64_t reserved_48_63               : 16;
903232809Sjmallett#endif
904232809Sjmallett	} s;
905232809Sjmallett	struct cvmx_sso_err_enb_s             cn68xx;
906232809Sjmallett	struct cvmx_sso_err_enb_s             cn68xxp1;
907232809Sjmallett};
908232809Sjmalletttypedef union cvmx_sso_err_enb cvmx_sso_err_enb_t;
909232809Sjmallett
910232809Sjmallett/**
911232809Sjmallett * cvmx_sso_fidx_ecc_ctl
912232809Sjmallett *
913232809Sjmallett * SSO_FIDX_ECC_CTL = SSO FIDX ECC Control
914232809Sjmallett *
915232809Sjmallett */
916232809Sjmallettunion cvmx_sso_fidx_ecc_ctl {
917232809Sjmallett	uint64_t u64;
918232809Sjmallett	struct cvmx_sso_fidx_ecc_ctl_s {
919232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
920232809Sjmallett	uint64_t reserved_3_63                : 61;
921232809Sjmallett	uint64_t flip_synd                    : 2;  /**< Testing feature. Flip Syndrom to generate single or
922232809Sjmallett                                                         double bit error for the FIDX RAM. */
923232809Sjmallett	uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 5 bit ECC
924232809Sjmallett                                                         correct logic for the FIDX RAM. */
925232809Sjmallett#else
926232809Sjmallett	uint64_t ecc_ena                      : 1;
927232809Sjmallett	uint64_t flip_synd                    : 2;
928232809Sjmallett	uint64_t reserved_3_63                : 61;
929232809Sjmallett#endif
930232809Sjmallett	} s;
931232809Sjmallett	struct cvmx_sso_fidx_ecc_ctl_s        cn68xx;
932232809Sjmallett	struct cvmx_sso_fidx_ecc_ctl_s        cn68xxp1;
933232809Sjmallett};
934232809Sjmalletttypedef union cvmx_sso_fidx_ecc_ctl cvmx_sso_fidx_ecc_ctl_t;
935232809Sjmallett
936232809Sjmallett/**
937232809Sjmallett * cvmx_sso_fidx_ecc_st
938232809Sjmallett *
939232809Sjmallett * SSO_FIDX_ECC_ST = SSO FIDX ECC Status
940232809Sjmallett *
941232809Sjmallett */
942232809Sjmallettunion cvmx_sso_fidx_ecc_st {
943232809Sjmallett	uint64_t u64;
944232809Sjmallett	struct cvmx_sso_fidx_ecc_st_s {
945232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
946232809Sjmallett	uint64_t reserved_27_63               : 37;
947232809Sjmallett	uint64_t addr                         : 11; /**< Latch the address for latest sde/dbe occured
948232809Sjmallett                                                         for the FIDX RAM */
949232809Sjmallett	uint64_t reserved_9_15                : 7;
950232809Sjmallett	uint64_t syndrom                      : 5;  /**< Report the latest error syndrom for the
951232809Sjmallett                                                         FIDX RAM */
952232809Sjmallett	uint64_t reserved_0_3                 : 4;
953232809Sjmallett#else
954232809Sjmallett	uint64_t reserved_0_3                 : 4;
955232809Sjmallett	uint64_t syndrom                      : 5;
956232809Sjmallett	uint64_t reserved_9_15                : 7;
957232809Sjmallett	uint64_t addr                         : 11;
958232809Sjmallett	uint64_t reserved_27_63               : 37;
959232809Sjmallett#endif
960232809Sjmallett	} s;
961232809Sjmallett	struct cvmx_sso_fidx_ecc_st_s         cn68xx;
962232809Sjmallett	struct cvmx_sso_fidx_ecc_st_s         cn68xxp1;
963232809Sjmallett};
964232809Sjmalletttypedef union cvmx_sso_fidx_ecc_st cvmx_sso_fidx_ecc_st_t;
965232809Sjmallett
966232809Sjmallett/**
967232809Sjmallett * cvmx_sso_fpage_cnt
968232809Sjmallett *
969232809Sjmallett * SSO_FPAGE_CNT = SSO Free Page Cnt
970232809Sjmallett *
971232809Sjmallett * This register keeps track of the number of free pages pointers available for use in external memory.
972232809Sjmallett */
973232809Sjmallettunion cvmx_sso_fpage_cnt {
974232809Sjmallett	uint64_t u64;
975232809Sjmallett	struct cvmx_sso_fpage_cnt_s {
976232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
977232809Sjmallett	uint64_t reserved_32_63               : 32;
978232809Sjmallett	uint64_t fpage_cnt                    : 32; /**< Free Page Cnt
979232809Sjmallett                                                         HW updates this register. Writes to this register
980232809Sjmallett                                                         are only for diagnostic purposes */
981232809Sjmallett#else
982232809Sjmallett	uint64_t fpage_cnt                    : 32;
983232809Sjmallett	uint64_t reserved_32_63               : 32;
984232809Sjmallett#endif
985232809Sjmallett	} s;
986232809Sjmallett	struct cvmx_sso_fpage_cnt_s           cn68xx;
987232809Sjmallett	struct cvmx_sso_fpage_cnt_s           cn68xxp1;
988232809Sjmallett};
989232809Sjmalletttypedef union cvmx_sso_fpage_cnt cvmx_sso_fpage_cnt_t;
990232809Sjmallett
991232809Sjmallett/**
992232809Sjmallett * cvmx_sso_gwe_cfg
993232809Sjmallett *
994232809Sjmallett * SSO_GWE_CFG = SSO Get-Work Examiner Configuration
995232809Sjmallett *
996232809Sjmallett * This register controls the operation of the Get-Work Examiner (GWE)
997232809Sjmallett */
998232809Sjmallettunion cvmx_sso_gwe_cfg {
999232809Sjmallett	uint64_t u64;
1000232809Sjmallett	struct cvmx_sso_gwe_cfg_s {
1001232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1002232809Sjmallett	uint64_t reserved_12_63               : 52;
1003232809Sjmallett	uint64_t odu_ffpgw_dis                : 1;  /**< Disable flushing ODU on periodic restart of GWE */
1004232809Sjmallett	uint64_t gwe_rfpgw_dis                : 1;  /**< Disable periodic restart of GWE for pending get_work */
1005232809Sjmallett	uint64_t odu_prf_dis                  : 1;  /**< Disable ODU-initiated prefetches of WQEs into L2C
1006232809Sjmallett                                                         For diagnostic use only. */
1007232809Sjmallett	uint64_t odu_bmp_dis                  : 1;  /**< Disable ODU bumps.
1008232809Sjmallett                                                         If SSO_PP_STRICT is true, could
1009232809Sjmallett                                                         prevent forward progress under some circumstances.
1010232809Sjmallett                                                         For diagnostic use only. */
1011232809Sjmallett	uint64_t reserved_5_7                 : 3;
1012232809Sjmallett	uint64_t gwe_hvy_dis                  : 1;  /**< Disable GWE automatic, proportional weight-increase
1013232809Sjmallett                                                         mechanism and use SSO_QOSX_RND values as-is.
1014232809Sjmallett                                                         For diagnostic use only. */
1015232809Sjmallett	uint64_t gwe_poe                      : 1;  /**< Pause GWE on extracts
1016232809Sjmallett                                                         For diagnostic use only. */
1017232809Sjmallett	uint64_t gwe_fpor                     : 1;  /**< Flush GWE pipeline when restarting GWE.
1018232809Sjmallett                                                         For diagnostic use only. */
1019232809Sjmallett	uint64_t gwe_rah                      : 1;  /**< Begin at head of input queues when restarting GWE.
1020232809Sjmallett                                                         For diagnostic use only. */
1021232809Sjmallett	uint64_t gwe_dis                      : 1;  /**< Disable Get-Work Examiner */
1022232809Sjmallett#else
1023232809Sjmallett	uint64_t gwe_dis                      : 1;
1024232809Sjmallett	uint64_t gwe_rah                      : 1;
1025232809Sjmallett	uint64_t gwe_fpor                     : 1;
1026232809Sjmallett	uint64_t gwe_poe                      : 1;
1027232809Sjmallett	uint64_t gwe_hvy_dis                  : 1;
1028232809Sjmallett	uint64_t reserved_5_7                 : 3;
1029232809Sjmallett	uint64_t odu_bmp_dis                  : 1;
1030232809Sjmallett	uint64_t odu_prf_dis                  : 1;
1031232809Sjmallett	uint64_t gwe_rfpgw_dis                : 1;
1032232809Sjmallett	uint64_t odu_ffpgw_dis                : 1;
1033232809Sjmallett	uint64_t reserved_12_63               : 52;
1034232809Sjmallett#endif
1035232809Sjmallett	} s;
1036232809Sjmallett	struct cvmx_sso_gwe_cfg_s             cn68xx;
1037232809Sjmallett	struct cvmx_sso_gwe_cfg_cn68xxp1 {
1038232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1039232809Sjmallett	uint64_t reserved_4_63                : 60;
1040232809Sjmallett	uint64_t gwe_poe                      : 1;  /**< Pause GWE on extracts
1041232809Sjmallett                                                         For diagnostic use only. */
1042232809Sjmallett	uint64_t gwe_fpor                     : 1;  /**< Flush GWE pipeline when restarting GWE.
1043232809Sjmallett                                                         For diagnostic use only. */
1044232809Sjmallett	uint64_t gwe_rah                      : 1;  /**< Begin at head of input queues when restarting GWE.
1045232809Sjmallett                                                         For diagnostic use only. */
1046232809Sjmallett	uint64_t gwe_dis                      : 1;  /**< Disable Get-Work Examiner */
1047232809Sjmallett#else
1048232809Sjmallett	uint64_t gwe_dis                      : 1;
1049232809Sjmallett	uint64_t gwe_rah                      : 1;
1050232809Sjmallett	uint64_t gwe_fpor                     : 1;
1051232809Sjmallett	uint64_t gwe_poe                      : 1;
1052232809Sjmallett	uint64_t reserved_4_63                : 60;
1053232809Sjmallett#endif
1054232809Sjmallett	} cn68xxp1;
1055232809Sjmallett};
1056232809Sjmalletttypedef union cvmx_sso_gwe_cfg cvmx_sso_gwe_cfg_t;
1057232809Sjmallett
1058232809Sjmallett/**
1059232809Sjmallett * cvmx_sso_idx_ecc_ctl
1060232809Sjmallett *
1061232809Sjmallett * SSO_IDX_ECC_CTL = SSO IDX ECC Control
1062232809Sjmallett *
1063232809Sjmallett */
1064232809Sjmallettunion cvmx_sso_idx_ecc_ctl {
1065232809Sjmallett	uint64_t u64;
1066232809Sjmallett	struct cvmx_sso_idx_ecc_ctl_s {
1067232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1068232809Sjmallett	uint64_t reserved_3_63                : 61;
1069232809Sjmallett	uint64_t flip_synd                    : 2;  /**< Testing feature. Flip Syndrom to generate single or
1070232809Sjmallett                                                         double bit error for the IDX RAM. */
1071232809Sjmallett	uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 5 bit ECC
1072232809Sjmallett                                                         correct logic for the IDX RAM. */
1073232809Sjmallett#else
1074232809Sjmallett	uint64_t ecc_ena                      : 1;
1075232809Sjmallett	uint64_t flip_synd                    : 2;
1076232809Sjmallett	uint64_t reserved_3_63                : 61;
1077232809Sjmallett#endif
1078232809Sjmallett	} s;
1079232809Sjmallett	struct cvmx_sso_idx_ecc_ctl_s         cn68xx;
1080232809Sjmallett	struct cvmx_sso_idx_ecc_ctl_s         cn68xxp1;
1081232809Sjmallett};
1082232809Sjmalletttypedef union cvmx_sso_idx_ecc_ctl cvmx_sso_idx_ecc_ctl_t;
1083232809Sjmallett
1084232809Sjmallett/**
1085232809Sjmallett * cvmx_sso_idx_ecc_st
1086232809Sjmallett *
1087232809Sjmallett * SSO_IDX_ECC_ST = SSO IDX ECC Status
1088232809Sjmallett *
1089232809Sjmallett */
1090232809Sjmallettunion cvmx_sso_idx_ecc_st {
1091232809Sjmallett	uint64_t u64;
1092232809Sjmallett	struct cvmx_sso_idx_ecc_st_s {
1093232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1094232809Sjmallett	uint64_t reserved_27_63               : 37;
1095232809Sjmallett	uint64_t addr                         : 11; /**< Latch the address for latest sde/dbe occured
1096232809Sjmallett                                                         for the IDX RAM */
1097232809Sjmallett	uint64_t reserved_9_15                : 7;
1098232809Sjmallett	uint64_t syndrom                      : 5;  /**< Report the latest error syndrom for the
1099232809Sjmallett                                                         IDX RAM */
1100232809Sjmallett	uint64_t reserved_0_3                 : 4;
1101232809Sjmallett#else
1102232809Sjmallett	uint64_t reserved_0_3                 : 4;
1103232809Sjmallett	uint64_t syndrom                      : 5;
1104232809Sjmallett	uint64_t reserved_9_15                : 7;
1105232809Sjmallett	uint64_t addr                         : 11;
1106232809Sjmallett	uint64_t reserved_27_63               : 37;
1107232809Sjmallett#endif
1108232809Sjmallett	} s;
1109232809Sjmallett	struct cvmx_sso_idx_ecc_st_s          cn68xx;
1110232809Sjmallett	struct cvmx_sso_idx_ecc_st_s          cn68xxp1;
1111232809Sjmallett};
1112232809Sjmalletttypedef union cvmx_sso_idx_ecc_st cvmx_sso_idx_ecc_st_t;
1113232809Sjmallett
1114232809Sjmallett/**
1115232809Sjmallett * cvmx_sso_iq_cnt#
1116232809Sjmallett *
1117232809Sjmallett * CSR reserved addresses: (64): 0x8200..0x83f8
1118232809Sjmallett * CSR align addresses: ===========================================================================================================
1119232809Sjmallett * SSO_IQ_CNTX = SSO Input Queue Count Register
1120232809Sjmallett *               (one per QOS level)
1121232809Sjmallett *
1122232809Sjmallett * Contains a read-only count of the number of work queue entries for each QOS
1123232809Sjmallett * level. Counts both in-unit and in-memory entries.
1124232809Sjmallett */
1125232809Sjmallettunion cvmx_sso_iq_cntx {
1126232809Sjmallett	uint64_t u64;
1127232809Sjmallett	struct cvmx_sso_iq_cntx_s {
1128232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1129232809Sjmallett	uint64_t reserved_32_63               : 32;
1130232809Sjmallett	uint64_t iq_cnt                       : 32; /**< Input queue count for QOS level X */
1131232809Sjmallett#else
1132232809Sjmallett	uint64_t iq_cnt                       : 32;
1133232809Sjmallett	uint64_t reserved_32_63               : 32;
1134232809Sjmallett#endif
1135232809Sjmallett	} s;
1136232809Sjmallett	struct cvmx_sso_iq_cntx_s             cn68xx;
1137232809Sjmallett	struct cvmx_sso_iq_cntx_s             cn68xxp1;
1138232809Sjmallett};
1139232809Sjmalletttypedef union cvmx_sso_iq_cntx cvmx_sso_iq_cntx_t;
1140232809Sjmallett
1141232809Sjmallett/**
1142232809Sjmallett * cvmx_sso_iq_com_cnt
1143232809Sjmallett *
1144232809Sjmallett * SSO_IQ_COM_CNT = SSO Input Queue Combined Count Register
1145232809Sjmallett *
1146232809Sjmallett * Contains a read-only count of the total number of work queue entries in all
1147232809Sjmallett * QOS levels.  Counts both in-unit and in-memory entries.
1148232809Sjmallett */
1149232809Sjmallettunion cvmx_sso_iq_com_cnt {
1150232809Sjmallett	uint64_t u64;
1151232809Sjmallett	struct cvmx_sso_iq_com_cnt_s {
1152232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1153232809Sjmallett	uint64_t reserved_32_63               : 32;
1154232809Sjmallett	uint64_t iq_cnt                       : 32; /**< Input queue combined count */
1155232809Sjmallett#else
1156232809Sjmallett	uint64_t iq_cnt                       : 32;
1157232809Sjmallett	uint64_t reserved_32_63               : 32;
1158232809Sjmallett#endif
1159232809Sjmallett	} s;
1160232809Sjmallett	struct cvmx_sso_iq_com_cnt_s          cn68xx;
1161232809Sjmallett	struct cvmx_sso_iq_com_cnt_s          cn68xxp1;
1162232809Sjmallett};
1163232809Sjmalletttypedef union cvmx_sso_iq_com_cnt cvmx_sso_iq_com_cnt_t;
1164232809Sjmallett
1165232809Sjmallett/**
1166232809Sjmallett * cvmx_sso_iq_int
1167232809Sjmallett *
1168232809Sjmallett * SSO_IQ_INT = SSO Input Queue Interrupt Register
1169232809Sjmallett *
1170232809Sjmallett * Contains the bits (one per QOS level) that can trigger the input queue
1171232809Sjmallett * interrupt.  An IQ_INT bit will be set if SSO_IQ_CNT#QOS# changes and the
1172232809Sjmallett * resulting value is equal to SSO_IQ_THR#QOS#.
1173232809Sjmallett */
1174232809Sjmallettunion cvmx_sso_iq_int {
1175232809Sjmallett	uint64_t u64;
1176232809Sjmallett	struct cvmx_sso_iq_int_s {
1177232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1178232809Sjmallett	uint64_t reserved_8_63                : 56;
1179232809Sjmallett	uint64_t iq_int                       : 8;  /**< Input queue interrupt bits */
1180232809Sjmallett#else
1181232809Sjmallett	uint64_t iq_int                       : 8;
1182232809Sjmallett	uint64_t reserved_8_63                : 56;
1183232809Sjmallett#endif
1184232809Sjmallett	} s;
1185232809Sjmallett	struct cvmx_sso_iq_int_s              cn68xx;
1186232809Sjmallett	struct cvmx_sso_iq_int_s              cn68xxp1;
1187232809Sjmallett};
1188232809Sjmalletttypedef union cvmx_sso_iq_int cvmx_sso_iq_int_t;
1189232809Sjmallett
1190232809Sjmallett/**
1191232809Sjmallett * cvmx_sso_iq_int_en
1192232809Sjmallett *
1193232809Sjmallett * SSO_IQ_INT_EN = SSO Input Queue Interrupt Enable Register
1194232809Sjmallett *
1195232809Sjmallett * Contains the bits (one per QOS level) that enable the input queue interrupt.
1196232809Sjmallett */
1197232809Sjmallettunion cvmx_sso_iq_int_en {
1198232809Sjmallett	uint64_t u64;
1199232809Sjmallett	struct cvmx_sso_iq_int_en_s {
1200232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1201232809Sjmallett	uint64_t reserved_8_63                : 56;
1202232809Sjmallett	uint64_t int_en                       : 8;  /**< Input queue interrupt enable bits */
1203232809Sjmallett#else
1204232809Sjmallett	uint64_t int_en                       : 8;
1205232809Sjmallett	uint64_t reserved_8_63                : 56;
1206232809Sjmallett#endif
1207232809Sjmallett	} s;
1208232809Sjmallett	struct cvmx_sso_iq_int_en_s           cn68xx;
1209232809Sjmallett	struct cvmx_sso_iq_int_en_s           cn68xxp1;
1210232809Sjmallett};
1211232809Sjmalletttypedef union cvmx_sso_iq_int_en cvmx_sso_iq_int_en_t;
1212232809Sjmallett
1213232809Sjmallett/**
1214232809Sjmallett * cvmx_sso_iq_thr#
1215232809Sjmallett *
1216232809Sjmallett * CSR reserved addresses: (24): 0x9040..0x90f8
1217232809Sjmallett * CSR align addresses: ===========================================================================================================
1218232809Sjmallett * SSO_IQ_THRX = SSO Input Queue Threshold Register
1219232809Sjmallett *               (one per QOS level)
1220232809Sjmallett *
1221232809Sjmallett * Threshold value for triggering input queue interrupts.
1222232809Sjmallett */
1223232809Sjmallettunion cvmx_sso_iq_thrx {
1224232809Sjmallett	uint64_t u64;
1225232809Sjmallett	struct cvmx_sso_iq_thrx_s {
1226232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1227232809Sjmallett	uint64_t reserved_32_63               : 32;
1228232809Sjmallett	uint64_t iq_thr                       : 32; /**< Input queue threshold for QOS level X */
1229232809Sjmallett#else
1230232809Sjmallett	uint64_t iq_thr                       : 32;
1231232809Sjmallett	uint64_t reserved_32_63               : 32;
1232232809Sjmallett#endif
1233232809Sjmallett	} s;
1234232809Sjmallett	struct cvmx_sso_iq_thrx_s             cn68xx;
1235232809Sjmallett	struct cvmx_sso_iq_thrx_s             cn68xxp1;
1236232809Sjmallett};
1237232809Sjmalletttypedef union cvmx_sso_iq_thrx cvmx_sso_iq_thrx_t;
1238232809Sjmallett
1239232809Sjmallett/**
1240232809Sjmallett * cvmx_sso_nos_cnt
1241232809Sjmallett *
1242232809Sjmallett * SSO_NOS_CNT = SSO No-schedule Count Register
1243232809Sjmallett *
1244232809Sjmallett * Contains the number of work queue entries on the no-schedule list.
1245232809Sjmallett */
1246232809Sjmallettunion cvmx_sso_nos_cnt {
1247232809Sjmallett	uint64_t u64;
1248232809Sjmallett	struct cvmx_sso_nos_cnt_s {
1249232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1250232809Sjmallett	uint64_t reserved_12_63               : 52;
1251232809Sjmallett	uint64_t nos_cnt                      : 12; /**< Number of work queue entries on the no-schedule list */
1252232809Sjmallett#else
1253232809Sjmallett	uint64_t nos_cnt                      : 12;
1254232809Sjmallett	uint64_t reserved_12_63               : 52;
1255232809Sjmallett#endif
1256232809Sjmallett	} s;
1257232809Sjmallett	struct cvmx_sso_nos_cnt_s             cn68xx;
1258232809Sjmallett	struct cvmx_sso_nos_cnt_s             cn68xxp1;
1259232809Sjmallett};
1260232809Sjmalletttypedef union cvmx_sso_nos_cnt cvmx_sso_nos_cnt_t;
1261232809Sjmallett
1262232809Sjmallett/**
1263232809Sjmallett * cvmx_sso_nw_tim
1264232809Sjmallett *
1265232809Sjmallett * SSO_NW_TIM = SSO New Work Timer Period Register
1266232809Sjmallett *
1267232809Sjmallett * Sets the minimum period for a new work request timeout.  Period is specified
1268232809Sjmallett * in n-1 notation where the increment value is 1024 clock cycles.  Thus, a
1269232809Sjmallett * value of 0x0 in this register translates to 1024 cycles, 0x1 translates to
1270232809Sjmallett * 2048 cycles, 0x2 translates to 3072 cycles, etc...  Note: the maximum period
1271232809Sjmallett * for a new work request timeout is 2 times the minimum period.  Note: the new
1272232809Sjmallett * work request timeout counter is reset when this register is written.
1273232809Sjmallett *
1274232809Sjmallett * There are two new work request timeout cases:
1275232809Sjmallett *
1276232809Sjmallett * - WAIT bit clear.  The new work request can timeout if the timer expires
1277232809Sjmallett *   before the pre-fetch engine has reached the end of all work queues.  This
1278232809Sjmallett *   can occur if the executable work queue entry is deep in the queue and the
1279232809Sjmallett *   pre-fetch engine is subject to many resets (i.e. high switch, de-schedule,
1280232809Sjmallett *   or new work load from other PP's).  Thus, it is possible for a PP to
1281232809Sjmallett *   receive a work response with the NO_WORK bit set even though there was at
1282232809Sjmallett *   least one executable entry in the work queues.  The other (and typical)
1283232809Sjmallett *   scenario for receiving a NO_WORK response with the WAIT bit clear is that
1284232809Sjmallett *   the pre-fetch engine has reached the end of all work queues without
1285232809Sjmallett *   finding executable work.
1286232809Sjmallett *
1287232809Sjmallett * - WAIT bit set.  The new work request can timeout if the timer expires
1288232809Sjmallett *   before the pre-fetch engine has found executable work.  In this case, the
1289232809Sjmallett *   only scenario where the PP will receive a work response with the NO_WORK
1290232809Sjmallett *   bit set is if the timer expires.  Note: it is still possible for a PP to
1291232809Sjmallett *   receive a NO_WORK response even though there was at least one executable
1292232809Sjmallett *   entry in the work queues.
1293232809Sjmallett *
1294232809Sjmallett * In either case, it's important to note that switches and de-schedules are
1295232809Sjmallett * higher priority operations that can cause the pre-fetch engine to reset.
1296232809Sjmallett * Thus in a system with many switches or de-schedules occurring, it's possible
1297232809Sjmallett * for the new work timer to expire (resulting in NO_WORK responses) before the
1298232809Sjmallett * pre-fetch engine is able to get very deep into the work queues.
1299232809Sjmallett */
1300232809Sjmallettunion cvmx_sso_nw_tim {
1301232809Sjmallett	uint64_t u64;
1302232809Sjmallett	struct cvmx_sso_nw_tim_s {
1303232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1304232809Sjmallett	uint64_t reserved_10_63               : 54;
1305232809Sjmallett	uint64_t nw_tim                       : 10; /**< New work timer period */
1306232809Sjmallett#else
1307232809Sjmallett	uint64_t nw_tim                       : 10;
1308232809Sjmallett	uint64_t reserved_10_63               : 54;
1309232809Sjmallett#endif
1310232809Sjmallett	} s;
1311232809Sjmallett	struct cvmx_sso_nw_tim_s              cn68xx;
1312232809Sjmallett	struct cvmx_sso_nw_tim_s              cn68xxp1;
1313232809Sjmallett};
1314232809Sjmalletttypedef union cvmx_sso_nw_tim cvmx_sso_nw_tim_t;
1315232809Sjmallett
1316232809Sjmallett/**
1317232809Sjmallett * cvmx_sso_oth_ecc_ctl
1318232809Sjmallett *
1319232809Sjmallett * SSO_OTH_ECC_CTL = SSO OTH ECC Control
1320232809Sjmallett *
1321232809Sjmallett */
1322232809Sjmallettunion cvmx_sso_oth_ecc_ctl {
1323232809Sjmallett	uint64_t u64;
1324232809Sjmallett	struct cvmx_sso_oth_ecc_ctl_s {
1325232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1326232809Sjmallett	uint64_t reserved_6_63                : 58;
1327232809Sjmallett	uint64_t flip_synd1                   : 2;  /**< Testing feature. Flip Syndrom to generate single or
1328232809Sjmallett                                                         double bit error for the odd OTH RAM. */
1329232809Sjmallett	uint64_t ecc_ena1                     : 1;  /**< ECC Enable: When set will enable the 7 bit ECC
1330232809Sjmallett                                                         correct logic for the odd OTH RAM. */
1331232809Sjmallett	uint64_t flip_synd0                   : 2;  /**< Testing feature. Flip Syndrom to generate single or
1332232809Sjmallett                                                         double bit error for the even OTH RAM. */
1333232809Sjmallett	uint64_t ecc_ena0                     : 1;  /**< ECC Enable: When set will enable the 7 bit ECC
1334232809Sjmallett                                                         correct logic for the even OTH RAM. */
1335232809Sjmallett#else
1336232809Sjmallett	uint64_t ecc_ena0                     : 1;
1337232809Sjmallett	uint64_t flip_synd0                   : 2;
1338232809Sjmallett	uint64_t ecc_ena1                     : 1;
1339232809Sjmallett	uint64_t flip_synd1                   : 2;
1340232809Sjmallett	uint64_t reserved_6_63                : 58;
1341232809Sjmallett#endif
1342232809Sjmallett	} s;
1343232809Sjmallett	struct cvmx_sso_oth_ecc_ctl_s         cn68xx;
1344232809Sjmallett	struct cvmx_sso_oth_ecc_ctl_s         cn68xxp1;
1345232809Sjmallett};
1346232809Sjmalletttypedef union cvmx_sso_oth_ecc_ctl cvmx_sso_oth_ecc_ctl_t;
1347232809Sjmallett
1348232809Sjmallett/**
1349232809Sjmallett * cvmx_sso_oth_ecc_st
1350232809Sjmallett *
1351232809Sjmallett * SSO_OTH_ECC_ST = SSO OTH ECC Status
1352232809Sjmallett *
1353232809Sjmallett */
1354232809Sjmallettunion cvmx_sso_oth_ecc_st {
1355232809Sjmallett	uint64_t u64;
1356232809Sjmallett	struct cvmx_sso_oth_ecc_st_s {
1357232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1358232809Sjmallett	uint64_t reserved_59_63               : 5;
1359232809Sjmallett	uint64_t addr1                        : 11; /**< Latch the address for latest sde/dbe occured
1360232809Sjmallett                                                         for the odd OTH RAM */
1361232809Sjmallett	uint64_t reserved_43_47               : 5;
1362232809Sjmallett	uint64_t syndrom1                     : 7;  /**< Report the latest error syndrom for the odd
1363232809Sjmallett                                                         OTH RAM */
1364232809Sjmallett	uint64_t reserved_27_35               : 9;
1365232809Sjmallett	uint64_t addr0                        : 11; /**< Latch the address for latest sde/dbe occured
1366232809Sjmallett                                                         for the even OTH RAM */
1367232809Sjmallett	uint64_t reserved_11_15               : 5;
1368232809Sjmallett	uint64_t syndrom0                     : 7;  /**< Report the latest error syndrom for the even
1369232809Sjmallett                                                         OTH RAM */
1370232809Sjmallett	uint64_t reserved_0_3                 : 4;
1371232809Sjmallett#else
1372232809Sjmallett	uint64_t reserved_0_3                 : 4;
1373232809Sjmallett	uint64_t syndrom0                     : 7;
1374232809Sjmallett	uint64_t reserved_11_15               : 5;
1375232809Sjmallett	uint64_t addr0                        : 11;
1376232809Sjmallett	uint64_t reserved_27_35               : 9;
1377232809Sjmallett	uint64_t syndrom1                     : 7;
1378232809Sjmallett	uint64_t reserved_43_47               : 5;
1379232809Sjmallett	uint64_t addr1                        : 11;
1380232809Sjmallett	uint64_t reserved_59_63               : 5;
1381232809Sjmallett#endif
1382232809Sjmallett	} s;
1383232809Sjmallett	struct cvmx_sso_oth_ecc_st_s          cn68xx;
1384232809Sjmallett	struct cvmx_sso_oth_ecc_st_s          cn68xxp1;
1385232809Sjmallett};
1386232809Sjmalletttypedef union cvmx_sso_oth_ecc_st cvmx_sso_oth_ecc_st_t;
1387232809Sjmallett
1388232809Sjmallett/**
1389232809Sjmallett * cvmx_sso_pnd_ecc_ctl
1390232809Sjmallett *
1391232809Sjmallett * SSO_PND_ECC_CTL = SSO PND ECC Control
1392232809Sjmallett *
1393232809Sjmallett */
1394232809Sjmallettunion cvmx_sso_pnd_ecc_ctl {
1395232809Sjmallett	uint64_t u64;
1396232809Sjmallett	struct cvmx_sso_pnd_ecc_ctl_s {
1397232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1398232809Sjmallett	uint64_t reserved_6_63                : 58;
1399232809Sjmallett	uint64_t flip_synd1                   : 2;  /**< Testing feature. Flip Syndrom to generate single or
1400232809Sjmallett                                                         double bit error for the odd PND RAM. */
1401232809Sjmallett	uint64_t ecc_ena1                     : 1;  /**< ECC Enable: When set will enable the 7 bit ECC
1402232809Sjmallett                                                         correct logic for the odd PND RAM. */
1403232809Sjmallett	uint64_t flip_synd0                   : 2;  /**< Testing feature. Flip Syndrom to generate single or
1404232809Sjmallett                                                         double bit error for the even PND RAM. */
1405232809Sjmallett	uint64_t ecc_ena0                     : 1;  /**< ECC Enable: When set will enable the 7 bit ECC
1406232809Sjmallett                                                         correct logic for the even PND RAM. */
1407232809Sjmallett#else
1408232809Sjmallett	uint64_t ecc_ena0                     : 1;
1409232809Sjmallett	uint64_t flip_synd0                   : 2;
1410232809Sjmallett	uint64_t ecc_ena1                     : 1;
1411232809Sjmallett	uint64_t flip_synd1                   : 2;
1412232809Sjmallett	uint64_t reserved_6_63                : 58;
1413232809Sjmallett#endif
1414232809Sjmallett	} s;
1415232809Sjmallett	struct cvmx_sso_pnd_ecc_ctl_s         cn68xx;
1416232809Sjmallett	struct cvmx_sso_pnd_ecc_ctl_s         cn68xxp1;
1417232809Sjmallett};
1418232809Sjmalletttypedef union cvmx_sso_pnd_ecc_ctl cvmx_sso_pnd_ecc_ctl_t;
1419232809Sjmallett
1420232809Sjmallett/**
1421232809Sjmallett * cvmx_sso_pnd_ecc_st
1422232809Sjmallett *
1423232809Sjmallett * SSO_PND_ECC_ST = SSO PND ECC Status
1424232809Sjmallett *
1425232809Sjmallett */
1426232809Sjmallettunion cvmx_sso_pnd_ecc_st {
1427232809Sjmallett	uint64_t u64;
1428232809Sjmallett	struct cvmx_sso_pnd_ecc_st_s {
1429232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1430232809Sjmallett	uint64_t reserved_59_63               : 5;
1431232809Sjmallett	uint64_t addr1                        : 11; /**< Latch the address for latest sde/dbe occured
1432232809Sjmallett                                                         for the odd PND RAM */
1433232809Sjmallett	uint64_t reserved_43_47               : 5;
1434232809Sjmallett	uint64_t syndrom1                     : 7;  /**< Report the latest error syndrom for the odd
1435232809Sjmallett                                                         PND RAM */
1436232809Sjmallett	uint64_t reserved_27_35               : 9;
1437232809Sjmallett	uint64_t addr0                        : 11; /**< Latch the address for latest sde/dbe occured
1438232809Sjmallett                                                         for the even PND RAM */
1439232809Sjmallett	uint64_t reserved_11_15               : 5;
1440232809Sjmallett	uint64_t syndrom0                     : 7;  /**< Report the latest error syndrom for the even
1441232809Sjmallett                                                         PND RAM */
1442232809Sjmallett	uint64_t reserved_0_3                 : 4;
1443232809Sjmallett#else
1444232809Sjmallett	uint64_t reserved_0_3                 : 4;
1445232809Sjmallett	uint64_t syndrom0                     : 7;
1446232809Sjmallett	uint64_t reserved_11_15               : 5;
1447232809Sjmallett	uint64_t addr0                        : 11;
1448232809Sjmallett	uint64_t reserved_27_35               : 9;
1449232809Sjmallett	uint64_t syndrom1                     : 7;
1450232809Sjmallett	uint64_t reserved_43_47               : 5;
1451232809Sjmallett	uint64_t addr1                        : 11;
1452232809Sjmallett	uint64_t reserved_59_63               : 5;
1453232809Sjmallett#endif
1454232809Sjmallett	} s;
1455232809Sjmallett	struct cvmx_sso_pnd_ecc_st_s          cn68xx;
1456232809Sjmallett	struct cvmx_sso_pnd_ecc_st_s          cn68xxp1;
1457232809Sjmallett};
1458232809Sjmalletttypedef union cvmx_sso_pnd_ecc_st cvmx_sso_pnd_ecc_st_t;
1459232809Sjmallett
1460232809Sjmallett/**
1461232809Sjmallett * cvmx_sso_pp#_grp_msk
1462232809Sjmallett *
1463232809Sjmallett * CSR reserved addresses: (24): 0x5040..0x50f8
1464232809Sjmallett * CSR align addresses: ===========================================================================================================
1465232809Sjmallett * SSO_PPX_GRP_MSK = SSO PP Group Mask Register
1466232809Sjmallett *                   (one bit per group per PP)
1467232809Sjmallett *
1468232809Sjmallett * Selects which group(s) a PP belongs to.  A '1' in any bit position sets the
1469232809Sjmallett * PP's membership in the corresponding group.  A value of 0x0 will prevent the
1470232809Sjmallett * PP from receiving new work.
1471232809Sjmallett *
1472232809Sjmallett * Note that these do not contain QOS level priorities for each PP.  This is a
1473232809Sjmallett * change from previous POW designs.
1474232809Sjmallett */
1475232809Sjmallettunion cvmx_sso_ppx_grp_msk {
1476232809Sjmallett	uint64_t u64;
1477232809Sjmallett	struct cvmx_sso_ppx_grp_msk_s {
1478232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1479232809Sjmallett	uint64_t grp_msk                      : 64; /**< PPX group mask */
1480232809Sjmallett#else
1481232809Sjmallett	uint64_t grp_msk                      : 64;
1482232809Sjmallett#endif
1483232809Sjmallett	} s;
1484232809Sjmallett	struct cvmx_sso_ppx_grp_msk_s         cn68xx;
1485232809Sjmallett	struct cvmx_sso_ppx_grp_msk_s         cn68xxp1;
1486232809Sjmallett};
1487232809Sjmalletttypedef union cvmx_sso_ppx_grp_msk cvmx_sso_ppx_grp_msk_t;
1488232809Sjmallett
1489232809Sjmallett/**
1490232809Sjmallett * cvmx_sso_pp#_qos_pri
1491232809Sjmallett *
1492232809Sjmallett * CSR reserved addresses: (56): 0x2040..0x21f8
1493232809Sjmallett * CSR align addresses: ===========================================================================================================
1494232809Sjmallett * SSO_PP(0..31)_QOS_PRI = SSO PP QOS Priority Register
1495232809Sjmallett *                                (one field per IQ per PP)
1496232809Sjmallett *
1497232809Sjmallett * Contains the QOS level priorities for each PP.
1498232809Sjmallett *      0x0       is the highest priority
1499232809Sjmallett *      0x7       is the lowest priority
1500232809Sjmallett *      0xf       prevents the PP from receiving work from that QOS level
1501232809Sjmallett *      0x8-0xe   Reserved
1502232809Sjmallett *
1503232809Sjmallett * For a given PP, priorities should begin at 0x0, and remain contiguous
1504232809Sjmallett * throughout the range.  Failure to do so may result in severe
1505232809Sjmallett * performance degradation.
1506232809Sjmallett *
1507232809Sjmallett *
1508232809Sjmallett * Priorities for IQs 0..7
1509232809Sjmallett */
1510232809Sjmallettunion cvmx_sso_ppx_qos_pri {
1511232809Sjmallett	uint64_t u64;
1512232809Sjmallett	struct cvmx_sso_ppx_qos_pri_s {
1513232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1514232809Sjmallett	uint64_t reserved_60_63               : 4;
1515232809Sjmallett	uint64_t qos7_pri                     : 4;  /**< QOS7 priority for PPX */
1516232809Sjmallett	uint64_t reserved_52_55               : 4;
1517232809Sjmallett	uint64_t qos6_pri                     : 4;  /**< QOS6 priority for PPX */
1518232809Sjmallett	uint64_t reserved_44_47               : 4;
1519232809Sjmallett	uint64_t qos5_pri                     : 4;  /**< QOS5 priority for PPX */
1520232809Sjmallett	uint64_t reserved_36_39               : 4;
1521232809Sjmallett	uint64_t qos4_pri                     : 4;  /**< QOS4 priority for PPX */
1522232809Sjmallett	uint64_t reserved_28_31               : 4;
1523232809Sjmallett	uint64_t qos3_pri                     : 4;  /**< QOS3 priority for PPX */
1524232809Sjmallett	uint64_t reserved_20_23               : 4;
1525232809Sjmallett	uint64_t qos2_pri                     : 4;  /**< QOS2 priority for PPX */
1526232809Sjmallett	uint64_t reserved_12_15               : 4;
1527232809Sjmallett	uint64_t qos1_pri                     : 4;  /**< QOS1 priority for PPX */
1528232809Sjmallett	uint64_t reserved_4_7                 : 4;
1529232809Sjmallett	uint64_t qos0_pri                     : 4;  /**< QOS0 priority for PPX */
1530232809Sjmallett#else
1531232809Sjmallett	uint64_t qos0_pri                     : 4;
1532232809Sjmallett	uint64_t reserved_4_7                 : 4;
1533232809Sjmallett	uint64_t qos1_pri                     : 4;
1534232809Sjmallett	uint64_t reserved_12_15               : 4;
1535232809Sjmallett	uint64_t qos2_pri                     : 4;
1536232809Sjmallett	uint64_t reserved_20_23               : 4;
1537232809Sjmallett	uint64_t qos3_pri                     : 4;
1538232809Sjmallett	uint64_t reserved_28_31               : 4;
1539232809Sjmallett	uint64_t qos4_pri                     : 4;
1540232809Sjmallett	uint64_t reserved_36_39               : 4;
1541232809Sjmallett	uint64_t qos5_pri                     : 4;
1542232809Sjmallett	uint64_t reserved_44_47               : 4;
1543232809Sjmallett	uint64_t qos6_pri                     : 4;
1544232809Sjmallett	uint64_t reserved_52_55               : 4;
1545232809Sjmallett	uint64_t qos7_pri                     : 4;
1546232809Sjmallett	uint64_t reserved_60_63               : 4;
1547232809Sjmallett#endif
1548232809Sjmallett	} s;
1549232809Sjmallett	struct cvmx_sso_ppx_qos_pri_s         cn68xx;
1550232809Sjmallett	struct cvmx_sso_ppx_qos_pri_s         cn68xxp1;
1551232809Sjmallett};
1552232809Sjmalletttypedef union cvmx_sso_ppx_qos_pri cvmx_sso_ppx_qos_pri_t;
1553232809Sjmallett
1554232809Sjmallett/**
1555232809Sjmallett * cvmx_sso_pp_strict
1556232809Sjmallett *
1557232809Sjmallett * SSO_PP_STRICT = SSO Strict Priority
1558232809Sjmallett *
1559232809Sjmallett * This register controls getting work from the input queues.  If the bit
1560232809Sjmallett * corresponding to a PP is set, that PP will not take work off the input
1561232809Sjmallett * queues until it is known that there is no higher-priority work available.
1562232809Sjmallett *
1563232809Sjmallett * Setting SSO_PP_STRICT may incur a performance penalty if highest-priority
1564232809Sjmallett * work is not found early.
1565232809Sjmallett *
1566232809Sjmallett * It is possible to starve a PP of work with SSO_PP_STRICT.  If the
1567232809Sjmallett * SSO_PPX_GRP_MSK for a PP masks-out much of the work added to the input
1568232809Sjmallett * queues that are higher-priority for that PP, and if there is a constant
1569232809Sjmallett * stream of work through one or more of those higher-priority input queues,
1570232809Sjmallett * then that PP may not accept work from lower-priority input queues.  This can
1571232809Sjmallett * be alleviated by ensuring that most or all the work added to the
1572232809Sjmallett * higher-priority input queues for a PP with SSO_PP_STRICT set are in a group
1573232809Sjmallett * acceptable to that PP.
1574232809Sjmallett *
1575232809Sjmallett * It is also possible to neglect work in an input queue if SSO_PP_STRICT is
1576232809Sjmallett * used.  If an input queue is a lower-priority queue for all PPs, and if all
1577232809Sjmallett * the PPs have their corresponding bit in SSO_PP_STRICT set, then work may
1578232809Sjmallett * never be taken (or be seldom taken) from that queue.  This can be alleviated
1579232809Sjmallett * by ensuring that work in all input queues can be serviced by one or more PPs
1580232809Sjmallett * that do not have SSO_PP_STRICT set, or that the input queue is the
1581232809Sjmallett * highest-priority input queue for one or more PPs that do have SSO_PP_STRICT
1582232809Sjmallett * set.
1583232809Sjmallett */
1584232809Sjmallettunion cvmx_sso_pp_strict {
1585232809Sjmallett	uint64_t u64;
1586232809Sjmallett	struct cvmx_sso_pp_strict_s {
1587232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1588232809Sjmallett	uint64_t reserved_32_63               : 32;
1589232809Sjmallett	uint64_t pp_strict                    : 32; /**< Corresponding PP operates in strict mode. */
1590232809Sjmallett#else
1591232809Sjmallett	uint64_t pp_strict                    : 32;
1592232809Sjmallett	uint64_t reserved_32_63               : 32;
1593232809Sjmallett#endif
1594232809Sjmallett	} s;
1595232809Sjmallett	struct cvmx_sso_pp_strict_s           cn68xx;
1596232809Sjmallett	struct cvmx_sso_pp_strict_s           cn68xxp1;
1597232809Sjmallett};
1598232809Sjmalletttypedef union cvmx_sso_pp_strict cvmx_sso_pp_strict_t;
1599232809Sjmallett
1600232809Sjmallett/**
1601232809Sjmallett * cvmx_sso_qos#_rnd
1602232809Sjmallett *
1603232809Sjmallett * CSR align addresses: ===========================================================================================================
1604232809Sjmallett * SSO_QOS(0..7)_RND = SSO QOS Issue Round Register
1605232809Sjmallett *                (one per IQ)
1606232809Sjmallett *
1607232809Sjmallett * The number of arbitration rounds each QOS level participates in.
1608232809Sjmallett */
1609232809Sjmallettunion cvmx_sso_qosx_rnd {
1610232809Sjmallett	uint64_t u64;
1611232809Sjmallett	struct cvmx_sso_qosx_rnd_s {
1612232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1613232809Sjmallett	uint64_t reserved_8_63                : 56;
1614232809Sjmallett	uint64_t rnds_qos                     : 8;  /**< Number of rounds to participate in for IQ(X). */
1615232809Sjmallett#else
1616232809Sjmallett	uint64_t rnds_qos                     : 8;
1617232809Sjmallett	uint64_t reserved_8_63                : 56;
1618232809Sjmallett#endif
1619232809Sjmallett	} s;
1620232809Sjmallett	struct cvmx_sso_qosx_rnd_s            cn68xx;
1621232809Sjmallett	struct cvmx_sso_qosx_rnd_s            cn68xxp1;
1622232809Sjmallett};
1623232809Sjmalletttypedef union cvmx_sso_qosx_rnd cvmx_sso_qosx_rnd_t;
1624232809Sjmallett
1625232809Sjmallett/**
1626232809Sjmallett * cvmx_sso_qos_thr#
1627232809Sjmallett *
1628232809Sjmallett * CSR reserved addresses: (24): 0xa040..0xa0f8
1629232809Sjmallett * CSR align addresses: ===========================================================================================================
1630232809Sjmallett * SSO_QOS_THRX = SSO QOS Threshold Register
1631232809Sjmallett *                (one per QOS level)
1632232809Sjmallett *
1633232809Sjmallett * Contains the thresholds for allocating SSO internal storage buffers.  If the
1634232809Sjmallett * number of remaining free buffers drops below the minimum threshold (MIN_THR)
1635232809Sjmallett * or the number of allocated buffers for this QOS level rises above the
1636232809Sjmallett * maximum threshold (MAX_THR), future incoming work queue entries will be
1637232809Sjmallett * buffered externally rather than internally.  This register also contains the
1638232809Sjmallett * number of internal buffers currently allocated to this QOS level (BUF_CNT).
1639232809Sjmallett */
1640232809Sjmallettunion cvmx_sso_qos_thrx {
1641232809Sjmallett	uint64_t u64;
1642232809Sjmallett	struct cvmx_sso_qos_thrx_s {
1643232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1644232809Sjmallett	uint64_t reserved_40_63               : 24;
1645232809Sjmallett	uint64_t buf_cnt                      : 12; /**< # of internal buffers allocated to QOS level X */
1646232809Sjmallett	uint64_t reserved_26_27               : 2;
1647232809Sjmallett	uint64_t max_thr                      : 12; /**< Max threshold for QOS level X
1648232809Sjmallett                                                         For performance reasons, MAX_THR can have a slop of 4
1649232809Sjmallett                                                         WQE for QOS level X. */
1650232809Sjmallett	uint64_t reserved_12_13               : 2;
1651232809Sjmallett	uint64_t min_thr                      : 12; /**< Min threshold for QOS level X
1652232809Sjmallett                                                         For performance reasons, MIN_THR can have a slop of 4
1653232809Sjmallett                                                         WQEs for QOS level X. */
1654232809Sjmallett#else
1655232809Sjmallett	uint64_t min_thr                      : 12;
1656232809Sjmallett	uint64_t reserved_12_13               : 2;
1657232809Sjmallett	uint64_t max_thr                      : 12;
1658232809Sjmallett	uint64_t reserved_26_27               : 2;
1659232809Sjmallett	uint64_t buf_cnt                      : 12;
1660232809Sjmallett	uint64_t reserved_40_63               : 24;
1661232809Sjmallett#endif
1662232809Sjmallett	} s;
1663232809Sjmallett	struct cvmx_sso_qos_thrx_s            cn68xx;
1664232809Sjmallett	struct cvmx_sso_qos_thrx_s            cn68xxp1;
1665232809Sjmallett};
1666232809Sjmalletttypedef union cvmx_sso_qos_thrx cvmx_sso_qos_thrx_t;
1667232809Sjmallett
1668232809Sjmallett/**
1669232809Sjmallett * cvmx_sso_qos_we
1670232809Sjmallett *
1671232809Sjmallett * SSO_QOS_WE = SSO WE Buffers
1672232809Sjmallett *
1673232809Sjmallett * This register contains a read-only count of the current number of free
1674232809Sjmallett * buffers (FREE_CNT) and the total number of tag chain heads on the de-schedule list
1675232809Sjmallett * (DES_CNT) (which is not the same as the total number of entries on all of the descheduled
1676232809Sjmallett * tag chains.)
1677232809Sjmallett */
1678232809Sjmallettunion cvmx_sso_qos_we {
1679232809Sjmallett	uint64_t u64;
1680232809Sjmallett	struct cvmx_sso_qos_we_s {
1681232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1682232809Sjmallett	uint64_t reserved_26_63               : 38;
1683232809Sjmallett	uint64_t des_cnt                      : 12; /**< Number of buffers on de-schedule list */
1684232809Sjmallett	uint64_t reserved_12_13               : 2;
1685232809Sjmallett	uint64_t free_cnt                     : 12; /**< Number of total free buffers */
1686232809Sjmallett#else
1687232809Sjmallett	uint64_t free_cnt                     : 12;
1688232809Sjmallett	uint64_t reserved_12_13               : 2;
1689232809Sjmallett	uint64_t des_cnt                      : 12;
1690232809Sjmallett	uint64_t reserved_26_63               : 38;
1691232809Sjmallett#endif
1692232809Sjmallett	} s;
1693232809Sjmallett	struct cvmx_sso_qos_we_s              cn68xx;
1694232809Sjmallett	struct cvmx_sso_qos_we_s              cn68xxp1;
1695232809Sjmallett};
1696232809Sjmalletttypedef union cvmx_sso_qos_we cvmx_sso_qos_we_t;
1697232809Sjmallett
1698232809Sjmallett/**
1699232809Sjmallett * cvmx_sso_reset
1700232809Sjmallett *
1701232809Sjmallett * SSO_RESET = SSO Soft Reset
1702232809Sjmallett *
1703232809Sjmallett * Writing a one to SSO_RESET[RESET] will reset the SSO.  After receiving a
1704232809Sjmallett * store to this CSR, the SSO must not be sent any other operations for 2500
1705232809Sjmallett * sclk cycles.
1706232809Sjmallett *
1707232809Sjmallett * Note that the contents of this register are reset along with the rest of the
1708232809Sjmallett * SSO.
1709232809Sjmallett *
1710232809Sjmallett * IMPLEMENTATION NOTES--NOT FOR SPEC:
1711232809Sjmallett *      The SSO must return the bus credit associated with the CSR store used
1712232809Sjmallett *      to write this register before reseting itself.  And the RSL tree
1713232809Sjmallett *      that passes through the SSO must continue to work for RSL operations
1714232809Sjmallett *      that do not target the SSO itself.
1715232809Sjmallett */
1716232809Sjmallettunion cvmx_sso_reset {
1717232809Sjmallett	uint64_t u64;
1718232809Sjmallett	struct cvmx_sso_reset_s {
1719232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1720232809Sjmallett	uint64_t reserved_1_63                : 63;
1721232809Sjmallett	uint64_t reset                        : 1;  /**< Reset the SSO */
1722232809Sjmallett#else
1723232809Sjmallett	uint64_t reset                        : 1;
1724232809Sjmallett	uint64_t reserved_1_63                : 63;
1725232809Sjmallett#endif
1726232809Sjmallett	} s;
1727232809Sjmallett	struct cvmx_sso_reset_s               cn68xx;
1728232809Sjmallett};
1729232809Sjmalletttypedef union cvmx_sso_reset cvmx_sso_reset_t;
1730232809Sjmallett
1731232809Sjmallett/**
1732232809Sjmallett * cvmx_sso_rwq_head_ptr#
1733232809Sjmallett *
1734232809Sjmallett * CSR reserved addresses: (24): 0xb040..0xb0f8
1735232809Sjmallett * CSR align addresses: ===========================================================================================================
1736232809Sjmallett * SSO_RWQ_HEAD_PTRX = SSO Remote Queue Head Register
1737232809Sjmallett *                (one per QOS level)
1738232809Sjmallett * Contains the ptr to the first entry of the remote linked list(s) for a particular
1739232809Sjmallett * QoS level. SW should initialize the remote linked list(s) by programming
1740232809Sjmallett * SSO_RWQ_HEAD_PTRX and SSO_RWQ_TAIL_PTRX to identical values.
1741232809Sjmallett */
1742232809Sjmallettunion cvmx_sso_rwq_head_ptrx {
1743232809Sjmallett	uint64_t u64;
1744232809Sjmallett	struct cvmx_sso_rwq_head_ptrx_s {
1745232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1746232809Sjmallett	uint64_t reserved_38_63               : 26;
1747232809Sjmallett	uint64_t ptr                          : 31; /**< Head Pointer */
1748232809Sjmallett	uint64_t reserved_5_6                 : 2;
1749232809Sjmallett	uint64_t rctr                         : 5;  /**< Index of next WQE entry in fill packet to be
1750232809Sjmallett                                                         processed (inbound queues) */
1751232809Sjmallett#else
1752232809Sjmallett	uint64_t rctr                         : 5;
1753232809Sjmallett	uint64_t reserved_5_6                 : 2;
1754232809Sjmallett	uint64_t ptr                          : 31;
1755232809Sjmallett	uint64_t reserved_38_63               : 26;
1756232809Sjmallett#endif
1757232809Sjmallett	} s;
1758232809Sjmallett	struct cvmx_sso_rwq_head_ptrx_s       cn68xx;
1759232809Sjmallett	struct cvmx_sso_rwq_head_ptrx_s       cn68xxp1;
1760232809Sjmallett};
1761232809Sjmalletttypedef union cvmx_sso_rwq_head_ptrx cvmx_sso_rwq_head_ptrx_t;
1762232809Sjmallett
1763232809Sjmallett/**
1764232809Sjmallett * cvmx_sso_rwq_pop_fptr
1765232809Sjmallett *
1766232809Sjmallett * SSO_RWQ_POP_FPTR = SSO Pop Free Pointer
1767232809Sjmallett *
1768232809Sjmallett * This register is used by SW to remove pointers for buffer-reallocation and diagnostics, and
1769232809Sjmallett * should only be used when SSO is idle.
1770232809Sjmallett *
1771232809Sjmallett * To remove ALL pointers, software must insure that there are modulus 16
1772232809Sjmallett * pointers in the FPA.  To do this, SSO_CFG.RWQ_BYP_DIS must be set, the FPA
1773232809Sjmallett * pointer count read, and enough fake buffers pushed via SSO_RWQ_PSH_FPTR to
1774232809Sjmallett * bring the FPA pointer count up to mod 16.
1775232809Sjmallett */
1776232809Sjmallettunion cvmx_sso_rwq_pop_fptr {
1777232809Sjmallett	uint64_t u64;
1778232809Sjmallett	struct cvmx_sso_rwq_pop_fptr_s {
1779232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1780232809Sjmallett	uint64_t val                          : 1;  /**< Free Pointer Valid */
1781232809Sjmallett	uint64_t cnt                          : 6;  /**< fptr_in count */
1782232809Sjmallett	uint64_t reserved_38_56               : 19;
1783232809Sjmallett	uint64_t fptr                         : 31; /**< Free Pointer */
1784232809Sjmallett	uint64_t reserved_0_6                 : 7;
1785232809Sjmallett#else
1786232809Sjmallett	uint64_t reserved_0_6                 : 7;
1787232809Sjmallett	uint64_t fptr                         : 31;
1788232809Sjmallett	uint64_t reserved_38_56               : 19;
1789232809Sjmallett	uint64_t cnt                          : 6;
1790232809Sjmallett	uint64_t val                          : 1;
1791232809Sjmallett#endif
1792232809Sjmallett	} s;
1793232809Sjmallett	struct cvmx_sso_rwq_pop_fptr_s        cn68xx;
1794232809Sjmallett	struct cvmx_sso_rwq_pop_fptr_cn68xxp1 {
1795232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1796232809Sjmallett	uint64_t val                          : 1;  /**< Free Pointer Valid */
1797232809Sjmallett	uint64_t reserved_38_62               : 25;
1798232809Sjmallett	uint64_t fptr                         : 31; /**< Free Pointer */
1799232809Sjmallett	uint64_t reserved_0_6                 : 7;
1800232809Sjmallett#else
1801232809Sjmallett	uint64_t reserved_0_6                 : 7;
1802232809Sjmallett	uint64_t fptr                         : 31;
1803232809Sjmallett	uint64_t reserved_38_62               : 25;
1804232809Sjmallett	uint64_t val                          : 1;
1805232809Sjmallett#endif
1806232809Sjmallett	} cn68xxp1;
1807232809Sjmallett};
1808232809Sjmalletttypedef union cvmx_sso_rwq_pop_fptr cvmx_sso_rwq_pop_fptr_t;
1809232809Sjmallett
1810232809Sjmallett/**
1811232809Sjmallett * cvmx_sso_rwq_psh_fptr
1812232809Sjmallett *
1813232809Sjmallett * CSR reserved addresses: (56): 0xc240..0xc3f8
1814232809Sjmallett * SSO_RWQ_PSH_FPTR = SSO Free Pointer FIFO
1815232809Sjmallett *
1816232809Sjmallett * This register is used by SW to initialize the SSO with a pool of free
1817232809Sjmallett * pointers by writing the FPTR field whenever FULL = 0. Free pointers are
1818232809Sjmallett * fetched/released from/to the pool when accessing WQE entries stored remotely
1819232809Sjmallett * (in remote linked lists).  Free pointers should be 128 byte aligned, each of
1820232809Sjmallett * 256 bytes. This register should only be used when SSO is idle.
1821232809Sjmallett *
1822232809Sjmallett * Software needs to set aside buffering for
1823232809Sjmallett *      8 + 48 + ROUNDUP(N/26)
1824232809Sjmallett *
1825232809Sjmallett * where as many as N DRAM work queue entries may be used.  The first 8 buffers
1826232809Sjmallett * are used to setup the SSO_RWQ_HEAD_PTR and SSO_RWQ_TAIL_PTRs, and the
1827232809Sjmallett * remainder are pushed via this register.
1828232809Sjmallett *
1829232809Sjmallett * IMPLEMENTATION NOTES--NOT FOR SPEC:
1830232809Sjmallett *      48 avoids false out of buffer error due to (16) FPA and in-sso FPA buffering (32)
1831232809Sjmallett *      26 is number of WAE's per 256B buffer
1832232809Sjmallett */
1833232809Sjmallettunion cvmx_sso_rwq_psh_fptr {
1834232809Sjmallett	uint64_t u64;
1835232809Sjmallett	struct cvmx_sso_rwq_psh_fptr_s {
1836232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1837232809Sjmallett	uint64_t full                         : 1;  /**< FIFO Full.  When set, the FPA is busy writing entries
1838232809Sjmallett                                                         and software must wait before adding new entries. */
1839232809Sjmallett	uint64_t cnt                          : 4;  /**< fptr_out count */
1840232809Sjmallett	uint64_t reserved_38_58               : 21;
1841232809Sjmallett	uint64_t fptr                         : 31; /**< Free Pointer */
1842232809Sjmallett	uint64_t reserved_0_6                 : 7;
1843232809Sjmallett#else
1844232809Sjmallett	uint64_t reserved_0_6                 : 7;
1845232809Sjmallett	uint64_t fptr                         : 31;
1846232809Sjmallett	uint64_t reserved_38_58               : 21;
1847232809Sjmallett	uint64_t cnt                          : 4;
1848232809Sjmallett	uint64_t full                         : 1;
1849232809Sjmallett#endif
1850232809Sjmallett	} s;
1851232809Sjmallett	struct cvmx_sso_rwq_psh_fptr_s        cn68xx;
1852232809Sjmallett	struct cvmx_sso_rwq_psh_fptr_cn68xxp1 {
1853232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1854232809Sjmallett	uint64_t full                         : 1;  /**< FIFO Full.  When set, the FPA is busy writing entries
1855232809Sjmallett                                                         and software must wait before adding new entries. */
1856232809Sjmallett	uint64_t reserved_38_62               : 25;
1857232809Sjmallett	uint64_t fptr                         : 31; /**< Free Pointer */
1858232809Sjmallett	uint64_t reserved_0_6                 : 7;
1859232809Sjmallett#else
1860232809Sjmallett	uint64_t reserved_0_6                 : 7;
1861232809Sjmallett	uint64_t fptr                         : 31;
1862232809Sjmallett	uint64_t reserved_38_62               : 25;
1863232809Sjmallett	uint64_t full                         : 1;
1864232809Sjmallett#endif
1865232809Sjmallett	} cn68xxp1;
1866232809Sjmallett};
1867232809Sjmalletttypedef union cvmx_sso_rwq_psh_fptr cvmx_sso_rwq_psh_fptr_t;
1868232809Sjmallett
1869232809Sjmallett/**
1870232809Sjmallett * cvmx_sso_rwq_tail_ptr#
1871232809Sjmallett *
1872232809Sjmallett * CSR reserved addresses: (56): 0xc040..0xc1f8
1873232809Sjmallett * SSO_RWQ_TAIL_PTRX = SSO Remote Queue Tail Register
1874232809Sjmallett *                (one per QOS level)
1875232809Sjmallett * Contains the ptr to the last entry of the remote linked list(s) for a particular
1876232809Sjmallett * QoS level. SW must initialize the remote linked list(s) by programming
1877232809Sjmallett * SSO_RWQ_HEAD_PTRX and SSO_RWQ_TAIL_PTRX to identical values.
1878232809Sjmallett */
1879232809Sjmallettunion cvmx_sso_rwq_tail_ptrx {
1880232809Sjmallett	uint64_t u64;
1881232809Sjmallett	struct cvmx_sso_rwq_tail_ptrx_s {
1882232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1883232809Sjmallett	uint64_t reserved_38_63               : 26;
1884232809Sjmallett	uint64_t ptr                          : 31; /**< Tail Pointer */
1885232809Sjmallett	uint64_t reserved_5_6                 : 2;
1886232809Sjmallett	uint64_t rctr                         : 5;  /**< Number of entries waiting to be sent out to external
1887232809Sjmallett                                                         RAM (outbound queues) */
1888232809Sjmallett#else
1889232809Sjmallett	uint64_t rctr                         : 5;
1890232809Sjmallett	uint64_t reserved_5_6                 : 2;
1891232809Sjmallett	uint64_t ptr                          : 31;
1892232809Sjmallett	uint64_t reserved_38_63               : 26;
1893232809Sjmallett#endif
1894232809Sjmallett	} s;
1895232809Sjmallett	struct cvmx_sso_rwq_tail_ptrx_s       cn68xx;
1896232809Sjmallett	struct cvmx_sso_rwq_tail_ptrx_s       cn68xxp1;
1897232809Sjmallett};
1898232809Sjmalletttypedef union cvmx_sso_rwq_tail_ptrx cvmx_sso_rwq_tail_ptrx_t;
1899232809Sjmallett
1900232809Sjmallett/**
1901232809Sjmallett * cvmx_sso_ts_pc
1902232809Sjmallett *
1903232809Sjmallett * SSO_TS_PC = SSO Tag Switch Performance Counter
1904232809Sjmallett *
1905232809Sjmallett * Counts the number of tag switch requests.
1906232809Sjmallett * Counter rolls over through zero when max value exceeded.
1907232809Sjmallett */
1908232809Sjmallettunion cvmx_sso_ts_pc {
1909232809Sjmallett	uint64_t u64;
1910232809Sjmallett	struct cvmx_sso_ts_pc_s {
1911232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1912232809Sjmallett	uint64_t ts_pc                        : 64; /**< Tag switch performance counter */
1913232809Sjmallett#else
1914232809Sjmallett	uint64_t ts_pc                        : 64;
1915232809Sjmallett#endif
1916232809Sjmallett	} s;
1917232809Sjmallett	struct cvmx_sso_ts_pc_s               cn68xx;
1918232809Sjmallett	struct cvmx_sso_ts_pc_s               cn68xxp1;
1919232809Sjmallett};
1920232809Sjmalletttypedef union cvmx_sso_ts_pc cvmx_sso_ts_pc_t;
1921232809Sjmallett
1922232809Sjmallett/**
1923232809Sjmallett * cvmx_sso_wa_com_pc
1924232809Sjmallett *
1925232809Sjmallett * SSO_WA_COM_PC = SSO Work Add Combined Performance Counter
1926232809Sjmallett *
1927232809Sjmallett * Counts the number of add new work requests for all QOS levels.
1928232809Sjmallett * Counter rolls over through zero when max value exceeded.
1929232809Sjmallett */
1930232809Sjmallettunion cvmx_sso_wa_com_pc {
1931232809Sjmallett	uint64_t u64;
1932232809Sjmallett	struct cvmx_sso_wa_com_pc_s {
1933232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1934232809Sjmallett	uint64_t wa_pc                        : 64; /**< Work add combined performance counter */
1935232809Sjmallett#else
1936232809Sjmallett	uint64_t wa_pc                        : 64;
1937232809Sjmallett#endif
1938232809Sjmallett	} s;
1939232809Sjmallett	struct cvmx_sso_wa_com_pc_s           cn68xx;
1940232809Sjmallett	struct cvmx_sso_wa_com_pc_s           cn68xxp1;
1941232809Sjmallett};
1942232809Sjmalletttypedef union cvmx_sso_wa_com_pc cvmx_sso_wa_com_pc_t;
1943232809Sjmallett
1944232809Sjmallett/**
1945232809Sjmallett * cvmx_sso_wa_pc#
1946232809Sjmallett *
1947232809Sjmallett * CSR reserved addresses: (64): 0x4200..0x43f8
1948232809Sjmallett * CSR align addresses: ===========================================================================================================
1949232809Sjmallett * SSO_WA_PCX = SSO Work Add Performance Counter
1950232809Sjmallett *             (one per QOS level)
1951232809Sjmallett *
1952232809Sjmallett * Counts the number of add new work requests for each QOS level.
1953232809Sjmallett * Counter rolls over through zero when max value exceeded.
1954232809Sjmallett */
1955232809Sjmallettunion cvmx_sso_wa_pcx {
1956232809Sjmallett	uint64_t u64;
1957232809Sjmallett	struct cvmx_sso_wa_pcx_s {
1958232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1959232809Sjmallett	uint64_t wa_pc                        : 64; /**< Work add performance counter for QOS level X */
1960232809Sjmallett#else
1961232809Sjmallett	uint64_t wa_pc                        : 64;
1962232809Sjmallett#endif
1963232809Sjmallett	} s;
1964232809Sjmallett	struct cvmx_sso_wa_pcx_s              cn68xx;
1965232809Sjmallett	struct cvmx_sso_wa_pcx_s              cn68xxp1;
1966232809Sjmallett};
1967232809Sjmalletttypedef union cvmx_sso_wa_pcx cvmx_sso_wa_pcx_t;
1968232809Sjmallett
1969232809Sjmallett/**
1970232809Sjmallett * cvmx_sso_wq_int
1971232809Sjmallett *
1972232809Sjmallett * Note, the old POW offsets ran from 0x0 to 0x3f8, leaving the next available slot at 0x400.
1973232809Sjmallett * To ensure no overlap, start on 4k boundary: 0x1000.
1974232809Sjmallett * SSO_WQ_INT = SSO Work Queue Interrupt Register
1975232809Sjmallett *
1976232809Sjmallett * Contains the bits (one per group) that set work queue interrupts and are
1977232809Sjmallett * used to clear these interrupts.  For more information regarding this
1978232809Sjmallett * register, see the interrupt section of the SSO spec.
1979232809Sjmallett */
1980232809Sjmallettunion cvmx_sso_wq_int {
1981232809Sjmallett	uint64_t u64;
1982232809Sjmallett	struct cvmx_sso_wq_int_s {
1983232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
1984232809Sjmallett	uint64_t wq_int                       : 64; /**< Work queue interrupt bits
1985232809Sjmallett                                                         Corresponding WQ_INT bit is set by HW whenever:
1986232809Sjmallett                                                           - SSO_WQ_INT_CNTX[IQ_CNT] >=
1987232809Sjmallett                                                             SSO_WQ_INT_THRX[IQ_THR] and the threshold
1988232809Sjmallett                                                             interrupt is not disabled.
1989232809Sjmallett                                                             SSO_WQ_IQ_DISX[IQ_DIS<X>]==1 disables the interrupt
1990232809Sjmallett                                                             SSO_WQ_INT_THRX[IQ_THR]==0 disables the int.
1991232809Sjmallett                                                           - SSO_WQ_INT_CNTX[DS_CNT] >=
1992232809Sjmallett                                                             SSO_WQ_INT_THRX[DS_THR] and the threshold
1993232809Sjmallett                                                             interrupt is not disabled
1994232809Sjmallett                                                             SSO_WQ_INT_THRX[DS_THR]==0 disables the int.
1995232809Sjmallett                                                           - SSO_WQ_INT_CNTX[TC_CNT]==1 when periodic
1996232809Sjmallett                                                             counter SSO_WQ_INT_PC[PC]==0 and
1997232809Sjmallett                                                             SSO_WQ_INT_THRX[TC_EN]==1 and at least one of:
1998232809Sjmallett                                                               - SSO_WQ_INT_CNTX[IQ_CNT] > 0
1999232809Sjmallett                                                               - SSO_WQ_INT_CNTX[DS_CNT] > 0 */
2000232809Sjmallett#else
2001232809Sjmallett	uint64_t wq_int                       : 64;
2002232809Sjmallett#endif
2003232809Sjmallett	} s;
2004232809Sjmallett	struct cvmx_sso_wq_int_s              cn68xx;
2005232809Sjmallett	struct cvmx_sso_wq_int_s              cn68xxp1;
2006232809Sjmallett};
2007232809Sjmalletttypedef union cvmx_sso_wq_int cvmx_sso_wq_int_t;
2008232809Sjmallett
2009232809Sjmallett/**
2010232809Sjmallett * cvmx_sso_wq_int_cnt#
2011232809Sjmallett *
2012232809Sjmallett * CSR reserved addresses: (64): 0x7200..0x73f8
2013232809Sjmallett * CSR align addresses: ===========================================================================================================
2014232809Sjmallett * SSO_WQ_INT_CNTX = SSO Work Queue Interrupt Count Register
2015232809Sjmallett *                   (one per group)
2016232809Sjmallett *
2017232809Sjmallett * Contains a read-only copy of the counts used to trigger work queue
2018232809Sjmallett * interrupts.  For more information regarding this register, see the interrupt
2019232809Sjmallett * section.
2020232809Sjmallett */
2021232809Sjmallettunion cvmx_sso_wq_int_cntx {
2022232809Sjmallett	uint64_t u64;
2023232809Sjmallett	struct cvmx_sso_wq_int_cntx_s {
2024232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
2025232809Sjmallett	uint64_t reserved_32_63               : 32;
2026232809Sjmallett	uint64_t tc_cnt                       : 4;  /**< Time counter current value for group X
2027232809Sjmallett                                                         HW sets TC_CNT to SSO_WQ_INT_THRX[TC_THR] whenever:
2028232809Sjmallett                                                           - corresponding SSO_WQ_INT_CNTX[IQ_CNT]==0 and
2029232809Sjmallett                                                             corresponding SSO_WQ_INT_CNTX[DS_CNT]==0
2030232809Sjmallett                                                           - corresponding SSO_WQ_INT[WQ_INT<X>] is written
2031232809Sjmallett                                                             with a 1 by SW
2032232809Sjmallett                                                           - corresponding SSO_WQ_IQ_DIS[IQ_DIS<X>] is written
2033232809Sjmallett                                                             with a 1 by SW
2034232809Sjmallett                                                           - corresponding SSO_WQ_INT_THRX is written by SW
2035232809Sjmallett                                                           - TC_CNT==1 and periodic counter
2036232809Sjmallett                                                             SSO_WQ_INT_PC[PC]==0
2037232809Sjmallett                                                         Otherwise, HW decrements TC_CNT whenever the
2038232809Sjmallett                                                         periodic counter SSO_WQ_INT_PC[PC]==0.
2039232809Sjmallett                                                         TC_CNT is 0 whenever SSO_WQ_INT_THRX[TC_THR]==0. */
2040232809Sjmallett	uint64_t reserved_26_27               : 2;
2041232809Sjmallett	uint64_t ds_cnt                       : 12; /**< De-schedule executable count for group X */
2042232809Sjmallett	uint64_t reserved_12_13               : 2;
2043232809Sjmallett	uint64_t iq_cnt                       : 12; /**< Input queue executable count for group X */
2044232809Sjmallett#else
2045232809Sjmallett	uint64_t iq_cnt                       : 12;
2046232809Sjmallett	uint64_t reserved_12_13               : 2;
2047232809Sjmallett	uint64_t ds_cnt                       : 12;
2048232809Sjmallett	uint64_t reserved_26_27               : 2;
2049232809Sjmallett	uint64_t tc_cnt                       : 4;
2050232809Sjmallett	uint64_t reserved_32_63               : 32;
2051232809Sjmallett#endif
2052232809Sjmallett	} s;
2053232809Sjmallett	struct cvmx_sso_wq_int_cntx_s         cn68xx;
2054232809Sjmallett	struct cvmx_sso_wq_int_cntx_s         cn68xxp1;
2055232809Sjmallett};
2056232809Sjmalletttypedef union cvmx_sso_wq_int_cntx cvmx_sso_wq_int_cntx_t;
2057232809Sjmallett
2058232809Sjmallett/**
2059232809Sjmallett * cvmx_sso_wq_int_pc
2060232809Sjmallett *
2061232809Sjmallett * CSR reserved addresses: (1): 0x1018..0x1018
2062232809Sjmallett * SSO_WQ_INT_PC = SSO Work Queue Interrupt Periodic Counter Register
2063232809Sjmallett *
2064232809Sjmallett * Contains the threshold value for the work queue interrupt periodic counter
2065232809Sjmallett * and also a read-only copy of the periodic counter.  For more information
2066232809Sjmallett * regarding this register, see the interrupt section.
2067232809Sjmallett */
2068232809Sjmallettunion cvmx_sso_wq_int_pc {
2069232809Sjmallett	uint64_t u64;
2070232809Sjmallett	struct cvmx_sso_wq_int_pc_s {
2071232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
2072232809Sjmallett	uint64_t reserved_60_63               : 4;
2073232809Sjmallett	uint64_t pc                           : 28; /**< Work queue interrupt periodic counter */
2074232809Sjmallett	uint64_t reserved_28_31               : 4;
2075232809Sjmallett	uint64_t pc_thr                       : 20; /**< Work queue interrupt periodic counter threshold */
2076232809Sjmallett	uint64_t reserved_0_7                 : 8;
2077232809Sjmallett#else
2078232809Sjmallett	uint64_t reserved_0_7                 : 8;
2079232809Sjmallett	uint64_t pc_thr                       : 20;
2080232809Sjmallett	uint64_t reserved_28_31               : 4;
2081232809Sjmallett	uint64_t pc                           : 28;
2082232809Sjmallett	uint64_t reserved_60_63               : 4;
2083232809Sjmallett#endif
2084232809Sjmallett	} s;
2085232809Sjmallett	struct cvmx_sso_wq_int_pc_s           cn68xx;
2086232809Sjmallett	struct cvmx_sso_wq_int_pc_s           cn68xxp1;
2087232809Sjmallett};
2088232809Sjmalletttypedef union cvmx_sso_wq_int_pc cvmx_sso_wq_int_pc_t;
2089232809Sjmallett
2090232809Sjmallett/**
2091232809Sjmallett * cvmx_sso_wq_int_thr#
2092232809Sjmallett *
2093232809Sjmallett * CSR reserved addresses: (96): 0x6100..0x63f8
2094232809Sjmallett * CSR align addresses: ===========================================================================================================
2095232809Sjmallett * SSO_WQ_INT_THR(0..63) = SSO Work Queue Interrupt Threshold Registers
2096232809Sjmallett *                         (one per group)
2097232809Sjmallett *
2098232809Sjmallett * Contains the thresholds for enabling and setting work queue interrupts.  For
2099232809Sjmallett * more information, see the interrupt section.
2100232809Sjmallett *
2101232809Sjmallett * Note: Up to 16 of the SSO's internal storage buffers can be allocated
2102232809Sjmallett * for hardware use and are therefore not available for incoming work queue
2103232809Sjmallett * entries.  Additionally, any WS that is not in the EMPTY state consumes a
2104232809Sjmallett * buffer.  Thus in a 32 PP system, it is not advisable to set either IQ_THR or
2105232809Sjmallett * DS_THR to greater than 2048 - 16 - 32*2 = 1968.  Doing so may prevent the
2106232809Sjmallett * interrupt from ever triggering.
2107232809Sjmallett *
2108232809Sjmallett * Priorities for QOS levels 0..7
2109232809Sjmallett */
2110232809Sjmallettunion cvmx_sso_wq_int_thrx {
2111232809Sjmallett	uint64_t u64;
2112232809Sjmallett	struct cvmx_sso_wq_int_thrx_s {
2113232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
2114232809Sjmallett	uint64_t reserved_33_63               : 31;
2115232809Sjmallett	uint64_t tc_en                        : 1;  /**< Time counter interrupt enable for group X
2116232809Sjmallett                                                         TC_EN must be zero when TC_THR==0 */
2117232809Sjmallett	uint64_t tc_thr                       : 4;  /**< Time counter interrupt threshold for group X
2118232809Sjmallett                                                         When TC_THR==0, SSO_WQ_INT_CNTX[TC_CNT] is zero */
2119232809Sjmallett	uint64_t reserved_26_27               : 2;
2120232809Sjmallett	uint64_t ds_thr                       : 12; /**< De-schedule count threshold for group X
2121232809Sjmallett                                                         DS_THR==0 disables the threshold interrupt */
2122232809Sjmallett	uint64_t reserved_12_13               : 2;
2123232809Sjmallett	uint64_t iq_thr                       : 12; /**< Input queue count threshold for group X
2124232809Sjmallett                                                         IQ_THR==0 disables the threshold interrupt */
2125232809Sjmallett#else
2126232809Sjmallett	uint64_t iq_thr                       : 12;
2127232809Sjmallett	uint64_t reserved_12_13               : 2;
2128232809Sjmallett	uint64_t ds_thr                       : 12;
2129232809Sjmallett	uint64_t reserved_26_27               : 2;
2130232809Sjmallett	uint64_t tc_thr                       : 4;
2131232809Sjmallett	uint64_t tc_en                        : 1;
2132232809Sjmallett	uint64_t reserved_33_63               : 31;
2133232809Sjmallett#endif
2134232809Sjmallett	} s;
2135232809Sjmallett	struct cvmx_sso_wq_int_thrx_s         cn68xx;
2136232809Sjmallett	struct cvmx_sso_wq_int_thrx_s         cn68xxp1;
2137232809Sjmallett};
2138232809Sjmalletttypedef union cvmx_sso_wq_int_thrx cvmx_sso_wq_int_thrx_t;
2139232809Sjmallett
2140232809Sjmallett/**
2141232809Sjmallett * cvmx_sso_wq_iq_dis
2142232809Sjmallett *
2143232809Sjmallett * CSR reserved addresses: (1): 0x1008..0x1008
2144232809Sjmallett * SSO_WQ_IQ_DIS = SSO Input Queue Interrupt Temporary Disable Mask
2145232809Sjmallett *
2146232809Sjmallett * Contains the input queue interrupt temporary disable bits (one per group).
2147232809Sjmallett * For more information regarding this register, see the interrupt section.
2148232809Sjmallett */
2149232809Sjmallettunion cvmx_sso_wq_iq_dis {
2150232809Sjmallett	uint64_t u64;
2151232809Sjmallett	struct cvmx_sso_wq_iq_dis_s {
2152232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
2153232809Sjmallett	uint64_t iq_dis                       : 64; /**< Input queue interrupt temporary disable mask
2154232809Sjmallett                                                         Corresponding SSO_WQ_INTX[WQ_INT<X>] bit cannot be
2155232809Sjmallett                                                         set due to IQ_CNT/IQ_THR check when this bit is set.
2156232809Sjmallett                                                         Corresponding IQ_DIS bit is cleared by HW whenever:
2157232809Sjmallett                                                          - SSO_WQ_INT_CNTX[IQ_CNT] is zero, or
2158232809Sjmallett                                                          - SSO_WQ_INT_CNTX[TC_CNT]==1 when periodic
2159232809Sjmallett                                                            counter SSO_WQ_INT_PC[PC]==0 */
2160232809Sjmallett#else
2161232809Sjmallett	uint64_t iq_dis                       : 64;
2162232809Sjmallett#endif
2163232809Sjmallett	} s;
2164232809Sjmallett	struct cvmx_sso_wq_iq_dis_s           cn68xx;
2165232809Sjmallett	struct cvmx_sso_wq_iq_dis_s           cn68xxp1;
2166232809Sjmallett};
2167232809Sjmalletttypedef union cvmx_sso_wq_iq_dis cvmx_sso_wq_iq_dis_t;
2168232809Sjmallett
2169232809Sjmallett/**
2170232809Sjmallett * cvmx_sso_ws_pc#
2171232809Sjmallett *
2172232809Sjmallett * CSR reserved addresses: (225): 0x3100..0x3800
2173232809Sjmallett * CSR align addresses: ===========================================================================================================
2174232809Sjmallett * SSO_WS_PCX = SSO Work Schedule Performance Counter
2175232809Sjmallett *              (one per group)
2176232809Sjmallett *
2177232809Sjmallett * Counts the number of work schedules for each group.
2178232809Sjmallett * Counter rolls over through zero when max value exceeded.
2179232809Sjmallett */
2180232809Sjmallettunion cvmx_sso_ws_pcx {
2181232809Sjmallett	uint64_t u64;
2182232809Sjmallett	struct cvmx_sso_ws_pcx_s {
2183232809Sjmallett#ifdef __BIG_ENDIAN_BITFIELD
2184232809Sjmallett	uint64_t ws_pc                        : 64; /**< Work schedule performance counter for group X */
2185232809Sjmallett#else
2186232809Sjmallett	uint64_t ws_pc                        : 64;
2187232809Sjmallett#endif
2188232809Sjmallett	} s;
2189232809Sjmallett	struct cvmx_sso_ws_pcx_s              cn68xx;
2190232809Sjmallett	struct cvmx_sso_ws_pcx_s              cn68xxp1;
2191232809Sjmallett};
2192232809Sjmalletttypedef union cvmx_sso_ws_pcx cvmx_sso_ws_pcx_t;
2193232809Sjmallett
2194232809Sjmallett#endif
2195