/***********************license start***************
 * Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/ 39 40 41/** 42 * cvmx-l2c-defs.h 43 * 44 * Configuration and status register (CSR) type definitions for 45 * Octeon l2c. 46 * 47 * This file is auto generated. Do not edit. 48 * 49 * <hr>$Revision$<hr> 50 * 51 */ 52#ifndef __CVMX_L2C_DEFS_H__ 53#define __CVMX_L2C_DEFS_H__ 54 55#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 56#define CVMX_L2C_BIG_CTL CVMX_L2C_BIG_CTL_FUNC() 57static inline uint64_t CVMX_L2C_BIG_CTL_FUNC(void) 58{ 59 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 60 cvmx_warn("CVMX_L2C_BIG_CTL not supported on this chip\n"); 61 return CVMX_ADD_IO_SEG(0x0001180080800030ull); 62} 63#else 64#define CVMX_L2C_BIG_CTL (CVMX_ADD_IO_SEG(0x0001180080800030ull)) 65#endif 66#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 67#define CVMX_L2C_BST CVMX_L2C_BST_FUNC() 68static inline uint64_t CVMX_L2C_BST_FUNC(void) 69{ 70 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 71 cvmx_warn("CVMX_L2C_BST not supported on this chip\n"); 72 return CVMX_ADD_IO_SEG(0x00011800808007F8ull); 73} 74#else 75#define CVMX_L2C_BST (CVMX_ADD_IO_SEG(0x00011800808007F8ull)) 76#endif 77#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 78#define CVMX_L2C_BST0 CVMX_L2C_BST0_FUNC() 79static inline uint64_t CVMX_L2C_BST0_FUNC(void) 80{ 81 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 82 cvmx_warn("CVMX_L2C_BST0 not supported on this chip\n"); 83 return CVMX_ADD_IO_SEG(0x00011800800007F8ull); 84} 85#else 86#define CVMX_L2C_BST0 (CVMX_ADD_IO_SEG(0x00011800800007F8ull)) 87#endif 88#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 89#define CVMX_L2C_BST1 CVMX_L2C_BST1_FUNC() 90static inline uint64_t CVMX_L2C_BST1_FUNC(void) 91{ 92 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || 
OCTEON_IS_MODEL(OCTEON_CN5XXX))) 93 cvmx_warn("CVMX_L2C_BST1 not supported on this chip\n"); 94 return CVMX_ADD_IO_SEG(0x00011800800007F0ull); 95} 96#else 97#define CVMX_L2C_BST1 (CVMX_ADD_IO_SEG(0x00011800800007F0ull)) 98#endif 99#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 100#define CVMX_L2C_BST2 CVMX_L2C_BST2_FUNC() 101static inline uint64_t CVMX_L2C_BST2_FUNC(void) 102{ 103 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 104 cvmx_warn("CVMX_L2C_BST2 not supported on this chip\n"); 105 return CVMX_ADD_IO_SEG(0x00011800800007E8ull); 106} 107#else 108#define CVMX_L2C_BST2 (CVMX_ADD_IO_SEG(0x00011800800007E8ull)) 109#endif 110#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 111static inline uint64_t CVMX_L2C_BST_MEMX(unsigned long block_id) 112{ 113 if (!( 114 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 115 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 116 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 117 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 118 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 119 cvmx_warn("CVMX_L2C_BST_MEMX(%lu) is invalid on this chip\n", block_id); 120 return CVMX_ADD_IO_SEG(0x0001180080C007F8ull) + ((block_id) & 3) * 0x40000ull; 121} 122#else 123#define CVMX_L2C_BST_MEMX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F8ull) + ((block_id) & 3) * 0x40000ull) 124#endif 125#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 126static inline uint64_t CVMX_L2C_BST_TDTX(unsigned long block_id) 127{ 128 if (!( 129 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 130 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 131 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 132 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 133 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 134 cvmx_warn("CVMX_L2C_BST_TDTX(%lu) is invalid on this chip\n", block_id); 135 return CVMX_ADD_IO_SEG(0x0001180080A007F0ull) + ((block_id) & 3) * 0x40000ull; 136} 137#else 138#define 
CVMX_L2C_BST_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F0ull) + ((block_id) & 3) * 0x40000ull) 139#endif 140#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 141static inline uint64_t CVMX_L2C_BST_TTGX(unsigned long block_id) 142{ 143 if (!( 144 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 145 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 146 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 147 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 148 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 149 cvmx_warn("CVMX_L2C_BST_TTGX(%lu) is invalid on this chip\n", block_id); 150 return CVMX_ADD_IO_SEG(0x0001180080A007F8ull) + ((block_id) & 3) * 0x40000ull; 151} 152#else 153#define CVMX_L2C_BST_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F8ull) + ((block_id) & 3) * 0x40000ull) 154#endif 155#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 156#define CVMX_L2C_CFG CVMX_L2C_CFG_FUNC() 157static inline uint64_t CVMX_L2C_CFG_FUNC(void) 158{ 159 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 160 cvmx_warn("CVMX_L2C_CFG not supported on this chip\n"); 161 return CVMX_ADD_IO_SEG(0x0001180080000000ull); 162} 163#else 164#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull)) 165#endif 166#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 167static inline uint64_t CVMX_L2C_COP0_MAPX(unsigned long offset) 168{ 169 if (!( 170 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1023) || ((offset >= 16128) && (offset <= 16383)))) || 171 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1535) || ((offset >= 16128) && (offset <= 16383)))) || 172 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 2559) || ((offset >= 16128) && (offset <= 16383)))) || 173 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 8191) || ((offset >= 16128) && (offset <= 16383)))) || 174 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1023) || ((offset >= 16128) && (offset <= 16383)))))) 175 cvmx_warn("CVMX_L2C_COP0_MAPX(%lu) is invalid on this chip\n", offset); 176 return 
CVMX_ADD_IO_SEG(0x0001180080940000ull) + ((offset) & 16383) * 8; 177} 178#else 179#define CVMX_L2C_COP0_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080940000ull) + ((offset) & 16383) * 8) 180#endif 181#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 182#define CVMX_L2C_CTL CVMX_L2C_CTL_FUNC() 183static inline uint64_t CVMX_L2C_CTL_FUNC(void) 184{ 185 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 186 cvmx_warn("CVMX_L2C_CTL not supported on this chip\n"); 187 return CVMX_ADD_IO_SEG(0x0001180080800000ull); 188} 189#else 190#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull)) 191#endif 192#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 193#define CVMX_L2C_DBG CVMX_L2C_DBG_FUNC() 194static inline uint64_t CVMX_L2C_DBG_FUNC(void) 195{ 196 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 197 cvmx_warn("CVMX_L2C_DBG not supported on this chip\n"); 198 return CVMX_ADD_IO_SEG(0x0001180080000030ull); 199} 200#else 201#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull)) 202#endif 203#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 204#define CVMX_L2C_DUT CVMX_L2C_DUT_FUNC() 205static inline uint64_t CVMX_L2C_DUT_FUNC(void) 206{ 207 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 208 cvmx_warn("CVMX_L2C_DUT not supported on this chip\n"); 209 return CVMX_ADD_IO_SEG(0x0001180080000050ull); 210} 211#else 212#define CVMX_L2C_DUT (CVMX_ADD_IO_SEG(0x0001180080000050ull)) 213#endif 214#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 215static inline uint64_t CVMX_L2C_DUT_MAPX(unsigned long offset) 216{ 217 if (!( 218 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1023))) || 219 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1535))) || 220 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 2559))) || 221 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 8191))) || 222 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1023))))) 223 
cvmx_warn("CVMX_L2C_DUT_MAPX(%lu) is invalid on this chip\n", offset); 224 return CVMX_ADD_IO_SEG(0x0001180080E00000ull) + ((offset) & 8191) * 8; 225} 226#else 227#define CVMX_L2C_DUT_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080E00000ull) + ((offset) & 8191) * 8) 228#endif 229#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 230static inline uint64_t CVMX_L2C_ERR_TDTX(unsigned long block_id) 231{ 232 if (!( 233 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 234 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 235 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 236 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 237 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 238 cvmx_warn("CVMX_L2C_ERR_TDTX(%lu) is invalid on this chip\n", block_id); 239 return CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull; 240} 241#else 242#define CVMX_L2C_ERR_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull) 243#endif 244#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 245static inline uint64_t CVMX_L2C_ERR_TTGX(unsigned long block_id) 246{ 247 if (!( 248 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 249 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 250 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 251 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 252 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 253 cvmx_warn("CVMX_L2C_ERR_TTGX(%lu) is invalid on this chip\n", block_id); 254 return CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull; 255} 256#else 257#define CVMX_L2C_ERR_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull) 258#endif 259#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 260static inline uint64_t CVMX_L2C_ERR_VBFX(unsigned long block_id) 261{ 262 if (!( 263 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 264 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 265 
(OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 266 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 267 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 268 cvmx_warn("CVMX_L2C_ERR_VBFX(%lu) is invalid on this chip\n", block_id); 269 return CVMX_ADD_IO_SEG(0x0001180080C007F0ull) + ((block_id) & 3) * 0x40000ull; 270} 271#else 272#define CVMX_L2C_ERR_VBFX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F0ull) + ((block_id) & 3) * 0x40000ull) 273#endif 274#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 275#define CVMX_L2C_ERR_XMC CVMX_L2C_ERR_XMC_FUNC() 276static inline uint64_t CVMX_L2C_ERR_XMC_FUNC(void) 277{ 278 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 279 cvmx_warn("CVMX_L2C_ERR_XMC not supported on this chip\n"); 280 return CVMX_ADD_IO_SEG(0x00011800808007D8ull); 281} 282#else 283#define CVMX_L2C_ERR_XMC (CVMX_ADD_IO_SEG(0x00011800808007D8ull)) 284#endif 285#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 286#define CVMX_L2C_GRPWRR0 CVMX_L2C_GRPWRR0_FUNC() 287static inline uint64_t CVMX_L2C_GRPWRR0_FUNC(void) 288{ 289 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 290 cvmx_warn("CVMX_L2C_GRPWRR0 not supported on this chip\n"); 291 return CVMX_ADD_IO_SEG(0x00011800800000C8ull); 292} 293#else 294#define CVMX_L2C_GRPWRR0 (CVMX_ADD_IO_SEG(0x00011800800000C8ull)) 295#endif 296#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 297#define CVMX_L2C_GRPWRR1 CVMX_L2C_GRPWRR1_FUNC() 298static inline uint64_t CVMX_L2C_GRPWRR1_FUNC(void) 299{ 300 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 301 cvmx_warn("CVMX_L2C_GRPWRR1 not supported on this chip\n"); 302 return CVMX_ADD_IO_SEG(0x00011800800000D0ull); 303} 304#else 305#define CVMX_L2C_GRPWRR1 (CVMX_ADD_IO_SEG(0x00011800800000D0ull)) 306#endif 307#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 308#define CVMX_L2C_INT_EN CVMX_L2C_INT_EN_FUNC() 309static inline 
uint64_t CVMX_L2C_INT_EN_FUNC(void) 310{ 311 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 312 cvmx_warn("CVMX_L2C_INT_EN not supported on this chip\n"); 313 return CVMX_ADD_IO_SEG(0x0001180080000100ull); 314} 315#else 316#define CVMX_L2C_INT_EN (CVMX_ADD_IO_SEG(0x0001180080000100ull)) 317#endif 318#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 319#define CVMX_L2C_INT_ENA CVMX_L2C_INT_ENA_FUNC() 320static inline uint64_t CVMX_L2C_INT_ENA_FUNC(void) 321{ 322 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 323 cvmx_warn("CVMX_L2C_INT_ENA not supported on this chip\n"); 324 return CVMX_ADD_IO_SEG(0x0001180080800020ull); 325} 326#else 327#define CVMX_L2C_INT_ENA (CVMX_ADD_IO_SEG(0x0001180080800020ull)) 328#endif 329#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 330#define CVMX_L2C_INT_REG CVMX_L2C_INT_REG_FUNC() 331static inline uint64_t CVMX_L2C_INT_REG_FUNC(void) 332{ 333 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 334 cvmx_warn("CVMX_L2C_INT_REG not supported on this chip\n"); 335 return CVMX_ADD_IO_SEG(0x0001180080800018ull); 336} 337#else 338#define CVMX_L2C_INT_REG (CVMX_ADD_IO_SEG(0x0001180080800018ull)) 339#endif 340#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 341#define CVMX_L2C_INT_STAT CVMX_L2C_INT_STAT_FUNC() 342static inline uint64_t CVMX_L2C_INT_STAT_FUNC(void) 343{ 344 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 345 cvmx_warn("CVMX_L2C_INT_STAT not supported on this chip\n"); 346 return CVMX_ADD_IO_SEG(0x00011800800000F8ull); 347} 348#else 349#define CVMX_L2C_INT_STAT (CVMX_ADD_IO_SEG(0x00011800800000F8ull)) 350#endif 351#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 352static inline uint64_t CVMX_L2C_IOCX_PFC(unsigned long block_id) 353{ 354 if (!( 355 
(OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 356 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 357 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 358 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) || 359 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 360 cvmx_warn("CVMX_L2C_IOCX_PFC(%lu) is invalid on this chip\n", block_id); 361 return CVMX_ADD_IO_SEG(0x0001180080800420ull); 362} 363#else 364#define CVMX_L2C_IOCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800420ull)) 365#endif 366#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 367static inline uint64_t CVMX_L2C_IORX_PFC(unsigned long block_id) 368{ 369 if (!( 370 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 371 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 372 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 373 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id == 0))) || 374 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 375 cvmx_warn("CVMX_L2C_IORX_PFC(%lu) is invalid on this chip\n", block_id); 376 return CVMX_ADD_IO_SEG(0x0001180080800428ull); 377} 378#else 379#define CVMX_L2C_IORX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800428ull)) 380#endif 381#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 382#define CVMX_L2C_LCKBASE CVMX_L2C_LCKBASE_FUNC() 383static inline uint64_t CVMX_L2C_LCKBASE_FUNC(void) 384{ 385 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 386 cvmx_warn("CVMX_L2C_LCKBASE not supported on this chip\n"); 387 return CVMX_ADD_IO_SEG(0x0001180080000058ull); 388} 389#else 390#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull)) 391#endif 392#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 393#define CVMX_L2C_LCKOFF CVMX_L2C_LCKOFF_FUNC() 394static inline uint64_t CVMX_L2C_LCKOFF_FUNC(void) 395{ 396 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 397 cvmx_warn("CVMX_L2C_LCKOFF not supported on this chip\n"); 398 return CVMX_ADD_IO_SEG(0x0001180080000060ull); 399} 400#else 401#define 
CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull)) 402#endif 403#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 404#define CVMX_L2C_LFB0 CVMX_L2C_LFB0_FUNC() 405static inline uint64_t CVMX_L2C_LFB0_FUNC(void) 406{ 407 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 408 cvmx_warn("CVMX_L2C_LFB0 not supported on this chip\n"); 409 return CVMX_ADD_IO_SEG(0x0001180080000038ull); 410} 411#else 412#define CVMX_L2C_LFB0 (CVMX_ADD_IO_SEG(0x0001180080000038ull)) 413#endif 414#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 415#define CVMX_L2C_LFB1 CVMX_L2C_LFB1_FUNC() 416static inline uint64_t CVMX_L2C_LFB1_FUNC(void) 417{ 418 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 419 cvmx_warn("CVMX_L2C_LFB1 not supported on this chip\n"); 420 return CVMX_ADD_IO_SEG(0x0001180080000040ull); 421} 422#else 423#define CVMX_L2C_LFB1 (CVMX_ADD_IO_SEG(0x0001180080000040ull)) 424#endif 425#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 426#define CVMX_L2C_LFB2 CVMX_L2C_LFB2_FUNC() 427static inline uint64_t CVMX_L2C_LFB2_FUNC(void) 428{ 429 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 430 cvmx_warn("CVMX_L2C_LFB2 not supported on this chip\n"); 431 return CVMX_ADD_IO_SEG(0x0001180080000048ull); 432} 433#else 434#define CVMX_L2C_LFB2 (CVMX_ADD_IO_SEG(0x0001180080000048ull)) 435#endif 436#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 437#define CVMX_L2C_LFB3 CVMX_L2C_LFB3_FUNC() 438static inline uint64_t CVMX_L2C_LFB3_FUNC(void) 439{ 440 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 441 cvmx_warn("CVMX_L2C_LFB3 not supported on this chip\n"); 442 return CVMX_ADD_IO_SEG(0x00011800800000B8ull); 443} 444#else 445#define CVMX_L2C_LFB3 (CVMX_ADD_IO_SEG(0x00011800800000B8ull)) 446#endif 447#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 448#define CVMX_L2C_OOB CVMX_L2C_OOB_FUNC() 449static inline uint64_t CVMX_L2C_OOB_FUNC(void) 450{ 451 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 452 cvmx_warn("CVMX_L2C_OOB not 
supported on this chip\n"); 453 return CVMX_ADD_IO_SEG(0x00011800800000D8ull); 454} 455#else 456#define CVMX_L2C_OOB (CVMX_ADD_IO_SEG(0x00011800800000D8ull)) 457#endif 458#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 459#define CVMX_L2C_OOB1 CVMX_L2C_OOB1_FUNC() 460static inline uint64_t CVMX_L2C_OOB1_FUNC(void) 461{ 462 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 463 cvmx_warn("CVMX_L2C_OOB1 not supported on this chip\n"); 464 return CVMX_ADD_IO_SEG(0x00011800800000E0ull); 465} 466#else 467#define CVMX_L2C_OOB1 (CVMX_ADD_IO_SEG(0x00011800800000E0ull)) 468#endif 469#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 470#define CVMX_L2C_OOB2 CVMX_L2C_OOB2_FUNC() 471static inline uint64_t CVMX_L2C_OOB2_FUNC(void) 472{ 473 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 474 cvmx_warn("CVMX_L2C_OOB2 not supported on this chip\n"); 475 return CVMX_ADD_IO_SEG(0x00011800800000E8ull); 476} 477#else 478#define CVMX_L2C_OOB2 (CVMX_ADD_IO_SEG(0x00011800800000E8ull)) 479#endif 480#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 481#define CVMX_L2C_OOB3 CVMX_L2C_OOB3_FUNC() 482static inline uint64_t CVMX_L2C_OOB3_FUNC(void) 483{ 484 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 485 cvmx_warn("CVMX_L2C_OOB3 not supported on this chip\n"); 486 return CVMX_ADD_IO_SEG(0x00011800800000F0ull); 487} 488#else 489#define CVMX_L2C_OOB3 (CVMX_ADD_IO_SEG(0x00011800800000F0ull)) 490#endif 491#define CVMX_L2C_PFC0 CVMX_L2C_PFCX(0) 492#define CVMX_L2C_PFC1 CVMX_L2C_PFCX(1) 493#define CVMX_L2C_PFC2 CVMX_L2C_PFCX(2) 494#define CVMX_L2C_PFC3 CVMX_L2C_PFCX(3) 495#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 496#define CVMX_L2C_PFCTL CVMX_L2C_PFCTL_FUNC() 497static inline uint64_t CVMX_L2C_PFCTL_FUNC(void) 498{ 499 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 500 cvmx_warn("CVMX_L2C_PFCTL not supported on this chip\n"); 501 return CVMX_ADD_IO_SEG(0x0001180080000090ull); 502} 503#else 504#define CVMX_L2C_PFCTL 
(CVMX_ADD_IO_SEG(0x0001180080000090ull)) 505#endif 506#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 507static inline uint64_t CVMX_L2C_PFCX(unsigned long offset) 508{ 509 if (!( 510 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 3))) || 511 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 3))) || 512 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 3))) || 513 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 3))) || 514 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) || 515 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) || 516 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 3))))) 517 cvmx_warn("CVMX_L2C_PFCX(%lu) is invalid on this chip\n", offset); 518 return CVMX_ADD_IO_SEG(0x0001180080000098ull) + ((offset) & 3) * 8; 519} 520#else 521#define CVMX_L2C_PFCX(offset) (CVMX_ADD_IO_SEG(0x0001180080000098ull) + ((offset) & 3) * 8) 522#endif 523#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 524#define CVMX_L2C_PPGRP CVMX_L2C_PPGRP_FUNC() 525static inline uint64_t CVMX_L2C_PPGRP_FUNC(void) 526{ 527 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))) 528 cvmx_warn("CVMX_L2C_PPGRP not supported on this chip\n"); 529 return CVMX_ADD_IO_SEG(0x00011800800000C0ull); 530} 531#else 532#define CVMX_L2C_PPGRP (CVMX_ADD_IO_SEG(0x00011800800000C0ull)) 533#endif 534#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 535static inline uint64_t CVMX_L2C_QOS_IOBX(unsigned long offset) 536{ 537 if (!( 538 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) || 539 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) || 540 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) || 541 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) || 542 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0))))) 543 cvmx_warn("CVMX_L2C_QOS_IOBX(%lu) is invalid on this chip\n", offset); 544 return CVMX_ADD_IO_SEG(0x0001180080880200ull) + ((offset) & 1) * 8; 545} 546#else 547#define CVMX_L2C_QOS_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080880200ull) + ((offset) & 1) * 8) 548#endif 549#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 
550static inline uint64_t CVMX_L2C_QOS_PPX(unsigned long offset) 551{ 552 if (!( 553 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) || 554 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) || 555 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) || 556 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) || 557 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3))))) 558 cvmx_warn("CVMX_L2C_QOS_PPX(%lu) is invalid on this chip\n", offset); 559 return CVMX_ADD_IO_SEG(0x0001180080880000ull) + ((offset) & 31) * 8; 560} 561#else 562#define CVMX_L2C_QOS_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080880000ull) + ((offset) & 31) * 8) 563#endif 564#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 565#define CVMX_L2C_QOS_WGT CVMX_L2C_QOS_WGT_FUNC() 566static inline uint64_t CVMX_L2C_QOS_WGT_FUNC(void) 567{ 568 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 569 cvmx_warn("CVMX_L2C_QOS_WGT not supported on this chip\n"); 570 return CVMX_ADD_IO_SEG(0x0001180080800008ull); 571} 572#else 573#define CVMX_L2C_QOS_WGT (CVMX_ADD_IO_SEG(0x0001180080800008ull)) 574#endif 575#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 576static inline uint64_t CVMX_L2C_RSCX_PFC(unsigned long offset) 577{ 578 if (!( 579 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) || 580 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) || 581 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) || 582 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) || 583 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0))))) 584 cvmx_warn("CVMX_L2C_RSCX_PFC(%lu) is invalid on this chip\n", offset); 585 return CVMX_ADD_IO_SEG(0x0001180080800410ull) + ((offset) & 3) * 64; 586} 587#else 588#define CVMX_L2C_RSCX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800410ull) + ((offset) & 3) * 64) 589#endif 590#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 591static inline uint64_t CVMX_L2C_RSDX_PFC(unsigned long offset) 592{ 593 if (!( 
594 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) || 595 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) || 596 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) || 597 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) || 598 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0))))) 599 cvmx_warn("CVMX_L2C_RSDX_PFC(%lu) is invalid on this chip\n", offset); 600 return CVMX_ADD_IO_SEG(0x0001180080800418ull) + ((offset) & 3) * 64; 601} 602#else 603#define CVMX_L2C_RSDX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800418ull) + ((offset) & 3) * 64) 604#endif 605#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 606#define CVMX_L2C_SPAR0 CVMX_L2C_SPAR0_FUNC() 607static inline uint64_t CVMX_L2C_SPAR0_FUNC(void) 608{ 609 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 610 cvmx_warn("CVMX_L2C_SPAR0 not supported on this chip\n"); 611 return CVMX_ADD_IO_SEG(0x0001180080000068ull); 612} 613#else 614#define CVMX_L2C_SPAR0 (CVMX_ADD_IO_SEG(0x0001180080000068ull)) 615#endif 616#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 617#define CVMX_L2C_SPAR1 CVMX_L2C_SPAR1_FUNC() 618static inline uint64_t CVMX_L2C_SPAR1_FUNC(void) 619{ 620 if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))) 621 cvmx_warn("CVMX_L2C_SPAR1 not supported on this chip\n"); 622 return CVMX_ADD_IO_SEG(0x0001180080000070ull); 623} 624#else 625#define CVMX_L2C_SPAR1 (CVMX_ADD_IO_SEG(0x0001180080000070ull)) 626#endif 627#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 628#define CVMX_L2C_SPAR2 CVMX_L2C_SPAR2_FUNC() 629static inline uint64_t CVMX_L2C_SPAR2_FUNC(void) 630{ 631 if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))) 632 cvmx_warn("CVMX_L2C_SPAR2 not supported on this chip\n"); 633 return CVMX_ADD_IO_SEG(0x0001180080000078ull); 634} 635#else 636#define CVMX_L2C_SPAR2 (CVMX_ADD_IO_SEG(0x0001180080000078ull)) 637#endif 638#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 639#define CVMX_L2C_SPAR3 
CVMX_L2C_SPAR3_FUNC() 640static inline uint64_t CVMX_L2C_SPAR3_FUNC(void) 641{ 642 if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))) 643 cvmx_warn("CVMX_L2C_SPAR3 not supported on this chip\n"); 644 return CVMX_ADD_IO_SEG(0x0001180080000080ull); 645} 646#else 647#define CVMX_L2C_SPAR3 (CVMX_ADD_IO_SEG(0x0001180080000080ull)) 648#endif 649#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 650#define CVMX_L2C_SPAR4 CVMX_L2C_SPAR4_FUNC() 651static inline uint64_t CVMX_L2C_SPAR4_FUNC(void) 652{ 653 if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))) 654 cvmx_warn("CVMX_L2C_SPAR4 not supported on this chip\n"); 655 return CVMX_ADD_IO_SEG(0x0001180080000088ull); 656} 657#else 658#define CVMX_L2C_SPAR4 (CVMX_ADD_IO_SEG(0x0001180080000088ull)) 659#endif 660#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 661static inline uint64_t CVMX_L2C_TADX_ECC0(unsigned long block_id) 662{ 663 if (!( 664 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 665 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 666 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 667 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 668 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 669 cvmx_warn("CVMX_L2C_TADX_ECC0(%lu) is invalid on this chip\n", block_id); 670 return CVMX_ADD_IO_SEG(0x0001180080A00018ull) + ((block_id) & 3) * 0x40000ull; 671} 672#else 673#define CVMX_L2C_TADX_ECC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00018ull) + ((block_id) & 3) * 0x40000ull) 674#endif 675#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 676static inline uint64_t CVMX_L2C_TADX_ECC1(unsigned long block_id) 677{ 678 if (!( 679 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 680 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 681 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 682 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 683 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 684 cvmx_warn("CVMX_L2C_TADX_ECC1(%lu) is invalid on 
this chip\n", block_id); 685 return CVMX_ADD_IO_SEG(0x0001180080A00020ull) + ((block_id) & 3) * 0x40000ull; 686} 687#else 688#define CVMX_L2C_TADX_ECC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00020ull) + ((block_id) & 3) * 0x40000ull) 689#endif 690#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 691static inline uint64_t CVMX_L2C_TADX_IEN(unsigned long block_id) 692{ 693 if (!( 694 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 695 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 696 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 697 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 698 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 699 cvmx_warn("CVMX_L2C_TADX_IEN(%lu) is invalid on this chip\n", block_id); 700 return CVMX_ADD_IO_SEG(0x0001180080A00000ull) + ((block_id) & 3) * 0x40000ull; 701} 702#else 703#define CVMX_L2C_TADX_IEN(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00000ull) + ((block_id) & 3) * 0x40000ull) 704#endif 705#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 706static inline uint64_t CVMX_L2C_TADX_INT(unsigned long block_id) 707{ 708 if (!( 709 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 710 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 711 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 712 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 713 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 714 cvmx_warn("CVMX_L2C_TADX_INT(%lu) is invalid on this chip\n", block_id); 715 return CVMX_ADD_IO_SEG(0x0001180080A00028ull) + ((block_id) & 3) * 0x40000ull; 716} 717#else 718#define CVMX_L2C_TADX_INT(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00028ull) + ((block_id) & 3) * 0x40000ull) 719#endif 720#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 721static inline uint64_t CVMX_L2C_TADX_PFC0(unsigned long block_id) 722{ 723 if (!( 724 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 725 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 726 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) 
|| 727 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 728 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 729 cvmx_warn("CVMX_L2C_TADX_PFC0(%lu) is invalid on this chip\n", block_id); 730 return CVMX_ADD_IO_SEG(0x0001180080A00400ull) + ((block_id) & 3) * 0x40000ull; 731} 732#else 733#define CVMX_L2C_TADX_PFC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00400ull) + ((block_id) & 3) * 0x40000ull) 734#endif 735#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 736static inline uint64_t CVMX_L2C_TADX_PFC1(unsigned long block_id) 737{ 738 if (!( 739 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 740 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 741 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 742 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 743 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 744 cvmx_warn("CVMX_L2C_TADX_PFC1(%lu) is invalid on this chip\n", block_id); 745 return CVMX_ADD_IO_SEG(0x0001180080A00408ull) + ((block_id) & 3) * 0x40000ull; 746} 747#else 748#define CVMX_L2C_TADX_PFC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00408ull) + ((block_id) & 3) * 0x40000ull) 749#endif 750#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 751static inline uint64_t CVMX_L2C_TADX_PFC2(unsigned long block_id) 752{ 753 if (!( 754 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 755 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 756 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 757 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 758 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 759 cvmx_warn("CVMX_L2C_TADX_PFC2(%lu) is invalid on this chip\n", block_id); 760 return CVMX_ADD_IO_SEG(0x0001180080A00410ull) + ((block_id) & 3) * 0x40000ull; 761} 762#else 763#define CVMX_L2C_TADX_PFC2(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00410ull) + ((block_id) & 3) * 0x40000ull) 764#endif 765#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 766static inline uint64_t CVMX_L2C_TADX_PFC3(unsigned long block_id) 767{ 768 if 
(!( 769 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 770 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 771 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 772 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 773 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 774 cvmx_warn("CVMX_L2C_TADX_PFC3(%lu) is invalid on this chip\n", block_id); 775 return CVMX_ADD_IO_SEG(0x0001180080A00418ull) + ((block_id) & 3) * 0x40000ull; 776} 777#else 778#define CVMX_L2C_TADX_PFC3(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00418ull) + ((block_id) & 3) * 0x40000ull) 779#endif 780#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 781static inline uint64_t CVMX_L2C_TADX_PRF(unsigned long block_id) 782{ 783 if (!( 784 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 785 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 786 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 787 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 788 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 789 cvmx_warn("CVMX_L2C_TADX_PRF(%lu) is invalid on this chip\n", block_id); 790 return CVMX_ADD_IO_SEG(0x0001180080A00008ull) + ((block_id) & 3) * 0x40000ull; 791} 792#else 793#define CVMX_L2C_TADX_PRF(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00008ull) + ((block_id) & 3) * 0x40000ull) 794#endif 795#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 796static inline uint64_t CVMX_L2C_TADX_TAG(unsigned long block_id) 797{ 798 if (!( 799 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 800 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 801 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 802 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 803 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 804 cvmx_warn("CVMX_L2C_TADX_TAG(%lu) is invalid on this chip\n", block_id); 805 return CVMX_ADD_IO_SEG(0x0001180080A00010ull) + ((block_id) & 3) * 0x40000ull; 806} 807#else 808#define CVMX_L2C_TADX_TAG(block_id) 
(CVMX_ADD_IO_SEG(0x0001180080A00010ull) + ((block_id) & 3) * 0x40000ull) 809#endif 810#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 811#define CVMX_L2C_VER_ID CVMX_L2C_VER_ID_FUNC() 812static inline uint64_t CVMX_L2C_VER_ID_FUNC(void) 813{ 814 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 815 cvmx_warn("CVMX_L2C_VER_ID not supported on this chip\n"); 816 return CVMX_ADD_IO_SEG(0x00011800808007E0ull); 817} 818#else 819#define CVMX_L2C_VER_ID (CVMX_ADD_IO_SEG(0x00011800808007E0ull)) 820#endif 821#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 822#define CVMX_L2C_VER_IOB CVMX_L2C_VER_IOB_FUNC() 823static inline uint64_t CVMX_L2C_VER_IOB_FUNC(void) 824{ 825 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 826 cvmx_warn("CVMX_L2C_VER_IOB not supported on this chip\n"); 827 return CVMX_ADD_IO_SEG(0x00011800808007F0ull); 828} 829#else 830#define CVMX_L2C_VER_IOB (CVMX_ADD_IO_SEG(0x00011800808007F0ull)) 831#endif 832#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 833#define CVMX_L2C_VER_MSC CVMX_L2C_VER_MSC_FUNC() 834static inline uint64_t CVMX_L2C_VER_MSC_FUNC(void) 835{ 836 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 837 cvmx_warn("CVMX_L2C_VER_MSC not supported on this chip\n"); 838 return CVMX_ADD_IO_SEG(0x00011800808007D0ull); 839} 840#else 841#define CVMX_L2C_VER_MSC (CVMX_ADD_IO_SEG(0x00011800808007D0ull)) 842#endif 843#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 844#define CVMX_L2C_VER_PP CVMX_L2C_VER_PP_FUNC() 845static inline uint64_t CVMX_L2C_VER_PP_FUNC(void) 846{ 847 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || 
OCTEON_IS_MODEL(OCTEON_CNF71XX))) 848 cvmx_warn("CVMX_L2C_VER_PP not supported on this chip\n"); 849 return CVMX_ADD_IO_SEG(0x00011800808007E8ull); 850} 851#else 852#define CVMX_L2C_VER_PP (CVMX_ADD_IO_SEG(0x00011800808007E8ull)) 853#endif 854#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 855static inline uint64_t CVMX_L2C_VIRTID_IOBX(unsigned long offset) 856{ 857 if (!( 858 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) || 859 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) || 860 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) || 861 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) || 862 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0))))) 863 cvmx_warn("CVMX_L2C_VIRTID_IOBX(%lu) is invalid on this chip\n", offset); 864 return CVMX_ADD_IO_SEG(0x00011800808C0200ull) + ((offset) & 1) * 8; 865} 866#else 867#define CVMX_L2C_VIRTID_IOBX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0200ull) + ((offset) & 1) * 8) 868#endif 869#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 870static inline uint64_t CVMX_L2C_VIRTID_PPX(unsigned long offset) 871{ 872 if (!( 873 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) || 874 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) || 875 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) || 876 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) || 877 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3))))) 878 cvmx_warn("CVMX_L2C_VIRTID_PPX(%lu) is invalid on this chip\n", offset); 879 return CVMX_ADD_IO_SEG(0x00011800808C0000ull) + ((offset) & 31) * 8; 880} 881#else 882#define CVMX_L2C_VIRTID_PPX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0000ull) + ((offset) & 31) * 8) 883#endif 884#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 885#define CVMX_L2C_VRT_CTL CVMX_L2C_VRT_CTL_FUNC() 886static inline uint64_t CVMX_L2C_VRT_CTL_FUNC(void) 887{ 888 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 889 cvmx_warn("CVMX_L2C_VRT_CTL not 
supported on this chip\n"); 890 return CVMX_ADD_IO_SEG(0x0001180080800010ull); 891} 892#else 893#define CVMX_L2C_VRT_CTL (CVMX_ADD_IO_SEG(0x0001180080800010ull)) 894#endif 895#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 896static inline uint64_t CVMX_L2C_VRT_MEMX(unsigned long offset) 897{ 898 if (!( 899 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1023))) || 900 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1023))) || 901 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1023))) || 902 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1023))) || 903 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1023))))) 904 cvmx_warn("CVMX_L2C_VRT_MEMX(%lu) is invalid on this chip\n", offset); 905 return CVMX_ADD_IO_SEG(0x0001180080900000ull) + ((offset) & 1023) * 8; 906} 907#else 908#define CVMX_L2C_VRT_MEMX(offset) (CVMX_ADD_IO_SEG(0x0001180080900000ull) + ((offset) & 1023) * 8) 909#endif 910#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 911static inline uint64_t CVMX_L2C_WPAR_IOBX(unsigned long offset) 912{ 913 if (!( 914 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) || 915 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) || 916 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) || 917 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) || 918 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0))))) 919 cvmx_warn("CVMX_L2C_WPAR_IOBX(%lu) is invalid on this chip\n", offset); 920 return CVMX_ADD_IO_SEG(0x0001180080840200ull) + ((offset) & 1) * 8; 921} 922#else 923#define CVMX_L2C_WPAR_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080840200ull) + ((offset) & 1) * 8) 924#endif 925#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 926static inline uint64_t CVMX_L2C_WPAR_PPX(unsigned long offset) 927{ 928 if (!( 929 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) || 930 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5))) || 931 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 9))) || 932 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 31))) || 933 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3))))) 934 
cvmx_warn("CVMX_L2C_WPAR_PPX(%lu) is invalid on this chip\n", offset); 935 return CVMX_ADD_IO_SEG(0x0001180080840000ull) + ((offset) & 31) * 8; 936} 937#else 938#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + ((offset) & 31) * 8) 939#endif 940#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 941static inline uint64_t CVMX_L2C_XMCX_PFC(unsigned long offset) 942{ 943 if (!( 944 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) || 945 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) || 946 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) || 947 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) || 948 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0))))) 949 cvmx_warn("CVMX_L2C_XMCX_PFC(%lu) is invalid on this chip\n", offset); 950 return CVMX_ADD_IO_SEG(0x0001180080800400ull) + ((offset) & 3) * 64; 951} 952#else 953#define CVMX_L2C_XMCX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800400ull) + ((offset) & 3) * 64) 954#endif 955#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 956#define CVMX_L2C_XMC_CMD CVMX_L2C_XMC_CMD_FUNC() 957static inline uint64_t CVMX_L2C_XMC_CMD_FUNC(void) 958{ 959 if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))) 960 cvmx_warn("CVMX_L2C_XMC_CMD not supported on this chip\n"); 961 return CVMX_ADD_IO_SEG(0x0001180080800028ull); 962} 963#else 964#define CVMX_L2C_XMC_CMD (CVMX_ADD_IO_SEG(0x0001180080800028ull)) 965#endif 966#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 967static inline uint64_t CVMX_L2C_XMDX_PFC(unsigned long offset) 968{ 969 if (!( 970 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset == 0))) || 971 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset == 0))) || 972 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset == 0))) || 973 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) || 974 (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset == 0))))) 975 cvmx_warn("CVMX_L2C_XMDX_PFC(%lu) is invalid on this chip\n", offset); 976 return 
CVMX_ADD_IO_SEG(0x0001180080800408ull) + ((offset) & 3) * 64; 977} 978#else 979#define CVMX_L2C_XMDX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800408ull) + ((offset) & 3) * 64) 980#endif 981 982/** 983 * cvmx_l2c_big_ctl 984 * 985 * L2C_BIG_CTL = L2C Big memory control register 986 * 987 * 988 * Notes: 989 * (1) BIGRD interrupts can occur during normal operation as the PP's are allowed to prefetch to 990 * non-existent memory locations. Therefore, BIGRD is for informational purposes only. 991 * 992 * (2) When HOLEWR/BIGWR blocks a store L2C_VER_ID, L2C_VER_PP, L2C_VER_IOB, and L2C_VER_MSC will be 993 * loaded just like a store which is blocked by VRTWR. Additionally, L2C_ERR_XMC will be loaded. 994 */ 995union cvmx_l2c_big_ctl { 996 uint64_t u64; 997 struct cvmx_l2c_big_ctl_s { 998#ifdef __BIG_ENDIAN_BITFIELD 999 uint64_t reserved_8_63 : 56; 1000 uint64_t maxdram : 4; /**< Amount of configured DRAM 1001 0 = reserved 1002 1 = 512MB 1003 2 = 1GB 1004 3 = 2GB 1005 4 = 4GB 1006 5 = 8GB 1007 6 = 16GB 1008 7 = 32GB 1009 8 = 64GB (**reserved in 63xx**) 1010 9 = 128GB (**reserved in 63xx**) 1011 10-15 reserved 1012 Violations of this limit causes 1013 L2C to set L2C_INT_REG[BIGRD/BIGWR]. */ 1014 uint64_t reserved_1_3 : 3; 1015 uint64_t disable : 1; /**< When set, disables the BIGWR/BIGRD logic completely 1016 and reverts HOLEWR to 63xx pass 1.x behavior. 1017 When clear, BIGWR and HOLEWR block stores in the same 1018 same manner as the VRT logic, and BIGRD is reported. 
*/ 1019#else 1020 uint64_t disable : 1; 1021 uint64_t reserved_1_3 : 3; 1022 uint64_t maxdram : 4; 1023 uint64_t reserved_8_63 : 56; 1024#endif 1025 } s; 1026 struct cvmx_l2c_big_ctl_s cn61xx; 1027 struct cvmx_l2c_big_ctl_s cn63xx; 1028 struct cvmx_l2c_big_ctl_s cn66xx; 1029 struct cvmx_l2c_big_ctl_s cn68xx; 1030 struct cvmx_l2c_big_ctl_s cn68xxp1; 1031 struct cvmx_l2c_big_ctl_s cnf71xx; 1032}; 1033typedef union cvmx_l2c_big_ctl cvmx_l2c_big_ctl_t; 1034 1035/** 1036 * cvmx_l2c_bst 1037 * 1038 * L2C_BST = L2C BIST Status 1039 * 1040 */ 1041union cvmx_l2c_bst { 1042 uint64_t u64; 1043 struct cvmx_l2c_bst_s { 1044#ifdef __BIG_ENDIAN_BITFIELD 1045 uint64_t dutfl : 32; /**< BIST failure status for PP0-3 DUT */ 1046 uint64_t rbffl : 4; /**< BIST failure status for RBF0-3 */ 1047 uint64_t xbffl : 4; /**< BIST failure status for XBF0-3 */ 1048 uint64_t tdpfl : 4; /**< BIST failure status for TDP0-3 */ 1049 uint64_t ioccmdfl : 4; /**< BIST failure status for IOCCMD */ 1050 uint64_t iocdatfl : 4; /**< BIST failure status for IOCDAT */ 1051 uint64_t dutresfl : 4; /**< BIST failure status for DUTRES */ 1052 uint64_t vrtfl : 4; /**< BIST failure status for VRT0 */ 1053 uint64_t tdffl : 4; /**< BIST failure status for TDF0 */ 1054#else 1055 uint64_t tdffl : 4; 1056 uint64_t vrtfl : 4; 1057 uint64_t dutresfl : 4; 1058 uint64_t iocdatfl : 4; 1059 uint64_t ioccmdfl : 4; 1060 uint64_t tdpfl : 4; 1061 uint64_t xbffl : 4; 1062 uint64_t rbffl : 4; 1063 uint64_t dutfl : 32; 1064#endif 1065 } s; 1066 struct cvmx_l2c_bst_cn61xx { 1067#ifdef __BIG_ENDIAN_BITFIELD 1068 uint64_t reserved_36_63 : 28; 1069 uint64_t dutfl : 4; /**< BIST failure status for PP0-3 DUT */ 1070 uint64_t reserved_17_31 : 15; 1071 uint64_t ioccmdfl : 1; /**< BIST failure status for IOCCMD */ 1072 uint64_t reserved_13_15 : 3; 1073 uint64_t iocdatfl : 1; /**< BIST failure status for IOCDAT */ 1074 uint64_t reserved_9_11 : 3; 1075 uint64_t dutresfl : 1; /**< BIST failure status for DUTRES */ 1076 uint64_t reserved_5_7 : 
3; 1077 uint64_t vrtfl : 1; /**< BIST failure status for VRT0 */ 1078 uint64_t reserved_1_3 : 3; 1079 uint64_t tdffl : 1; /**< BIST failure status for TDF0 */ 1080#else 1081 uint64_t tdffl : 1; 1082 uint64_t reserved_1_3 : 3; 1083 uint64_t vrtfl : 1; 1084 uint64_t reserved_5_7 : 3; 1085 uint64_t dutresfl : 1; 1086 uint64_t reserved_9_11 : 3; 1087 uint64_t iocdatfl : 1; 1088 uint64_t reserved_13_15 : 3; 1089 uint64_t ioccmdfl : 1; 1090 uint64_t reserved_17_31 : 15; 1091 uint64_t dutfl : 4; 1092 uint64_t reserved_36_63 : 28; 1093#endif 1094 } cn61xx; 1095 struct cvmx_l2c_bst_cn63xx { 1096#ifdef __BIG_ENDIAN_BITFIELD 1097 uint64_t reserved_38_63 : 26; 1098 uint64_t dutfl : 6; /**< BIST failure status for PP0-5 DUT */ 1099 uint64_t reserved_17_31 : 15; 1100 uint64_t ioccmdfl : 1; /**< BIST failure status for IOCCMD */ 1101 uint64_t reserved_13_15 : 3; 1102 uint64_t iocdatfl : 1; /**< BIST failure status for IOCDAT */ 1103 uint64_t reserved_9_11 : 3; 1104 uint64_t dutresfl : 1; /**< BIST failure status for DUTRES */ 1105 uint64_t reserved_5_7 : 3; 1106 uint64_t vrtfl : 1; /**< BIST failure status for VRT0 */ 1107 uint64_t reserved_1_3 : 3; 1108 uint64_t tdffl : 1; /**< BIST failure status for TDF0 */ 1109#else 1110 uint64_t tdffl : 1; 1111 uint64_t reserved_1_3 : 3; 1112 uint64_t vrtfl : 1; 1113 uint64_t reserved_5_7 : 3; 1114 uint64_t dutresfl : 1; 1115 uint64_t reserved_9_11 : 3; 1116 uint64_t iocdatfl : 1; 1117 uint64_t reserved_13_15 : 3; 1118 uint64_t ioccmdfl : 1; 1119 uint64_t reserved_17_31 : 15; 1120 uint64_t dutfl : 6; 1121 uint64_t reserved_38_63 : 26; 1122#endif 1123 } cn63xx; 1124 struct cvmx_l2c_bst_cn63xx cn63xxp1; 1125 struct cvmx_l2c_bst_cn66xx { 1126#ifdef __BIG_ENDIAN_BITFIELD 1127 uint64_t reserved_42_63 : 22; 1128 uint64_t dutfl : 10; /**< BIST failure status for PP0-9 DUT */ 1129 uint64_t reserved_17_31 : 15; 1130 uint64_t ioccmdfl : 1; /**< BIST failure status for IOCCMD */ 1131 uint64_t reserved_13_15 : 3; 1132 uint64_t iocdatfl : 1; /**< BIST 
failure status for IOCDAT */ 1133 uint64_t reserved_9_11 : 3; 1134 uint64_t dutresfl : 1; /**< BIST failure status for DUTRES */ 1135 uint64_t reserved_5_7 : 3; 1136 uint64_t vrtfl : 1; /**< BIST failure status for VRT0 */ 1137 uint64_t reserved_1_3 : 3; 1138 uint64_t tdffl : 1; /**< BIST failure status for TDF0 */ 1139#else 1140 uint64_t tdffl : 1; 1141 uint64_t reserved_1_3 : 3; 1142 uint64_t vrtfl : 1; 1143 uint64_t reserved_5_7 : 3; 1144 uint64_t dutresfl : 1; 1145 uint64_t reserved_9_11 : 3; 1146 uint64_t iocdatfl : 1; 1147 uint64_t reserved_13_15 : 3; 1148 uint64_t ioccmdfl : 1; 1149 uint64_t reserved_17_31 : 15; 1150 uint64_t dutfl : 10; 1151 uint64_t reserved_42_63 : 22; 1152#endif 1153 } cn66xx; 1154 struct cvmx_l2c_bst_s cn68xx; 1155 struct cvmx_l2c_bst_s cn68xxp1; 1156 struct cvmx_l2c_bst_cn61xx cnf71xx; 1157}; 1158typedef union cvmx_l2c_bst cvmx_l2c_bst_t; 1159 1160/** 1161 * cvmx_l2c_bst0 1162 * 1163 * L2C_BST0 = L2C BIST 0 CTL/STAT 1164 * 1165 */ 1166union cvmx_l2c_bst0 { 1167 uint64_t u64; 1168 struct cvmx_l2c_bst0_s { 1169#ifdef __BIG_ENDIAN_BITFIELD 1170 uint64_t reserved_24_63 : 40; 1171 uint64_t dtbnk : 1; /**< DuTag Bank# 1172 When DT=1(BAD), this field provides additional information 1173 about which DuTag Bank (0/1) failed. 
*/ 1174 uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3] 1175 - 0: GOOD (or bist in progress/never run) 1176 - 1: BAD */ 1177 uint64_t dtcnt : 13; /**< DuTag BiST Counter (used to help isolate the failure) 1178 [12]: i (0=FORWARD/1=REVERSE pass) 1179 [11:10]: j (Pattern# 1 of 4) 1180 [9:4]: k (DT Index 1 of 64) 1181 [3:0]: l (DT# 1 of 16 DTs) */ 1182 uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s) 1183 - 0: GOOD (or bist in progress/never run) 1184 - 1: BAD */ 1185 uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM 1186 - 0: GOOD (or bist in progress/never run) 1187 - 1: BAD */ 1188 uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3] 1189 - 0: GOOD (or bist in progress/never run) 1190 - 1: BAD */ 1191#else 1192 uint64_t wlb_dat : 4; 1193 uint64_t stin_msk : 1; 1194 uint64_t dt : 1; 1195 uint64_t dtcnt : 13; 1196 uint64_t wlb_msk : 4; 1197 uint64_t dtbnk : 1; 1198 uint64_t reserved_24_63 : 40; 1199#endif 1200 } s; 1201 struct cvmx_l2c_bst0_cn30xx { 1202#ifdef __BIG_ENDIAN_BITFIELD 1203 uint64_t reserved_23_63 : 41; 1204 uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3] 1205 - 0: GOOD (or bist in progress/never run) 1206 - 1: BAD */ 1207 uint64_t reserved_15_18 : 4; 1208 uint64_t dtcnt : 9; /**< DuTag BiST Counter (used to help isolate the failure) 1209 [8]: i (0=FORWARD/1=REVERSE pass) 1210 [7:6]: j (Pattern# 1 of 4) 1211 [5:0]: k (DT Index 1 of 64) */ 1212 uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s) 1213 - 0: GOOD (or bist in progress/never run) 1214 - 1: BAD */ 1215 uint64_t reserved_4_4 : 1; 1216 uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3] 1217 - 0: GOOD (or bist in progress/never run) 1218 - 1: BAD */ 1219#else 1220 uint64_t wlb_dat : 4; 1221 uint64_t reserved_4_4 : 1; 1222 uint64_t dt : 1; 1223 uint64_t dtcnt : 9; 1224 uint64_t reserved_15_18 : 4; 1225 uint64_t wlb_msk : 4; 1226 uint64_t reserved_23_63 : 41; 1227#endif 1228 } cn30xx; 1229 struct cvmx_l2c_bst0_cn31xx { 1230#ifdef 
__BIG_ENDIAN_BITFIELD 1231 uint64_t reserved_23_63 : 41; 1232 uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3] 1233 - 0: GOOD (or bist in progress/never run) 1234 - 1: BAD */ 1235 uint64_t reserved_16_18 : 3; 1236 uint64_t dtcnt : 10; /**< DuTag BiST Counter (used to help isolate the failure) 1237 [9]: i (0=FORWARD/1=REVERSE pass) 1238 [8:7]: j (Pattern# 1 of 4) 1239 [6:1]: k (DT Index 1 of 64) 1240 [0]: l (DT# 1 of 2 DTs) */ 1241 uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s) 1242 - 0: GOOD (or bist in progress/never run) 1243 - 1: BAD */ 1244 uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM 1245 - 0: GOOD (or bist in progress/never run) 1246 - 1: BAD */ 1247 uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3] 1248 - 0: GOOD (or bist in progress/never run) 1249 - 1: BAD */ 1250#else 1251 uint64_t wlb_dat : 4; 1252 uint64_t stin_msk : 1; 1253 uint64_t dt : 1; 1254 uint64_t dtcnt : 10; 1255 uint64_t reserved_16_18 : 3; 1256 uint64_t wlb_msk : 4; 1257 uint64_t reserved_23_63 : 41; 1258#endif 1259 } cn31xx; 1260 struct cvmx_l2c_bst0_cn38xx { 1261#ifdef __BIG_ENDIAN_BITFIELD 1262 uint64_t reserved_19_63 : 45; 1263 uint64_t dtcnt : 13; /**< DuTag BiST Counter (used to help isolate the failure) 1264 [12]: i (0=FORWARD/1=REVERSE pass) 1265 [11:10]: j (Pattern# 1 of 4) 1266 [9:4]: k (DT Index 1 of 64) 1267 [3:0]: l (DT# 1 of 16 DTs) */ 1268 uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s) 1269 - 0: GOOD (or bist in progress/never run) 1270 - 1: BAD */ 1271 uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM 1272 - 0: GOOD (or bist in progress/never run) 1273 - 1: BAD */ 1274 uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3] 1275 - 0: GOOD (or bist in progress/never run) 1276 - 1: BAD */ 1277#else 1278 uint64_t wlb_dat : 4; 1279 uint64_t stin_msk : 1; 1280 uint64_t dt : 1; 1281 uint64_t dtcnt : 13; 1282 uint64_t reserved_19_63 : 45; 1283#endif 1284 } cn38xx; 1285 struct cvmx_l2c_bst0_cn38xx cn38xxp2; 1286 
struct cvmx_l2c_bst0_cn50xx { 1287#ifdef __BIG_ENDIAN_BITFIELD 1288 uint64_t reserved_24_63 : 40; 1289 uint64_t dtbnk : 1; /**< DuTag Bank# 1290 When DT=1(BAD), this field provides additional information 1291 about which DuTag Bank (0/1) failed. */ 1292 uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3] 1293 - 0: GOOD (or bist in progress/never run) 1294 - 1: BAD */ 1295 uint64_t reserved_16_18 : 3; 1296 uint64_t dtcnt : 10; /**< DuTag BiST Counter (used to help isolate the failure) 1297 [9]: i (0=FORWARD/1=REVERSE pass) 1298 [8:7]: j (Pattern# 1 of 4) 1299 [6:1]: k (DT Index 1 of 64) 1300 [0]: l (DT# 1 of 2 DTs) */ 1301 uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s) 1302 - 0: GOOD (or bist in progress/never run) 1303 - 1: BAD */ 1304 uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM 1305 - 0: GOOD (or bist in progress/never run) 1306 - 1: BAD */ 1307 uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3] 1308 - 0: GOOD (or bist in progress/never run) 1309 - 1: BAD */ 1310#else 1311 uint64_t wlb_dat : 4; 1312 uint64_t stin_msk : 1; 1313 uint64_t dt : 1; 1314 uint64_t dtcnt : 10; 1315 uint64_t reserved_16_18 : 3; 1316 uint64_t wlb_msk : 4; 1317 uint64_t dtbnk : 1; 1318 uint64_t reserved_24_63 : 40; 1319#endif 1320 } cn50xx; 1321 struct cvmx_l2c_bst0_cn50xx cn52xx; 1322 struct cvmx_l2c_bst0_cn50xx cn52xxp1; 1323 struct cvmx_l2c_bst0_s cn56xx; 1324 struct cvmx_l2c_bst0_s cn56xxp1; 1325 struct cvmx_l2c_bst0_s cn58xx; 1326 struct cvmx_l2c_bst0_s cn58xxp1; 1327}; 1328typedef union cvmx_l2c_bst0 cvmx_l2c_bst0_t; 1329 1330/** 1331 * cvmx_l2c_bst1 1332 * 1333 * L2C_BST1 = L2C BIST 1 CTL/STAT 1334 * 1335 */ 1336union cvmx_l2c_bst1 { 1337 uint64_t u64; 1338 struct cvmx_l2c_bst1_s { 1339#ifdef __BIG_ENDIAN_BITFIELD 1340 uint64_t reserved_9_63 : 55; 1341 uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs) 1342 - 0: GOOD (or bist in progress/never run) 1343 - 1: BAD */ 1344#else 1345 uint64_t l2t : 9; 1346 uint64_t reserved_9_63 : 55; 
1347#endif 1348 } s; 1349 struct cvmx_l2c_bst1_cn30xx { 1350#ifdef __BIG_ENDIAN_BITFIELD 1351 uint64_t reserved_16_63 : 48; 1352 uint64_t vwdf : 4; /**< Bist Results for VWDF RAMs 1353 - 0: GOOD (or bist in progress/never run) 1354 - 1: BAD */ 1355 uint64_t lrf : 2; /**< Bist Results for LRF RAMs (PLC+ILC) 1356 - 0: GOOD (or bist in progress/never run) 1357 - 1: BAD */ 1358 uint64_t vab_vwcf : 1; /**< Bist Results for VAB VWCF_MEM 1359 - 0: GOOD (or bist in progress/never run) 1360 - 1: BAD */ 1361 uint64_t reserved_5_8 : 4; 1362 uint64_t l2t : 5; /**< Bist Results for L2T (USE+4SET RAMs) 1363 - 0: GOOD (or bist in progress/never run) 1364 - 1: BAD */ 1365#else 1366 uint64_t l2t : 5; 1367 uint64_t reserved_5_8 : 4; 1368 uint64_t vab_vwcf : 1; 1369 uint64_t lrf : 2; 1370 uint64_t vwdf : 4; 1371 uint64_t reserved_16_63 : 48; 1372#endif 1373 } cn30xx; 1374 struct cvmx_l2c_bst1_cn30xx cn31xx; 1375 struct cvmx_l2c_bst1_cn38xx { 1376#ifdef __BIG_ENDIAN_BITFIELD 1377 uint64_t reserved_16_63 : 48; 1378 uint64_t vwdf : 4; /**< Bist Results for VWDF RAMs 1379 - 0: GOOD (or bist in progress/never run) 1380 - 1: BAD */ 1381 uint64_t lrf : 2; /**< Bist Results for LRF RAMs (PLC+ILC) 1382 - 0: GOOD (or bist in progress/never run) 1383 - 1: BAD */ 1384 uint64_t vab_vwcf : 1; /**< Bist Results for VAB VWCF_MEM 1385 - 0: GOOD (or bist in progress/never run) 1386 - 1: BAD */ 1387 uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs) 1388 - 0: GOOD (or bist in progress/never run) 1389 - 1: BAD */ 1390#else 1391 uint64_t l2t : 9; 1392 uint64_t vab_vwcf : 1; 1393 uint64_t lrf : 2; 1394 uint64_t vwdf : 4; 1395 uint64_t reserved_16_63 : 48; 1396#endif 1397 } cn38xx; 1398 struct cvmx_l2c_bst1_cn38xx cn38xxp2; 1399 struct cvmx_l2c_bst1_cn38xx cn50xx; 1400 struct cvmx_l2c_bst1_cn52xx { 1401#ifdef __BIG_ENDIAN_BITFIELD 1402 uint64_t reserved_19_63 : 45; 1403 uint64_t plc2 : 1; /**< Bist Results for PLC2 RAM 1404 - 0: GOOD (or bist in progress/never run) 1405 - 1: BAD */ 1406 uint64_t 
plc1 : 1; /**< Bist Results for PLC1 RAM 1407 - 0: GOOD (or bist in progress/never run) 1408 - 1: BAD */ 1409 uint64_t plc0 : 1; /**< Bist Results for PLC0 RAM 1410 - 0: GOOD (or bist in progress/never run) 1411 - 1: BAD */ 1412 uint64_t vwdf : 4; /**< Bist Results for VWDF RAMs 1413 - 0: GOOD (or bist in progress/never run) 1414 - 1: BAD */ 1415 uint64_t reserved_11_11 : 1; 1416 uint64_t ilc : 1; /**< Bist Results for ILC RAM 1417 - 0: GOOD (or bist in progress/never run) 1418 - 1: BAD */ 1419 uint64_t vab_vwcf : 1; /**< Bist Results for VAB VWCF_MEM 1420 - 0: GOOD (or bist in progress/never run) 1421 - 1: BAD */ 1422 uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs) 1423 - 0: GOOD (or bist in progress/never run) 1424 - 1: BAD */ 1425#else 1426 uint64_t l2t : 9; 1427 uint64_t vab_vwcf : 1; 1428 uint64_t ilc : 1; 1429 uint64_t reserved_11_11 : 1; 1430 uint64_t vwdf : 4; 1431 uint64_t plc0 : 1; 1432 uint64_t plc1 : 1; 1433 uint64_t plc2 : 1; 1434 uint64_t reserved_19_63 : 45; 1435#endif 1436 } cn52xx; 1437 struct cvmx_l2c_bst1_cn52xx cn52xxp1; 1438 struct cvmx_l2c_bst1_cn56xx { 1439#ifdef __BIG_ENDIAN_BITFIELD 1440 uint64_t reserved_24_63 : 40; 1441 uint64_t plc2 : 1; /**< Bist Results for LRF RAMs (ILC) 1442 - 0: GOOD (or bist in progress/never run) 1443 - 1: BAD */ 1444 uint64_t plc1 : 1; /**< Bist Results for LRF RAMs (ILC) 1445 - 0: GOOD (or bist in progress/never run) 1446 - 1: BAD */ 1447 uint64_t plc0 : 1; /**< Bist Results for LRF RAMs (ILC) 1448 - 0: GOOD (or bist in progress/never run) 1449 - 1: BAD */ 1450 uint64_t ilc : 1; /**< Bist Results for LRF RAMs (ILC) 1451 - 0: GOOD (or bist in progress/never run) 1452 - 1: BAD */ 1453 uint64_t vwdf1 : 4; /**< Bist Results for VWDF1 RAMs 1454 - 0: GOOD (or bist in progress/never run) 1455 - 1: BAD */ 1456 uint64_t vwdf0 : 4; /**< Bist Results for VWDF0 RAMs 1457 - 0: GOOD (or bist in progress/never run) 1458 - 1: BAD */ 1459 uint64_t vab_vwcf1 : 1; /**< Bist Results for VAB VWCF1_MEM */ 1460 uint64_t 
reserved_10_10 : 1; 1461 uint64_t vab_vwcf0 : 1; /**< Bist Results for VAB VWCF0_MEM 1462 - 0: GOOD (or bist in progress/never run) 1463 - 1: BAD */ 1464 uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs) 1465 - 0: GOOD (or bist in progress/never run) 1466 - 1: BAD */ 1467#else 1468 uint64_t l2t : 9; 1469 uint64_t vab_vwcf0 : 1; 1470 uint64_t reserved_10_10 : 1; 1471 uint64_t vab_vwcf1 : 1; 1472 uint64_t vwdf0 : 4; 1473 uint64_t vwdf1 : 4; 1474 uint64_t ilc : 1; 1475 uint64_t plc0 : 1; 1476 uint64_t plc1 : 1; 1477 uint64_t plc2 : 1; 1478 uint64_t reserved_24_63 : 40; 1479#endif 1480 } cn56xx; 1481 struct cvmx_l2c_bst1_cn56xx cn56xxp1; 1482 struct cvmx_l2c_bst1_cn38xx cn58xx; 1483 struct cvmx_l2c_bst1_cn38xx cn58xxp1; 1484}; 1485typedef union cvmx_l2c_bst1 cvmx_l2c_bst1_t; 1486 1487/** 1488 * cvmx_l2c_bst2 1489 * 1490 * L2C_BST2 = L2C BIST 2 CTL/STAT 1491 * 1492 */ 1493union cvmx_l2c_bst2 { 1494 uint64_t u64; 1495 struct cvmx_l2c_bst2_s { 1496#ifdef __BIG_ENDIAN_BITFIELD 1497 uint64_t reserved_16_63 : 48; 1498 uint64_t mrb : 4; /**< Bist Results for MRB RAMs 1499 - 0: GOOD (or bist in progress/never run) 1500 - 1: BAD */ 1501 uint64_t reserved_4_11 : 8; 1502 uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM 1503 - 1: BAD */ 1504 uint64_t picbst : 1; /**< Bist Results for RFB PIC RAM 1505 - 1: BAD */ 1506 uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM 1507 - 0: GOOD (or bist in progress/never run) 1508 - 1: BAD */ 1509 uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM 1510 - 0: GOOD (or bist in progress/never run) 1511 - 1: BAD */ 1512#else 1513 uint64_t xrddat : 1; 1514 uint64_t xrdmsk : 1; 1515 uint64_t picbst : 1; 1516 uint64_t ipcbst : 1; 1517 uint64_t reserved_4_11 : 8; 1518 uint64_t mrb : 4; 1519 uint64_t reserved_16_63 : 48; 1520#endif 1521 } s; 1522 struct cvmx_l2c_bst2_cn30xx { 1523#ifdef __BIG_ENDIAN_BITFIELD 1524 uint64_t reserved_16_63 : 48; 1525 uint64_t mrb : 4; /**< Bist Results for MRB RAMs 1526 - 0: GOOD (or bist in 
progress/never run) 1527 - 1: BAD */ 1528 uint64_t rmdf : 4; /**< Bist Results for RMDF RAMs 1529 - 0: GOOD (or bist in progress/never run) 1530 - 1: BAD */ 1531 uint64_t reserved_4_7 : 4; 1532 uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM 1533 - 0: GOOD (or bist in progress/never run) 1534 - 1: BAD */ 1535 uint64_t reserved_2_2 : 1; 1536 uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM 1537 - 0: GOOD (or bist in progress/never run) 1538 - 1: BAD */ 1539 uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM 1540 - 0: GOOD (or bist in progress/never run) 1541 - 1: BAD */ 1542#else 1543 uint64_t xrddat : 1; 1544 uint64_t xrdmsk : 1; 1545 uint64_t reserved_2_2 : 1; 1546 uint64_t ipcbst : 1; 1547 uint64_t reserved_4_7 : 4; 1548 uint64_t rmdf : 4; 1549 uint64_t mrb : 4; 1550 uint64_t reserved_16_63 : 48; 1551#endif 1552 } cn30xx; 1553 struct cvmx_l2c_bst2_cn30xx cn31xx; 1554 struct cvmx_l2c_bst2_cn38xx { 1555#ifdef __BIG_ENDIAN_BITFIELD 1556 uint64_t reserved_16_63 : 48; 1557 uint64_t mrb : 4; /**< Bist Results for MRB RAMs 1558 - 0: GOOD (or bist in progress/never run) 1559 - 1: BAD */ 1560 uint64_t rmdf : 4; /**< Bist Results for RMDF RAMs 1561 - 0: GOOD (or bist in progress/never run) 1562 - 1: BAD */ 1563 uint64_t rhdf : 4; /**< Bist Results for RHDF RAMs 1564 - 0: GOOD (or bist in progress/never run) 1565 - 1: BAD */ 1566 uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM 1567 - 1: BAD */ 1568 uint64_t picbst : 1; /**< Bist Results for RFB PIC RAM 1569 - 1: BAD */ 1570 uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM 1571 - 0: GOOD (or bist in progress/never run) 1572 - 1: BAD */ 1573 uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM 1574 - 0: GOOD (or bist in progress/never run) 1575 - 1: BAD */ 1576#else 1577 uint64_t xrddat : 1; 1578 uint64_t xrdmsk : 1; 1579 uint64_t picbst : 1; 1580 uint64_t ipcbst : 1; 1581 uint64_t rhdf : 4; 1582 uint64_t rmdf : 4; 1583 uint64_t mrb : 4; 1584 uint64_t reserved_16_63 : 48; 
1585#endif 1586 } cn38xx; 1587 struct cvmx_l2c_bst2_cn38xx cn38xxp2; 1588 struct cvmx_l2c_bst2_cn30xx cn50xx; 1589 struct cvmx_l2c_bst2_cn30xx cn52xx; 1590 struct cvmx_l2c_bst2_cn30xx cn52xxp1; 1591 struct cvmx_l2c_bst2_cn56xx { 1592#ifdef __BIG_ENDIAN_BITFIELD 1593 uint64_t reserved_16_63 : 48; 1594 uint64_t mrb : 4; /**< Bist Results for MRB RAMs 1595 - 0: GOOD (or bist in progress/never run) 1596 - 1: BAD */ 1597 uint64_t rmdb : 4; /**< Bist Results for RMDB RAMs 1598 - 0: GOOD (or bist in progress/never run) 1599 - 1: BAD */ 1600 uint64_t rhdb : 4; /**< Bist Results for RHDB RAMs 1601 - 0: GOOD (or bist in progress/never run) 1602 - 1: BAD */ 1603 uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM 1604 - 1: BAD */ 1605 uint64_t picbst : 1; /**< Bist Results for RFB PIC RAM 1606 - 1: BAD */ 1607 uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM 1608 - 0: GOOD (or bist in progress/never run) 1609 - 1: BAD */ 1610 uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM 1611 - 0: GOOD (or bist in progress/never run) 1612 - 1: BAD */ 1613#else 1614 uint64_t xrddat : 1; 1615 uint64_t xrdmsk : 1; 1616 uint64_t picbst : 1; 1617 uint64_t ipcbst : 1; 1618 uint64_t rhdb : 4; 1619 uint64_t rmdb : 4; 1620 uint64_t mrb : 4; 1621 uint64_t reserved_16_63 : 48; 1622#endif 1623 } cn56xx; 1624 struct cvmx_l2c_bst2_cn56xx cn56xxp1; 1625 struct cvmx_l2c_bst2_cn56xx cn58xx; 1626 struct cvmx_l2c_bst2_cn56xx cn58xxp1; 1627}; 1628typedef union cvmx_l2c_bst2 cvmx_l2c_bst2_t; 1629 1630/** 1631 * cvmx_l2c_bst_mem# 1632 * 1633 * L2C_BST_MEM = L2C MEM BIST Status 1634 * 1635 * 1636 * Notes: 1637 * (1) CLEAR_BIST must be written to 1 before START_BIST is written to 1 using a separate CSR write. 1638 * 1639 * (2) CLEAR_BIST must not be changed after writing START_BIST to 1 until the BIST operation completes 1640 * (indicated by START_BIST returning to 0) or operation is undefined. 
 */
union cvmx_l2c_bst_memx {
	uint64_t u64;
	struct cvmx_l2c_bst_memx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t start_bist                   : 1;  /**< When written to 1, starts BIST. Will read 1 until
                                                         BIST is complete (see Note). */
	uint64_t clear_bist                   : 1;  /**< When BIST is triggered, run clear BIST (see Note) */
	uint64_t reserved_5_61                : 57;
	uint64_t rdffl                        : 1;  /**< BIST failure status for RDF */
	uint64_t vbffl                        : 4;  /**< BIST failure status for VBF0-3 */
#else
	uint64_t vbffl                        : 4;
	uint64_t rdffl                        : 1;
	uint64_t reserved_5_61                : 57;
	uint64_t clear_bist                   : 1;
	uint64_t start_bist                   : 1;
#endif
	} s;
	struct cvmx_l2c_bst_memx_s            cn61xx;
	struct cvmx_l2c_bst_memx_s            cn63xx;
	struct cvmx_l2c_bst_memx_s            cn63xxp1;
	struct cvmx_l2c_bst_memx_s            cn66xx;
	struct cvmx_l2c_bst_memx_s            cn68xx;
	struct cvmx_l2c_bst_memx_s            cn68xxp1;
	struct cvmx_l2c_bst_memx_s            cnf71xx;
};
typedef union cvmx_l2c_bst_memx cvmx_l2c_bst_memx_t;

/**
 * cvmx_l2c_bst_tdt#
 *
 * L2C_BST_TDT = L2C TAD DaTa BIST Status
 *
 */
union cvmx_l2c_bst_tdtx {
	uint64_t u64;
	struct cvmx_l2c_bst_tdtx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t fbfrspfl                     : 8;  /**< BIST failure status for quad 0-7 FBF RSP read port */
	uint64_t sbffl                        : 8;  /**< BIST failure status for quad 0-7 SBF */
	uint64_t fbffl                        : 8;  /**< BIST failure status for quad 0-7 FBF WRP read port */
	uint64_t l2dfl                        : 8;  /**< BIST failure status for quad 0-7 L2D */
#else
	uint64_t l2dfl                        : 8;
	uint64_t fbffl                        : 8;
	uint64_t sbffl                        : 8;
	uint64_t fbfrspfl                     : 8;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_l2c_bst_tdtx_s            cn61xx;
	struct cvmx_l2c_bst_tdtx_s            cn63xx;
	struct cvmx_l2c_bst_tdtx_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_24_63               : 40;
	uint64_t sbffl                        : 8;  /**< BIST failure status for quad 0-7 SBF */
	uint64_t fbffl                        : 8;  /**< BIST failure status for quad 0-7 FBF */
	uint64_t l2dfl                        : 8;  /**< BIST failure status for quad 0-7 L2D */
#else
	uint64_t l2dfl                        : 8;
	uint64_t fbffl                        : 8;
	uint64_t sbffl                        : 8;
	uint64_t reserved_24_63               : 40;
#endif
	} cn63xxp1;
	struct cvmx_l2c_bst_tdtx_s            cn66xx;
	struct cvmx_l2c_bst_tdtx_s            cn68xx;
	struct cvmx_l2c_bst_tdtx_s            cn68xxp1;
	struct cvmx_l2c_bst_tdtx_s            cnf71xx;
};
typedef union cvmx_l2c_bst_tdtx cvmx_l2c_bst_tdtx_t;

/**
 * cvmx_l2c_bst_ttg#
 *
 * L2C_BST_TTG = L2C TAD TaG BIST Status
 *
 */
union cvmx_l2c_bst_ttgx {
	uint64_t u64;
	struct cvmx_l2c_bst_ttgx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_17_63               : 47;
	uint64_t lrufl                        : 1;  /**< BIST failure status for tag LRU */
	uint64_t tagfl                        : 16; /**< BIST failure status for tag ways 0-15 */
#else
	uint64_t tagfl                        : 16;
	uint64_t lrufl                        : 1;
	uint64_t reserved_17_63               : 47;
#endif
	} s;
	struct cvmx_l2c_bst_ttgx_s            cn61xx;
	struct cvmx_l2c_bst_ttgx_s            cn63xx;
	struct cvmx_l2c_bst_ttgx_s            cn63xxp1;
	struct cvmx_l2c_bst_ttgx_s            cn66xx;
	struct cvmx_l2c_bst_ttgx_s            cn68xx;
	struct cvmx_l2c_bst_ttgx_s            cn68xxp1;
	struct cvmx_l2c_bst_ttgx_s            cnf71xx;
};
typedef union cvmx_l2c_bst_ttgx cvmx_l2c_bst_ttgx_t;

/**
 * cvmx_l2c_cfg
 *
 * Specify the RSL base addresses for the block
 *
 * L2C_CFG = L2C Configuration
 *
 * Description:
 */
union cvmx_l2c_cfg {
	uint64_t u64;
	struct cvmx_l2c_cfg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_20_63               : 44;
	uint64_t bstrun                       : 1;  /**< L2 Data Store Bist Running
                                                         Indicates when the L2C HW Bist sequence(short or long) is
                                                         running.
[L2C ECC Bist FSM is not in the RESET/DONE state] */ 1761 uint64_t lbist : 1; /**< L2C Data Store Long Bist Sequence 1762 When the previous state was '0' and SW writes a '1', 1763 the long bist sequence (enhanced 13N March) is performed. 1764 SW can then read the L2C_CFG[BSTRUN] which will indicate 1765 that the long bist sequence is running. When BSTRUN-=0, 1766 the state of the L2D_BST[0-3] registers contain information 1767 which reflects the status of the recent long bist sequence. 1768 NOTE: SW must never write LBIST=0 while Long Bist is running 1769 (ie: when BSTRUN=1 never write LBIST=0). 1770 NOTE: LBIST is disabled if the MIO_FUS_DAT2.BIST_DIS 1771 Fuse is blown. */ 1772 uint64_t xor_bank : 1; /**< L2C XOR Bank Bit 1773 When both LMC's are enabled(DPRES1=1/DPRES0=1), this 1774 bit determines how addresses are assigned to 1775 LMC port(s). 1776 XOR_BANK| LMC# 1777 ----------+--------------------------------- 1778 0 | byte address[7] 1779 1 | byte address[7] XOR byte address[12] 1780 Example: If both LMC ports are enabled (DPRES1=1/DPRES0=1) 1781 and XOR_BANK=1, then addr[7] XOR addr[12] is used to determine 1782 which LMC Port# a reference is directed to. */ 1783 uint64_t dpres1 : 1; /**< DDR1 Present/LMC1 Enable 1784 When DPRES1 is set, LMC#1 is enabled(DDR1 pins at 1785 the BOTTOM of the chip are active). 1786 NOTE: When both LMC ports are enabled(DPRES1=1/DPRES0=1), 1787 see XOR_BANK bit to determine how a reference is 1788 assigned to a DDR/LMC port. (Also, in dual-LMC configuration, 1789 the address sent to the targeted LMC port is the 1790 address shifted right by one). 1791 NOTE: For power-savings, the DPRES1 is also used to 1792 disable DDR1/LMC1 clocks. */ 1793 uint64_t dpres0 : 1; /**< DDR0 Present/LMC0 Enable 1794 When DPRES0 is set, LMC#0 is enabled(DDR0 pins at 1795 the BOTTOM of the chip are active). 
1796 NOTE: When both LMC ports are enabled(DPRES1=1/DPRES0=1), 1797 see XOR_BANK bit to determine how a reference is 1798 assigned to a DDR/LMC port. (Also, in dual-LMC configuration, 1799 the address sent to the targeted LMC port is the 1800 address shifted right by one). 1801 NOTE: For power-savings, the DPRES0 is also used to 1802 disable DDR0/LMC0 clocks. */ 1803 uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable 1804 When set, the L2C dual-fill performance feature is 1805 disabled. 1806 NOTE: This bit is only intended to evaluate the 1807 effectiveness of the dual-fill feature. For OPTIMAL 1808 performance, this bit should ALWAYS be zero. */ 1809 uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent 1810 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 1811 When FPEN is enabled and the LFB is empty, the 1812 forward progress counter (FPCNT) is initialized to: 1813 FPCNT[24:0] = 2^(9+FPEXP) 1814 When the LFB is non-empty the FPCNT is decremented 1815 (every eclk interval). If the FPCNT reaches zero, 1816 the LFB no longer accepts new requests until either 1817 a) all of the current LFB entries have completed 1818 (to ensure forward progress). 1819 b) FPEMPTY=0 and another forward progress count 1820 interval timeout expires. 1821 EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. 1822 (For eclk=500MHz(2ns), this would be ~4us). */ 1823 uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty 1824 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 1825 When set, if the forward progress counter expires, 1826 all new LFB-NQs are stopped UNTIL all current LFB 1827 entries have completed. 1828 When clear, if the forward progress counter expires, 1829 all new LFB-NQs are stopped UNTIL either 1830 a) all current LFB entries have completed. 
1831 b) another forward progress interval expires 1832 NOTE: We may want to FREEZE/HANG the system when 1833 we encounter an LFB entry cannot complete, and there 1834 may be times when we want to allow further LFB-NQs 1835 to be permitted to help in further analyzing the 1836 source */ 1837 uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable 1838 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 1839 When set, enables the Forward Progress Counter to 1840 prevent new LFB entries from enqueueing until ALL 1841 current LFB entries have completed. */ 1842 uint64_t idxalias : 1; /**< L2C Index Alias Enable 1843 When set, the L2 Tag/Data Store will alias the 11-bit 1844 index with the low order 11-bits of the tag. 1845 index[17:7] = (tag[28:18] ^ index[17:7]) 1846 NOTE: This bit must only be modified at boot time, 1847 when it can be guaranteed that no blocks have been 1848 loaded into the L2 Cache. 1849 The index aliasing is a performance enhancement feature 1850 which reduces the L2 cache thrashing experienced for 1851 regular stride references. 1852 NOTE: The index alias is stored in the LFB and VAB, and 1853 its effects are reversed for memory references (Victims, 1854 STT-Misses and Read-Misses) */ 1855 uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits 1856 become less than or equal to the MWF_CRD, the L2C will 1857 assert l2c__lmi_mwd_hiwater_a to signal the LMC to give 1858 writes (victims) higher priority. 
*/ 1859 uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: 1860 - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] 1861 - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), 1862 RHCF(RdHit), STRSP(ST RSP w/ invalidate), 1863 STRSC(ST RSP no invalidate)] */ 1864 uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: 1865 - 0: Fixed Priority - 1866 IOB->PP requests are higher priority than 1867 PP->IOB requests 1868 - 1: Round Robin - 1869 I/O requests from PP and IOB are serviced in 1870 round robin */ 1871 uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: 1872 - 0: Fixed Priority - 1873 IOB memory requests are higher priority than PP 1874 memory requests. 1875 - 1: Round Robin - 1876 Memory requests from PP and IOB are serviced in 1877 round robin. */ 1878#else 1879 uint64_t lrf_arb_mode : 1; 1880 uint64_t rfb_arb_mode : 1; 1881 uint64_t rsp_arb_mode : 1; 1882 uint64_t mwf_crd : 4; 1883 uint64_t idxalias : 1; 1884 uint64_t fpen : 1; 1885 uint64_t fpempty : 1; 1886 uint64_t fpexp : 4; 1887 uint64_t dfill_dis : 1; 1888 uint64_t dpres0 : 1; 1889 uint64_t dpres1 : 1; 1890 uint64_t xor_bank : 1; 1891 uint64_t lbist : 1; 1892 uint64_t bstrun : 1; 1893 uint64_t reserved_20_63 : 44; 1894#endif 1895 } s; 1896 struct cvmx_l2c_cfg_cn30xx { 1897#ifdef __BIG_ENDIAN_BITFIELD 1898 uint64_t reserved_14_63 : 50; 1899 uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent 1900 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 1901 When FPEN is enabled and the LFB is empty, the 1902 forward progress counter (FPCNT) is initialized to: 1903 FPCNT[24:0] = 2^(9+FPEXP) 1904 When the LFB is non-empty the FPCNT is decremented 1905 (every eclk interval). If the FPCNT reaches zero, 1906 the LFB no longer accepts new requests until either 1907 a) all of the current LFB entries have completed 1908 (to ensure forward progress). 1909 b) FPEMPTY=0 and another forward progress count 1910 interval timeout expires. 1911 EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. 
1912 (For eclk=500MHz(2ns), this would be ~4us). */ 1913 uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty 1914 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 1915 When set, if the forward progress counter expires, 1916 all new LFB-NQs are stopped UNTIL all current LFB 1917 entries have completed. 1918 When clear, if the forward progress counter expires, 1919 all new LFB-NQs are stopped UNTIL either 1920 a) all current LFB entries have completed. 1921 b) another forward progress interval expires 1922 NOTE: We may want to FREEZE/HANG the system when 1923 we encounter an LFB entry cannot complete, and there 1924 may be times when we want to allow further LFB-NQs 1925 to be permitted to help in further analyzing the 1926 source */ 1927 uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable 1928 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 1929 When set, enables the Forward Progress Counter to 1930 prevent new LFB entries from enqueueing until ALL 1931 current LFB entries have completed. */ 1932 uint64_t idxalias : 1; /**< L2C Index Alias Enable 1933 When set, the L2 Tag/Data Store will alias the 8-bit 1934 index with the low order 8-bits of the tag. 1935 index[14:7] = (tag[22:15] ^ index[14:7]) 1936 NOTE: This bit must only be modified at boot time, 1937 when it can be guaranteed that no blocks have been 1938 loaded into the L2 Cache. 1939 The index aliasing is a performance enhancement feature 1940 which reduces the L2 cache thrashing experienced for 1941 regular stride references. 1942 NOTE: The index alias is stored in the LFB and VAB, and 1943 its effects are reversed for memory references (Victims, 1944 STT-Misses and Read-Misses) */ 1945 uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits 1946 become less than or equal to the MWF_CRD, the L2C will 1947 assert l2c__lmi_mwd_hiwater_a to signal the LMC to give 1948 writes (victims) higher priority. 
*/ 1949 uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: 1950 - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] 1951 - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), 1952 RHCF(RdHit), STRSP(ST RSP w/ invalidate), 1953 STRSC(ST RSP no invalidate)] */ 1954 uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: 1955 - 0: Fixed Priority - 1956 IOB->PP requests are higher priority than 1957 PP->IOB requests 1958 - 1: Round Robin - 1959 I/O requests from PP and IOB are serviced in 1960 round robin */ 1961 uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: 1962 - 0: Fixed Priority - 1963 IOB memory requests are higher priority than PP 1964 memory requests. 1965 - 1: Round Robin - 1966 Memory requests from PP and IOB are serviced in 1967 round robin. */ 1968#else 1969 uint64_t lrf_arb_mode : 1; 1970 uint64_t rfb_arb_mode : 1; 1971 uint64_t rsp_arb_mode : 1; 1972 uint64_t mwf_crd : 4; 1973 uint64_t idxalias : 1; 1974 uint64_t fpen : 1; 1975 uint64_t fpempty : 1; 1976 uint64_t fpexp : 4; 1977 uint64_t reserved_14_63 : 50; 1978#endif 1979 } cn30xx; 1980 struct cvmx_l2c_cfg_cn30xx cn31xx; 1981 struct cvmx_l2c_cfg_cn30xx cn38xx; 1982 struct cvmx_l2c_cfg_cn30xx cn38xxp2; 1983 struct cvmx_l2c_cfg_cn50xx { 1984#ifdef __BIG_ENDIAN_BITFIELD 1985 uint64_t reserved_20_63 : 44; 1986 uint64_t bstrun : 1; /**< L2 Data Store Bist Running 1987 Indicates when the L2C HW Bist sequence(short or long) is 1988 running. [L2C ECC Bist FSM is not in the RESET/DONE state] */ 1989 uint64_t lbist : 1; /**< L2C Data Store Long Bist Sequence 1990 When the previous state was '0' and SW writes a '1', 1991 the long bist sequence (enhanced 13N March) is performed. 1992 SW can then read the L2C_CFG[BSTRUN] which will indicate 1993 that the long bist sequence is running. When BSTRUN-=0, 1994 the state of the L2D_BST[0-3] registers contain information 1995 which reflects the status of the recent long bist sequence. 
1996 NOTE: SW must never write LBIST=0 while Long Bist is running 1997 (ie: when BSTRUN=1 never write LBIST=0). */ 1998 uint64_t reserved_14_17 : 4; 1999 uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent 2000 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2001 When FPEN is enabled and the LFB is empty, the 2002 forward progress counter (FPCNT) is initialized to: 2003 FPCNT[24:0] = 2^(9+FPEXP) 2004 When the LFB is non-empty the FPCNT is decremented 2005 (every eclk interval). If the FPCNT reaches zero, 2006 the LFB no longer accepts new requests until either 2007 a) all of the current LFB entries have completed 2008 (to ensure forward progress). 2009 b) FPEMPTY=0 and another forward progress count 2010 interval timeout expires. 2011 EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. 2012 (For eclk=500MHz(2ns), this would be ~4us). */ 2013 uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty 2014 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2015 When set, if the forward progress counter expires, 2016 all new LFB-NQs are stopped UNTIL all current LFB 2017 entries have completed. 2018 When clear, if the forward progress counter expires, 2019 all new LFB-NQs are stopped UNTIL either 2020 a) all current LFB entries have completed. 2021 b) another forward progress interval expires 2022 NOTE: We may want to FREEZE/HANG the system when 2023 we encounter an LFB entry cannot complete, and there 2024 may be times when we want to allow further LFB-NQs 2025 to be permitted to help in further analyzing the 2026 source */ 2027 uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable 2028 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2029 When set, enables the Forward Progress Counter to 2030 prevent new LFB entries from enqueueing until ALL 2031 current LFB entries have completed. 
*/ 2032 uint64_t idxalias : 1; /**< L2C Index Alias Enable 2033 When set, the L2 Tag/Data Store will alias the 7-bit 2034 index with the low order 7-bits of the tag. 2035 index[13:7] = (tag[20:14] ^ index[13:7]) 2036 NOTE: This bit must only be modified at boot time, 2037 when it can be guaranteed that no blocks have been 2038 loaded into the L2 Cache. 2039 The index aliasing is a performance enhancement feature 2040 which reduces the L2 cache thrashing experienced for 2041 regular stride references. 2042 NOTE: The index alias is stored in the LFB and VAB, and 2043 its effects are reversed for memory references (Victims, 2044 STT-Misses and Read-Misses) */ 2045 uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits 2046 become less than or equal to the MWF_CRD, the L2C will 2047 assert l2c__lmi_mwd_hiwater_a to signal the LMC to give 2048 writes (victims) higher priority. */ 2049 uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: 2050 - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] 2051 - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), 2052 RHCF(RdHit), STRSP(ST RSP w/ invalidate), 2053 STRSC(ST RSP no invalidate)] */ 2054 uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: 2055 - 0: Fixed Priority - 2056 IOB->PP requests are higher priority than 2057 PP->IOB requests 2058 - 1: Round Robin - 2059 I/O requests from PP and IOB are serviced in 2060 round robin */ 2061 uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: 2062 - 0: Fixed Priority - 2063 IOB memory requests are higher priority than PP 2064 memory requests. 2065 - 1: Round Robin - 2066 Memory requests from PP and IOB are serviced in 2067 round robin. 
*/ 2068#else 2069 uint64_t lrf_arb_mode : 1; 2070 uint64_t rfb_arb_mode : 1; 2071 uint64_t rsp_arb_mode : 1; 2072 uint64_t mwf_crd : 4; 2073 uint64_t idxalias : 1; 2074 uint64_t fpen : 1; 2075 uint64_t fpempty : 1; 2076 uint64_t fpexp : 4; 2077 uint64_t reserved_14_17 : 4; 2078 uint64_t lbist : 1; 2079 uint64_t bstrun : 1; 2080 uint64_t reserved_20_63 : 44; 2081#endif 2082 } cn50xx; 2083 struct cvmx_l2c_cfg_cn50xx cn52xx; 2084 struct cvmx_l2c_cfg_cn50xx cn52xxp1; 2085 struct cvmx_l2c_cfg_s cn56xx; 2086 struct cvmx_l2c_cfg_s cn56xxp1; 2087 struct cvmx_l2c_cfg_cn58xx { 2088#ifdef __BIG_ENDIAN_BITFIELD 2089 uint64_t reserved_20_63 : 44; 2090 uint64_t bstrun : 1; /**< L2 Data Store Bist Running 2091 Indicates when the L2C HW Bist sequence(short or long) is 2092 running. [L2C ECC Bist FSM is not in the RESET/DONE state] */ 2093 uint64_t lbist : 1; /**< L2C Data Store Long Bist Sequence 2094 When the previous state was '0' and SW writes a '1', 2095 the long bist sequence (enhanced 13N March) is performed. 2096 SW can then read the L2C_CFG[BSTRUN] which will indicate 2097 that the long bist sequence is running. When BSTRUN-=0, 2098 the state of the L2D_BST[0-3] registers contain information 2099 which reflects the status of the recent long bist sequence. 2100 NOTE: SW must never write LBIST=0 while Long Bist is running 2101 (ie: when BSTRUN=1 never write LBIST=0). 2102 NOTE: LBIST is disabled if the MIO_FUS_DAT2.BIST_DIS 2103 Fuse is blown. */ 2104 uint64_t reserved_15_17 : 3; 2105 uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable 2106 When set, the L2C dual-fill performance feature is 2107 disabled. 2108 NOTE: This bit is only intended to evaluate the 2109 effectiveness of the dual-fill feature. For OPTIMAL 2110 performance, this bit should ALWAYS be zero. */ 2111 uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent 2112 NOTE: Should NOT be exposed to customer! 
[FOR DEBUG ONLY] 2113 When FPEN is enabled and the LFB is empty, the 2114 forward progress counter (FPCNT) is initialized to: 2115 FPCNT[24:0] = 2^(9+FPEXP) 2116 When the LFB is non-empty the FPCNT is decremented 2117 (every eclk interval). If the FPCNT reaches zero, 2118 the LFB no longer accepts new requests until either 2119 a) all of the current LFB entries have completed 2120 (to ensure forward progress). 2121 b) FPEMPTY=0 and another forward progress count 2122 interval timeout expires. 2123 EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. 2124 (For eclk=500MHz(2ns), this would be ~4us). */ 2125 uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty 2126 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2127 When set, if the forward progress counter expires, 2128 all new LFB-NQs are stopped UNTIL all current LFB 2129 entries have completed. 2130 When clear, if the forward progress counter expires, 2131 all new LFB-NQs are stopped UNTIL either 2132 a) all current LFB entries have completed. 2133 b) another forward progress interval expires 2134 NOTE: We may want to FREEZE/HANG the system when 2135 we encounter an LFB entry cannot complete, and there 2136 may be times when we want to allow further LFB-NQs 2137 to be permitted to help in further analyzing the 2138 source */ 2139 uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable 2140 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2141 When set, enables the Forward Progress Counter to 2142 prevent new LFB entries from enqueueing until ALL 2143 current LFB entries have completed. */ 2144 uint64_t idxalias : 1; /**< L2C Index Alias Enable 2145 When set, the L2 Tag/Data Store will alias the 11-bit 2146 index with the low order 11-bits of the tag. 2147 index[17:7] = (tag[28:18] ^ index[17:7]) 2148 NOTE: This bit must only be modified at boot time, 2149 when it can be guaranteed that no blocks have been 2150 loaded into the L2 Cache. 
2151 The index aliasing is a performance enhancement feature 2152 which reduces the L2 cache thrashing experienced for 2153 regular stride references. 2154 NOTE: The index alias is stored in the LFB and VAB, and 2155 its effects are reversed for memory references (Victims, 2156 STT-Misses and Read-Misses) */ 2157 uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits 2158 become less than or equal to the MWF_CRD, the L2C will 2159 assert l2c__lmi_mwd_hiwater_a to signal the LMC to give 2160 writes (victims) higher priority. */ 2161 uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: 2162 - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] 2163 - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), 2164 RHCF(RdHit), STRSP(ST RSP w/ invalidate), 2165 STRSC(ST RSP no invalidate)] */ 2166 uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: 2167 - 0: Fixed Priority - 2168 IOB->PP requests are higher priority than 2169 PP->IOB requests 2170 - 1: Round Robin - 2171 I/O requests from PP and IOB are serviced in 2172 round robin */ 2173 uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: 2174 - 0: Fixed Priority - 2175 IOB memory requests are higher priority than PP 2176 memory requests. 2177 - 1: Round Robin - 2178 Memory requests from PP and IOB are serviced in 2179 round robin. */ 2180#else 2181 uint64_t lrf_arb_mode : 1; 2182 uint64_t rfb_arb_mode : 1; 2183 uint64_t rsp_arb_mode : 1; 2184 uint64_t mwf_crd : 4; 2185 uint64_t idxalias : 1; 2186 uint64_t fpen : 1; 2187 uint64_t fpempty : 1; 2188 uint64_t fpexp : 4; 2189 uint64_t dfill_dis : 1; 2190 uint64_t reserved_15_17 : 3; 2191 uint64_t lbist : 1; 2192 uint64_t bstrun : 1; 2193 uint64_t reserved_20_63 : 44; 2194#endif 2195 } cn58xx; 2196 struct cvmx_l2c_cfg_cn58xxp1 { 2197#ifdef __BIG_ENDIAN_BITFIELD 2198 uint64_t reserved_15_63 : 49; 2199 uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable 2200 When set, the L2C dual-fill performance feature is 2201 disabled. 
2202 NOTE: This bit is only intended to evaluate the 2203 effectiveness of the dual-fill feature. For OPTIMAL 2204 performance, this bit should ALWAYS be zero. */ 2205 uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent 2206 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2207 When FPEN is enabled and the LFB is empty, the 2208 forward progress counter (FPCNT) is initialized to: 2209 FPCNT[24:0] = 2^(9+FPEXP) 2210 When the LFB is non-empty the FPCNT is decremented 2211 (every eclk interval). If the FPCNT reaches zero, 2212 the LFB no longer accepts new requests until either 2213 a) all of the current LFB entries have completed 2214 (to ensure forward progress). 2215 b) FPEMPTY=0 and another forward progress count 2216 interval timeout expires. 2217 EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. 2218 (For eclk=500MHz(2ns), this would be ~4us). */ 2219 uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty 2220 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2221 When set, if the forward progress counter expires, 2222 all new LFB-NQs are stopped UNTIL all current LFB 2223 entries have completed. 2224 When clear, if the forward progress counter expires, 2225 all new LFB-NQs are stopped UNTIL either 2226 a) all current LFB entries have completed. 2227 b) another forward progress interval expires 2228 NOTE: We may want to FREEZE/HANG the system when 2229 we encounter an LFB entry cannot complete, and there 2230 may be times when we want to allow further LFB-NQs 2231 to be permitted to help in further analyzing the 2232 source */ 2233 uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable 2234 NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] 2235 When set, enables the Forward Progress Counter to 2236 prevent new LFB entries from enqueueing until ALL 2237 current LFB entries have completed. 
*/ 2238 uint64_t idxalias : 1; /**< L2C Index Alias Enable 2239 When set, the L2 Tag/Data Store will alias the 11-bit 2240 index with the low order 11-bits of the tag. 2241 index[17:7] = (tag[28:18] ^ index[17:7]) 2242 NOTE: This bit must only be modified at boot time, 2243 when it can be guaranteed that no blocks have been 2244 loaded into the L2 Cache. 2245 The index aliasing is a performance enhancement feature 2246 which reduces the L2 cache thrashing experienced for 2247 regular stride references. 2248 NOTE: The index alias is stored in the LFB and VAB, and 2249 its effects are reversed for memory references (Victims, 2250 STT-Misses and Read-Misses) */ 2251 uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits 2252 become less than or equal to the MWF_CRD, the L2C will 2253 assert l2c__lmi_mwd_hiwater_a to signal the LMC to give 2254 writes (victims) higher priority. */ 2255 uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: 2256 - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] 2257 - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), 2258 RHCF(RdHit), STRSP(ST RSP w/ invalidate), 2259 STRSC(ST RSP no invalidate)] */ 2260 uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: 2261 - 0: Fixed Priority - 2262 IOB->PP requests are higher priority than 2263 PP->IOB requests 2264 - 1: Round Robin - 2265 I/O requests from PP and IOB are serviced in 2266 round robin */ 2267 uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: 2268 - 0: Fixed Priority - 2269 IOB memory requests are higher priority than PP 2270 memory requests. 2271 - 1: Round Robin - 2272 Memory requests from PP and IOB are serviced in 2273 round robin. 
*/ 2274#else 2275 uint64_t lrf_arb_mode : 1; 2276 uint64_t rfb_arb_mode : 1; 2277 uint64_t rsp_arb_mode : 1; 2278 uint64_t mwf_crd : 4; 2279 uint64_t idxalias : 1; 2280 uint64_t fpen : 1; 2281 uint64_t fpempty : 1; 2282 uint64_t fpexp : 4; 2283 uint64_t dfill_dis : 1; 2284 uint64_t reserved_15_63 : 49; 2285#endif 2286 } cn58xxp1; 2287}; 2288typedef union cvmx_l2c_cfg cvmx_l2c_cfg_t; 2289 2290/** 2291 * cvmx_l2c_cop0_map# 2292 * 2293 * L2C_COP0_MAP = PP COP0 register memory mapped region 2294 * 2295 * Description: PP COP0 register mapped region. 2296 * 2297 * NOTE: for 63xx, if the PPID is outside the range of 0-3,63 the write will be ignored and reads 2298 * will return 0x2bad2bad2bad2bad 2299 * 2300 * Notes: 2301 * (1) There are 256 COP0 registers per PP. Registers 0-255 map to PP0's COP0 registers, 256-511 are 2302 * mapped to PP1's, etc. A special set X PP63 (registers 16128-16383) are for broadcast writes. 2303 * Any write done to these registers will take effect in ALL PPs. Note the means the L2C_COP0_MAP 2304 * register to access can be gotten by: 2305 * 2306 * REGNUM = [ PPID[5:0], rd[4:0], sel[2:0] ] 2307 * 2308 * where rd and sel are as defined in the HRM description of Core Coprocessor 0 registers 2309 * and note 4 below. 2310 * 2311 * (2) if a COP0 register cannot be accessed by this mechanism the write be silently ignored and the 2312 * read data will be 0xBADDEED. 2313 * 2314 * (3) for 61xx, if the PPID is outside the range of 0-3,63 or if the PP in question is in reset a 2315 * write will be ignored and reads will timeout the RSL bus. 2316 * 2317 * (4) Referring to note (1) above, the following rd/sel values are supported: 2318 * 2319 * NOTE: Put only the "Customer type" in HRM. do not put the "Real type" in HRM. 
 *
 *       Customer                                                      Real
 *  rd    sel     type      Description                                type
 * ======+=======+==========+==============================================+=========
 *   4     2      RO        COP0 UserLocal                                 RW
 *   7     0      RO        COP0 HWREna                                    RW
 *   9     0      RO        COP0 Count                                     RW
 *   9     6      RO        COP0 CvmCount                                  RW
 *   9     7      RO        COP0 CvmCtl                                    RW
 *  11     0      RO        COP0 Compare                                   RW
 *  11     6      RW        COP0 PowThrottle                               RW
 *  12     0      RO        COP0 Status                                    RW
 *  12     1      RO        COP0 IntCtl                                    RO
 *  12     2      RO        COP0 SRSCtl                                    RO
 *  13     0      RO        COP0 Cause                                     RW
 *  14     0      RO        COP0 EPC                                       RW
 *  15     0      RO        COP0 PrID                                      RO
 *  15     1      RO        COP0 EBase                                     RW
 *  16     0      RO        PC Issue Debug Info (see details below)        RO
 *  16     1      RO        PC Fetch Debug Info (see details below)        RO
 *  16     2      RO        PC Fill Debug Info (see details below)         RO
 *  16     3      RO        PC Misc Debug Info (see details below)         RO
 *  18     0      RO        COP0 WatchLo0                                  RW
 *  19     0      RO        COP0 WatchHi0                                  RW
 *  22     0      RO        COP0 MultiCoreDebug                            RW
 *  22     1                COP0 VoltageMonitor                            RW
 *  23     0      RO        COP0 Debug                                     RW
 *  23     6      RO        COP0 Debug2                                    RO
 *  24     0      RO        COP0 DEPC                                      RW
 *  25     0      RO        COP0 PerfCnt Control0                          RW
 *  25     1      RO        COP0 PerfCnt Counter0                          RW
 *  25     2      RO        COP0 PerfCnt Control1                          RW
 *  25     3      RO        COP0 PerfCnt Counter1                          RW
 *  27     0      RO        COP0 CacheErr (icache)                         RW
 *  28     0      RO        COP0 TagLo (icache)                            RW
 *  28     1      RO        COP0 DataLo (icache)                           RW
 *  29     1      RO        COP0 DataHi (icache)                           RW
 *  30     0      RO        COP0 ErrorEPC                                  RW
 *  31     0      RO        COP0 DESAVE                                    RW
 *  31     2      RO        COP0 Scratch                                   RW
 *  31     3      RO        COP0 Scratch1                                  RW
 *  31     4      RO        COP0 Scratch2                                  RW
 *
 * - PC Issue Debug Info
 *
 *    - 63:2  pc0_5a<63:2>  // often VA<63:2> of the next instruction to issue
 *                          // but can also be the VA of an instruction executing/replaying on pipe 0
 *                          // or can also be a VA being filled into the instruction cache
 *                          // or can also be unpredictable
 *                          // <61:49> RAZ
 *      1     illegal       // set when illegal VA
 *      0     delayslot     // set when VA is delayslot (prior branch may be either taken or not taken)
 *
 * - PC Fetch Debug Info
 *
 *    - 63:0  fetch_address_3a // VA being fetched from the instruction cache
 *                             // <61:49>, <1:0> RAZ
 *
 * - PC Fill Debug Info
 *
 *    - 63:0  fill_address_4a<63:2> // VA<63:2> being filled into instruction cache
 *                                  // valid when waiting_for_ifill_4a is set (see PC Misc Debug Info below)
 *                                  // <61:49> RAZ
 *      1     illegal               // set when illegal VA
 *      0     RAZ
 *
 * - PC Misc Debug Info
 *
 *    - 63:3  RAZ
 *      2     mem_stall_3a         // stall term from L1 memory system
 *      1     waiting_for_pfill_4a // when waiting_for_ifill_4a is set, indicates whether instruction cache fill is due to a prefetch
 *      0     waiting_for_ifill_4a // set when there is an outstanding instruction cache fill
 */
union cvmx_l2c_cop0_mapx {
	uint64_t u64;
	struct cvmx_l2c_cop0_mapx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t data                         : 64; /**< Data to write to/read from designated PP's COP0
                                                         register. */
#else
	uint64_t data                         : 64;
#endif
	} s;
	struct cvmx_l2c_cop0_mapx_s           cn61xx;
	struct cvmx_l2c_cop0_mapx_s           cn63xx;
	struct cvmx_l2c_cop0_mapx_s           cn63xxp1;
	struct cvmx_l2c_cop0_mapx_s           cn66xx;
	struct cvmx_l2c_cop0_mapx_s           cn68xx;
	struct cvmx_l2c_cop0_mapx_s           cn68xxp1;
	struct cvmx_l2c_cop0_mapx_s           cnf71xx;
};
typedef union cvmx_l2c_cop0_mapx cvmx_l2c_cop0_mapx_t;

/**
 * cvmx_l2c_ctl
 *
 * L2C_CTL = L2C Control
 *
 *
 * Notes:
 * (1) If MAXVAB is != 0, VAB_THRESH should be less than MAXVAB.
 *
 * (2) L2DFDBE and L2DFSBE allows software to generate L2DSBE, L2DDBE, VBFSBE, and VBFDBE errors for
 *     the purposes of testing error handling code.  When one (or both) of these bits are set a PL2
 *     which misses in the L2 will fill with the appropriate error in the first 2 OWs of the fill.
 *     Software can determine which OW pair gets the error by choosing the desired fill order
 *     (address<6:5>).
A PL2 which hits in the L2 will not inject any errors.  Therefore sending a
 *     WBIL2 prior to the PL2 is recommended to make a miss likely (if multiple processors are involved
 *     software must be careful to be sure no other processor or IO device can bring the block into the
 *     L2).
 *
 *     To generate a VBFSBE or VBFDBE, software must first get the cache block into the cache with an
 *     error using a PL2 which misses the L2.  Then a store partial to a portion of the cache block
 *     without the error must change the block to dirty.  Then, a subsequent WBL2/WBIL2/victim will
 *     trigger the VBFSBE/VBFDBE error.
 */
union cvmx_l2c_ctl {
	uint64_t u64;
	/* Common (superset) layout: bits <29:0> are shared with the per-model
	   variants below; SEPCMT<29> exists only on models using this layout. */
	struct cvmx_l2c_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_30_63               : 34;
	uint64_t sepcmt                       : 1;  /**< Sends all invals before the corresponding commit. */
	uint64_t rdf_fast                     : 1;  /**< When 0, delay read data fifo from DCLK to RCLK by one
                                                         cycle.  Needed when DCLK:RCLK ratio > 3:1.  Should be
                                                         set before DDR traffic begins and only changed when
                                                         memory traffic is idle. */
	uint64_t disstgl2i                    : 1;  /**< Disable STGL2I's from changing the tags */
	uint64_t l2dfsbe                      : 1;  /**< Force single bit ECC error on PL2 allocates (2) */
	uint64_t l2dfdbe                      : 1;  /**< Force double bit ECC error on PL2 allocates (2) */
	uint64_t discclk                      : 1;  /**< Disable conditional clocking in L2C PNR blocks */
	uint64_t maxvab                       : 4;  /**< Maximum VABs in use at once
                                                         (0 means 16, 1-15 as expected) */
	uint64_t maxlfb                       : 4;  /**< Maximum LFBs in use at once
                                                         (0 means 16, 1-15 as expected) */
	uint64_t rsp_arb_mode                 : 1;  /**< Arbitration mode for RSC/RSD bus
                                                         == 0, round-robin
                                                         == 1, static priority
                                                             1. IOR data
                                                             2. STIN/FILLs
                                                             3. STDN/SCDN/SCFL */
	uint64_t xmc_arb_mode                 : 1;  /**< Arbitration mode for XMC QOS queues
                                                         == 0, fully determined through QOS
                                                         == 1, QOS0 highest priority, QOS1-3 use normal mode */
	uint64_t ef_ena                       : 1;  /**< LMC early fill enable */
	uint64_t ef_cnt                       : 7;  /**< LMC early fill count
                                                         Specifies the number of cycles after the first LMC
                                                         fill cycle to wait before requesting a fill on the
                                                         RSC/RSD bus.
                                                         // 7 dclks (we've received 1st out of 8
                                                         // by the time we start counting)
                                                         ef_cnt = ((LMCn_CONFIG[MODE32b] ? 14 : 7) *
                                                                   dclk0_period) / rclk_period;
                                                         // + 1 rclk if the dclk and rclk edges don't
                                                         // stay in the same position
                                                         if ((dclk0_gen.period % rclk_gen.period) != 0)
                                                            ef_cnt = ef_cnt + 1;
                                                         // + 2 rclk synchronization uncertainty
                                                         ef_cnt = ef_cnt + 2;
                                                         // - 3 rclks to recognize first write
                                                         ef_cnt = ef_cnt - 3;
                                                         // + 3 rclks to perform first write
                                                         ef_cnt = ef_cnt + 3;
                                                         // - 9 rclks minimum latency from counter expire
                                                         // to final fbf read
                                                         ef_cnt = ef_cnt - 9; */
	uint64_t vab_thresh                   : 4;  /**< VAB Threshold
                                                         When the number of valid VABs exceeds this number the
                                                         L2C increases the priority of all writes in the LMC. */
	uint64_t disecc                       : 1;  /**< Tag and Data ECC Disable */
	uint64_t disidxalias                  : 1;  /**< Index Alias Disable */
#else
	uint64_t disidxalias                  : 1;
	uint64_t disecc                       : 1;
	uint64_t vab_thresh                   : 4;
	uint64_t ef_cnt                       : 7;
	uint64_t ef_ena                       : 1;
	uint64_t xmc_arb_mode                 : 1;
	uint64_t rsp_arb_mode                 : 1;
	uint64_t maxlfb                       : 4;
	uint64_t maxvab                       : 4;
	uint64_t discclk                      : 1;
	uint64_t l2dfdbe                      : 1;
	uint64_t l2dfsbe                      : 1;
	uint64_t disstgl2i                    : 1;
	uint64_t rdf_fast                     : 1;
	uint64_t sepcmt                       : 1;
	uint64_t reserved_30_63               : 34;
#endif
	} s;
	/* CN61XX/CN66XX/CNF71XX layout: as cvmx_l2c_ctl_s but without SEPCMT
	   (bits <63:29> reserved).  Field semantics match cvmx_l2c_ctl_s. */
	struct cvmx_l2c_ctl_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_29_63               : 35;
	uint64_t rdf_fast                     : 1;  /**< When 0, delay read data fifo from DCLK to RCLK by one
                                                         cycle (needed when DCLK:RCLK ratio > 3:1); see
                                                         cvmx_l2c_ctl_s. */
	uint64_t disstgl2i                    : 1;  /**< Disable STGL2I's from changing the tags */
	uint64_t l2dfsbe                      : 1;  /**< Force single bit ECC error on PL2 allocates (2) */
	uint64_t l2dfdbe                      : 1;  /**< Force double bit ECC error on PL2 allocates (2) */
	uint64_t discclk                      : 1;  /**< Disable conditional clocking in L2C PNR blocks */
	uint64_t maxvab                       : 4;  /**< Maximum VABs in use at once
                                                         (0 means 16, 1-15 as expected) */
	uint64_t maxlfb                       : 4;  /**< Maximum LFBs in use at once
                                                         (0 means 16, 1-15 as expected) */
	uint64_t rsp_arb_mode                 : 1;  /**< Arbitration mode for RSC/RSD bus
                                                         (0 = round-robin, 1 = static priority:
                                                         IOR data, then STIN/FILLs, then STDN/SCDN/SCFL) */
	uint64_t xmc_arb_mode                 : 1;  /**< Arbitration mode for XMC QOS queues
                                                         (0 = fully determined through QOS, 1 = QOS0 highest
                                                         priority, QOS1-3 use normal mode) */
	uint64_t ef_ena                       : 1;  /**< LMC early fill enable */
	uint64_t ef_cnt                       : 7;  /**< LMC early fill count: cycles after the first LMC fill
                                                         cycle to wait before requesting a fill on the RSC/RSD
                                                         bus.  Same derivation as cvmx_l2c_ctl_s, including
                                                         the LMCn_CONFIG[MODE32b] ? 14 : 7 dclk term. */
	uint64_t vab_thresh                   : 4;  /**< VAB Threshold: when the number of valid VABs exceeds
                                                         this number the L2C increases the priority of all
                                                         writes in the LMC. */
	uint64_t disecc                       : 1;  /**< Tag and Data ECC Disable */
	uint64_t disidxalias                  : 1;  /**< Index Alias Disable */
#else
	uint64_t disidxalias                  : 1;
	uint64_t disecc                       : 1;
	uint64_t vab_thresh                   : 4;
	uint64_t ef_cnt                       : 7;
	uint64_t ef_ena                       : 1;
	uint64_t xmc_arb_mode                 : 1;
	uint64_t rsp_arb_mode                 : 1;
	uint64_t maxlfb                       : 4;
	uint64_t maxvab                       : 4;
	uint64_t discclk                      : 1;
	uint64_t l2dfdbe                      : 1;
	uint64_t l2dfsbe                      : 1;
	uint64_t disstgl2i                    : 1;
	uint64_t rdf_fast                     : 1;
	uint64_t reserved_29_63               : 35;
#endif
	} cn61xx;
	/* CN63XX layout: as cn61xx but without RDF_FAST (bits <63:28> reserved);
	   EF_CNT derivation uses a fixed 7-dclk term (no MODE32b). */
	struct cvmx_l2c_ctl_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_28_63               : 36;
	uint64_t disstgl2i                    : 1;  /**< Disable STGL2I's from changing the tags */
	uint64_t l2dfsbe                      : 1;  /**< Force single bit ECC error on PL2 allocates (2) */
	uint64_t l2dfdbe                      : 1;  /**< Force double bit ECC error on PL2 allocates (2) */
	uint64_t discclk                      : 1;  /**< Disable conditional clocking in L2C PNR blocks */
	uint64_t maxvab                       : 4;  /**< Maximum VABs in use at once
                                                         (0 means 16, 1-15 as expected) */
	uint64_t maxlfb                       : 4;  /**< Maximum LFBs in use at once
                                                         (0 means 16, 1-15 as expected) */
	uint64_t rsp_arb_mode                 : 1;  /**< Arbitration mode for RSC/RSD bus
                                                         (0 = round-robin, 1 = static priority:
                                                         IOR data, then STIN/FILLs, then STDN/SCDN/SCFL) */
	uint64_t xmc_arb_mode                 : 1;  /**< Arbitration mode for XMC QOS queues
                                                         (0 = fully determined through QOS, 1 = QOS0 highest
                                                         priority, QOS1-3 use normal mode) */
	uint64_t ef_ena                       : 1;  /**< LMC early fill enable */
	uint64_t ef_cnt                       : 7;  /**< LMC early fill count: cycles after the first LMC fill
                                                         cycle to wait before requesting a fill on the RSC/RSD
                                                         bus.
                                                         ef_cnt = (7 * dclk0_period) / rclk_period;
                                                         + 1 rclk if (dclk0_gen.period % rclk_gen.period) != 0;
                                                         + 2 rclk synchronization uncertainty;
                                                         - 3 rclks to recognize first write;
                                                         + 3 rclks to perform first write;
                                                         - 9 rclks minimum latency from counter expire
                                                           to final fbf read. */
	uint64_t vab_thresh                   : 4;  /**< VAB Threshold: when the number of valid VABs exceeds
                                                         this number the L2C increases the priority of all
                                                         writes in the LMC. */
	uint64_t disecc                       : 1;  /**< Tag and Data ECC Disable */
	uint64_t disidxalias                  : 1;  /**< Index Alias Disable */
#else
	uint64_t disidxalias                  : 1;
	uint64_t disecc                       : 1;
	uint64_t vab_thresh                   : 4;
	uint64_t ef_cnt                       : 7;
	uint64_t ef_ena                       : 1;
	uint64_t xmc_arb_mode                 : 1;
	uint64_t rsp_arb_mode                 : 1;
	uint64_t maxlfb                       : 4;
	uint64_t maxvab                       : 4;
	uint64_t discclk                      : 1;
	uint64_t l2dfdbe                      : 1;
	uint64_t l2dfsbe                      : 1;
	uint64_t disstgl2i                    : 1;
	uint64_t reserved_28_63               : 36;
#endif
	} cn63xx;
	/* CN63XX pass 1 layout: as cn63xx but without DISSTGL2I/L2DFSBE/L2DFDBE
	   (bits <63:25> reserved). */
	struct cvmx_l2c_ctl_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_25_63               : 39;
	uint64_t discclk                      : 1;  /**< Disable conditional clocking in L2C PNR blocks */
	uint64_t maxvab                       : 4;  /**< Maximum VABs in use at once
                                                         (0 means 16, 1-15 as expected) */
	uint64_t maxlfb                       : 4;  /**< Maximum LFBs in use at once
                                                         (0 means 16, 1-15 as expected) */
	uint64_t rsp_arb_mode                 : 1;  /**< Arbitration mode for RSC/RSD bus
                                                         (0 = round-robin, 1 = static priority:
                                                         IOR data, then STIN/FILLs, then STDN/SCDN/SCFL) */
	uint64_t xmc_arb_mode                 : 1;  /**< Arbitration mode for XMC QOS queues
                                                         (0 = fully determined through QOS, 1 = QOS0 highest
                                                         priority, QOS1-3 use normal mode) */
	uint64_t ef_ena                       : 1;  /**< LMC early fill enable */
	uint64_t ef_cnt                       : 7;  /**< LMC early fill count: cycles after the first LMC fill
                                                         cycle to wait before requesting a fill on the RSC/RSD
                                                         bus.  Same derivation as cn63xx (7-dclk term). */
	uint64_t vab_thresh                   : 4;  /**< VAB Threshold: when the number of valid VABs exceeds
                                                         this number the L2C increases the priority of all
                                                         writes in the LMC. */
	uint64_t disecc                       : 1;  /**< Tag and Data ECC Disable */
	uint64_t disidxalias                  : 1;  /**< Index Alias Disable */
#else
	uint64_t disidxalias                  : 1;
	uint64_t disecc                       : 1;
	uint64_t vab_thresh                   : 4;
	uint64_t ef_cnt                       : 7;
	uint64_t ef_ena                       : 1;
	uint64_t xmc_arb_mode                 : 1;
	uint64_t rsp_arb_mode                 : 1;
	uint64_t maxlfb                       : 4;
	uint64_t maxvab                       : 4;
	uint64_t discclk                      : 1;
	uint64_t reserved_25_63               : 39;
#endif
	} cn63xxp1;
	struct cvmx_l2c_ctl_cn61xx            cn66xx;
	struct cvmx_l2c_ctl_s                 cn68xx;
	struct cvmx_l2c_ctl_cn63xx            cn68xxp1;
	struct cvmx_l2c_ctl_cn61xx            cnf71xx;
};
typedef union cvmx_l2c_ctl cvmx_l2c_ctl_t;

/**
 * cvmx_l2c_dbg
 *
 * L2C_DBG = L2C DEBUG Register
 *
 * Description: L2C Tag/Data Store Debug Register
 *
 * Notes:
 * (1) When using the L2T, L2D or FINV Debug probe feature, the LDD command WILL NOT update the DuTags.
 * (2) L2T, L2D, FINV MUST BE mutually exclusive (only one set)
 * (3) Force Invalidate is intended as a means for SW to invalidate the L2 Cache while also writing back
 *     dirty data to memory to maintain coherency.
 * (4) L2 Cache Lock Down feature MUST BE disabled (L2C_LCKBASE[LCK_ENA]=0) if ANY of the L2C debug
 *     features (L2T, L2D, FINV) are enabled.
 */
union cvmx_l2c_dbg {
	uint64_t u64;
	struct cvmx_l2c_dbg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_15_63               : 49;
	uint64_t lfb_enum                     : 4;  /**< Specifies the LFB Entry# which is to be captured. */
	uint64_t lfb_dmp                      : 1;  /**< LFB Dump Enable: When written(=1), the contents of
                                                         the LFB specified by LFB_ENUM[3:0] are captured
                                                         into the L2C_LFB(0/1/2) registers.
                                                         NOTE: Some fields of the LFB entry are unpredictable
                                                         and dependent on usage.  This is only intended to be
                                                         used for HW debug.
 */
	uint64_t ppnum                        : 4;  /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
                                                         is enabled, this field determines which one-of-16
                                                         PPs is selected as the diagnostic PP. */
	uint64_t set                          : 3;  /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV]
                                                         is enabled, this field determines 1-of-n targeted
                                                         sets to act upon.
                                                         NOTE: L2C_DBG[SET] must never equal a crippled or
                                                         unusable set (see UMSK* registers and Cripple mode
                                                         fuses). */
	uint64_t finv                         : 1;  /**< Flush-Invalidate.
                                                         When flush-invalidate is enable (FINV=1), all STF
                                                         (L1 store-miss) commands generated from the diagnostic PP
                                                         (L2C_DBG[PPNUM]) will invalidate the specified set
                                                         (L2C_DBG[SET]) at the index specified in the STF
                                                         address[17:7].  If a dirty block is detected (D=1), it is
                                                         written back to memory.  The contents of the invalid
                                                         L2 Cache line is also 'scrubbed' with the STF write data.
                                                         NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in
                                                         STF address[17:7] refers to the 'aliased' address.
                                                         NOTE: An STF command with write data=ZEROES can be
                                                         generated by SW using the Prefetch instruction with
                                                         Hint=30d "prepare for Store", followed by a SYNCW.
                                                         What is seen at the L2C as an STF w/wrdcnt=0 with all
                                                         of its mask bits clear (indicates zero-fill data).
                                                         A flush-invalidate will 'force-hit' the L2 cache at
                                                         [index,set] and invalidate the entry (V=0/D=0/L=0/U=0).
                                                         If the cache block is dirty, it is also written back
                                                         to memory.  The DuTag state is probed/updated as normal
                                                         for an STF request.
                                                         TYPICAL APPLICATIONS:
                                                           1) L2 Tag/Data ECC SW Recovery
                                                           2) Cache Unlocking
                                                         NOTE: If the cacheline had been previously LOCKED(L=1),
                                                         a flush-invalidate operation will explicitly UNLOCK
                                                         (L=0) the set/index specified.
                                                         NOTE: The diagnostic PP cores can generate STF
                                                         commands to the L2 Cache whenever all 128 bytes in a
                                                         block are written.  SW must take this into consideration
                                                         to avoid 'errant' Flush-Invalidates. */
	uint64_t l2d                          : 1;  /**< When enabled (and L2C_DBG[L2T]=0), fill data is
                                                         returned directly from the L2 Data Store
                                                         (regardless of hit/miss) when an LDD(L1 load-miss) command
                                                         is issued from a PP determined by the L2C_DBG[PPNUM]
                                                         field.  The selected set# is determined by the
                                                         L2C_DBG[SET] field, and the index is determined
                                                         from the address[17:7] associated with the LDD
                                                         command.
                                                         This 'force-hit' will NOT alter the current L2 Tag
                                                         state OR the DuTag state. */
	uint64_t l2t                          : 1;  /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:18]]
                                                         is returned on the data bus starting at +32(and +96) bytes
                                                         offset from the beginning of cacheline when an LDD
                                                         (L1 load-miss) command is issued from a PP determined by
                                                         the L2C_DBG[PPNUM] field.
                                                         The selected L2 set# is determined by the L2C_DBG[SET]
                                                         field, and the L2 index is determined from the
                                                         phys_addr[17:7] associated with the LDD command.
                                                         This 'L2 force-hit' will NOT alter the current L2 Tag
                                                         state OR the DuTag state.
                                                         NOTE: The diagnostic PP should issue a d-stream load
                                                         to an aligned cacheline+0x20(+0x60) in order to have the
                                                         return VDLUTAG information (in OW2/OW6) written directly
                                                         into the proper PP register.  The diagnostic PP should also
                                                         flush it's local L1 cache after use(to ensure data
                                                         coherency).
                                                         NOTE: The position of the VDLUTAG data in the destination
                                                         register is dependent on the endian mode(big/little).
                                                         NOTE: N3K-Pass2 modification. (This bit's functionality
                                                         has changed since Pass1-in the following way).
                                                         NOTE: (For L2C BitMap testing of L2 Data Store OW ECC):
                                                         If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected
                                                         half cacheline (see: L2D_ERR[BMHCLSEL] is also
                                                         conditionally latched into the L2D_FSYN0/1 CSRs if an
                                                         LDD command is detected from the diagnostic PP(L2C_DBG[PPNUM]). */
#else
	uint64_t l2t                          : 1;
	uint64_t l2d                          : 1;
	uint64_t finv                         : 1;
	uint64_t set                          : 3;
	uint64_t ppnum                        : 4;
	uint64_t lfb_dmp                      : 1;
	uint64_t lfb_enum                     : 4;
	uint64_t reserved_15_63               : 49;
#endif
	} s;
	/* CN30XX (single core): 2-bit LFB_ENUM, 1-bit PPNUM (MBZ), 2-bit SET;
	   the STF/LDD index comes from address[14:7].  Field semantics otherwise
	   match cvmx_l2c_dbg_s. */
	struct cvmx_l2c_dbg_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_13_63               : 51;
	uint64_t lfb_enum                     : 2;  /**< Specifies the LFB Entry# which is to be captured. */
	uint64_t lfb_dmp                      : 1;  /**< LFB Dump Enable: when written(=1), the contents of the
                                                         LFB specified by LFB_ENUM are captured into the
                                                         L2C_LFB(0/1/2) registers.  HW debug only; some fields
                                                         are unpredictable and dependent on usage. */
	uint64_t reserved_7_9                 : 3;
	uint64_t ppnum                        : 1;  /**< Diagnostic PP select when L2T/L2D/FINV is enabled.
                                                         NOTE: For CN30XX single core PPNUM=0 (MBZ) */
	uint64_t reserved_5_5                 : 1;
	uint64_t set                          : 2;  /**< 1-of-n targeted set when L2T/L2D/FINV is enabled.
                                                         Must never equal a crippled or unusable set (see UMSK*
                                                         registers and Cripple mode fuses). */
	uint64_t finv                         : 1;  /**< Flush-Invalidate; as in cvmx_l2c_dbg_s but the STF
                                                         index is taken from address[14:7] (the 'aliased'
                                                         address if L2C_CFG[IDXALIAS]=1). */
	uint64_t l2d                          : 1;  /**< L2 Data Store force-hit on LDD; as in cvmx_l2c_dbg_s
                                                         but the index comes from address[14:7]. */
	uint64_t l2t                          : 1;  /**< L2 Tag force-hit on LDD returning
                                                         [V,D,L,U,phys_addr[33:15]]; as in cvmx_l2c_dbg_s but
                                                         the index comes from phys_addr[14:7]. */
#else
	uint64_t l2t                          : 1;
	uint64_t l2d                          : 1;
	uint64_t finv                         : 1;
	uint64_t set                          : 2;
	uint64_t reserved_5_5                 : 1;
	uint64_t ppnum                        : 1;
	uint64_t reserved_7_9                 : 3;
	uint64_t lfb_dmp                      : 1;
	uint64_t lfb_enum                     : 2;
	uint64_t reserved_13_63               : 51;
#endif
	} cn30xx;
	/* CN31XX: 3-bit LFB_ENUM, 1-bit PPNUM, 2-bit SET; index from
	   address[15:7].  Field semantics otherwise match cvmx_l2c_dbg_s. */
	struct cvmx_l2c_dbg_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_14_63               : 50;
	uint64_t lfb_enum                     : 3;  /**< Specifies the LFB Entry# which is to be captured. */
	uint64_t lfb_dmp                      : 1;  /**< LFB Dump Enable: when written(=1), captures the LFB
                                                         specified by LFB_ENUM into the L2C_LFB(0/1/2)
                                                         registers.  HW debug only. */
	uint64_t reserved_7_9                 : 3;
	uint64_t ppnum                        : 1;  /**< Diagnostic PP select when L2T/L2D/FINV is enabled. */
	uint64_t reserved_5_5                 : 1;
	uint64_t set                          : 2;  /**< 1-of-n targeted set when L2T/L2D/FINV is enabled.
                                                         Must never equal a crippled or unusable set. */
	uint64_t finv                         : 1;  /**< Flush-Invalidate; as in cvmx_l2c_dbg_s but the STF
                                                         index is taken from address[15:7]. */
	uint64_t l2d                          : 1;  /**< L2 Data Store force-hit on LDD; as in cvmx_l2c_dbg_s
                                                         but the index comes from address[15:7]. */
	uint64_t l2t                          : 1;  /**< L2 Tag force-hit on LDD returning
                                                         [V,D,L,U,phys_addr[33:16]]; as in cvmx_l2c_dbg_s but
                                                         the index comes from phys_addr[15:7]. */
#else
	uint64_t l2t                          : 1;
	uint64_t l2d                          : 1;
	uint64_t finv                         : 1;
	uint64_t set                          : 2;
	uint64_t reserved_5_5                 : 1;
	uint64_t ppnum                        : 1;
	uint64_t reserved_7_9                 : 3;
	uint64_t lfb_dmp                      : 1;
	uint64_t lfb_enum                     : 3;
	uint64_t reserved_14_63               : 50;
#endif
	} cn31xx;
	struct cvmx_l2c_dbg_s                 cn38xx;
	struct cvmx_l2c_dbg_s                 cn38xxp2;
	/* CN50XX: 3-bit LFB_ENUM, 1-bit PPNUM (1-of-2), 3-bit SET; index from
	   address[13:7].  Field semantics otherwise match cvmx_l2c_dbg_s. */
	struct cvmx_l2c_dbg_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_14_63               : 50;
	uint64_t lfb_enum                     : 3;  /**< Specifies the LFB Entry# which is to be captured. */
	uint64_t lfb_dmp                      : 1;  /**< LFB Dump Enable: when written(=1), captures the LFB
                                                         specified by LFB_ENUM[2:0] into the L2C_LFB(0/1/2)
                                                         registers.  HW debug only. */
	uint64_t reserved_7_9                 : 3;
	uint64_t ppnum                        : 1;  /**< Selects 1-of-2 PPs as the diagnostic PP when
                                                         L2T/L2D/FINV is enabled. */
	uint64_t set                          : 3;  /**< 1-of-n targeted set when L2T/L2D/FINV is enabled.
                                                         Must never equal a crippled or unusable set. */
	uint64_t finv                         : 1;  /**< Flush-Invalidate; as in cvmx_l2c_dbg_s but the STF
                                                         index is taken from address[13:7]. */
	uint64_t l2d                          : 1;  /**< L2 Data Store force-hit on LDD; as in cvmx_l2c_dbg_s
                                                         but the index comes from address[13:7]. */
	uint64_t l2t                          : 1;  /**< L2 Tag force-hit on LDD returning
                                                         [V,D,L,U,phys_addr[33:14]]; as in cvmx_l2c_dbg_s but
                                                         the index comes from phys_addr[13:7]. */
#else
	uint64_t l2t                          : 1;
	uint64_t l2d                          : 1;
	uint64_t finv                        : 1;
	uint64_t set                          : 3;
	uint64_t ppnum                        : 1;
	uint64_t reserved_7_9                 : 3;
	uint64_t lfb_dmp                      : 1;
	uint64_t lfb_enum                     : 3;
	uint64_t reserved_14_63               : 50;
#endif
	} cn50xx;
	/* CN52XX: 3-bit LFB_ENUM, 2-bit PPNUM (1-of-4), 3-bit SET; index from
	   address[15:7].  Field semantics otherwise match cvmx_l2c_dbg_s. */
	struct cvmx_l2c_dbg_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_14_63               : 50;
	uint64_t lfb_enum                     : 3;  /**< Specifies the LFB Entry# which is to be captured. */
	uint64_t lfb_dmp                      : 1;  /**< LFB Dump Enable: when written(=1), captures the LFB
                                                         specified by LFB_ENUM[2:0] into the L2C_LFB(0/1/2)
                                                         registers.  HW debug only. */
	uint64_t reserved_8_9                 : 2;
	uint64_t ppnum                        : 2;  /**< Selects 1-of-4 PPs as the diagnostic PP when
                                                         L2T/L2D/FINV is enabled. */
	uint64_t set                          : 3;  /**< 1-of-n targeted set when L2T/L2D/FINV is enabled.
                                                         Must never equal a crippled or unusable set. */
	uint64_t finv                         : 1;  /**< Flush-Invalidate; as in cvmx_l2c_dbg_s but the STF
                                                         index is taken from address[15:7]. */
	uint64_t l2d                          : 1;  /**< L2 Data Store force-hit on LDD; as in cvmx_l2c_dbg_s
                                                         but the index comes from address[15:7]. */
	uint64_t l2t                          : 1;  /**< L2 Tag force-hit on LDD returning
                                                         [V,D,L,U,phys_addr[33:16]]; as in cvmx_l2c_dbg_s but
                                                         the index comes from phys_addr[15:7]. */
#else
	uint64_t l2t                          : 1;
	uint64_t l2d                          : 1;
	uint64_t finv                         : 1;
	uint64_t set                          : 3;
	uint64_t ppnum                        : 2;
	uint64_t reserved_8_9                 : 2;
	uint64_t lfb_dmp                      : 1;
	uint64_t lfb_enum                     : 3;
	uint64_t reserved_14_63               : 50;
#endif
	} cn52xx;
	struct cvmx_l2c_dbg_cn52xx            cn52xxp1;
	struct cvmx_l2c_dbg_s                 cn56xx;
	struct cvmx_l2c_dbg_s                 cn56xxp1;
	struct cvmx_l2c_dbg_s                 cn58xx;
	struct cvmx_l2c_dbg_s                 cn58xxp1;
};
typedef union cvmx_l2c_dbg cvmx_l2c_dbg_t;

/**
 * cvmx_l2c_dut
 *
 * L2C_DUT = L2C DUTAG Register
 *
 * Description: L2C Duplicate Tag State Register
 *
 * Notes:
 * (1) When using the L2T, L2D or FINV Debug probe feature, an LDD command issued by the diagnostic PP
 *     WILL NOT update the DuTags.
 * (2) L2T, L2D, FINV MUST BE mutually exclusive (only one enabled at a time).
 * (3) Force Invalidate is intended as a means for SW to invalidate the L2 Cache while also writing back
 *     dirty data to memory to maintain coherency.  (A side effect of FINV is that an LDD L2 fill is
 *     launched which fills data into the L2 DS).
 */
union cvmx_l2c_dut {
	uint64_t u64;
	struct cvmx_l2c_dut_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t dtena                        : 1;  /**< DuTag Diagnostic read enable.
                                                         When L2C_DUT[DTENA]=1, all LDD(L1 load-miss)
                                                         commands issued from the diagnostic PP
                                                         (L2C_DBG[PPNUM]) will capture the DuTag state (V|L1TAG)
                                                         of the PP#(specified in the LDD address[29:26] into
                                                         the L2C_DUT CSR register.  This allows the diagPP to
                                                         read ALL DuTags (from any PP).
                                                         The DuTag Set# to capture is extracted from the LDD
                                                         address[25:20].  The diagnostic PP would issue the
                                                         LDD then read the L2C_DUT register (one at a time).
                                                         This LDD 'L2 force-hit' will NOT alter the current L2
                                                         Tag State OR the DuTag state.
                                                         NOTE: For CN58XX the DuTag SIZE has doubled (to 16KB)
                                                         where each DuTag is organized as 2x 64-way entries.
                                                         The LDD address[7] determines which 1 (of-2) internal
                                                         64-ways to select.
                                                         The fill data is returned directly from the L2 Data
                                                         Store (regardless of hit/miss) when an LDD command
                                                         is issued from a PP determined by the L2C_DBG[PPNUM]
                                                         field. The selected L2 Set# is determined by the
                                                         L2C_DBG[SET] field, and the index is determined
                                                         from the address[17:7] associated with the LDD
                                                         command.
                                                         This 'L2 force-hit' will NOT alter the current L2 Tag
                                                         state OR the DuTag state.
                                                         NOTE: In order for the DiagPP to generate an LDD command
                                                         to the L2C, it must first force an L1 Dcache flush. */
	uint64_t reserved_30_30               : 1;
	uint64_t dt_vld                       : 1;  /**< Duplicate L1 Tag Valid bit latched in for previous
                                                         LDD (L1 load-miss) command sourced by diagnostic PP. */
	uint64_t dt_tag                       : 29; /**< Duplicate L1 Tag[35:7] latched in for previous
                                                         LDD (L1 load-miss) command sourced by diagnostic PP. */
#else
	uint64_t dt_tag                       : 29;
	uint64_t dt_vld                       : 1;
	uint64_t reserved_30_30               : 1;
	uint64_t dtena                        : 1;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_l2c_dut_s cn30xx;
	struct cvmx_l2c_dut_s cn31xx;
	struct cvmx_l2c_dut_s cn38xx;
	struct cvmx_l2c_dut_s cn38xxp2;
	struct cvmx_l2c_dut_s cn50xx;
	struct cvmx_l2c_dut_s cn52xx;
	struct cvmx_l2c_dut_s cn52xxp1;
	struct cvmx_l2c_dut_s cn56xx;
	struct cvmx_l2c_dut_s cn56xxp1;
	struct cvmx_l2c_dut_s cn58xx;
	struct cvmx_l2c_dut_s cn58xxp1;
};
typedef union cvmx_l2c_dut cvmx_l2c_dut_t;

/**
 * cvmx_l2c_dut_map#
 *
 * L2C_DUT_MAP = L2C DUT memory map region
 *
 * Description: Address of the start of the region mapped to the duplicate tag. Can be used to read
 * and write the raw duplicate tag CAM. Writes should be used only with great care as they can easily
 * destroy the coherency of the memory system. In any case this region is expected to only be used
 * for debug.
 *
 * This base address should be combined with PP virtual ID, L1 way and L1 set to produce the final
 * address as follows:
 *      addr<63:13>  L2C_DUT_MAP<63:13>
 *      addr<12:11>  PP VID
 *      addr<10:6>   L1 way
 *      addr<5:3>    L1 set
 *      addr<2:0>    UNUSED
 *
 * Notes:
 * (1) The tag is 37:10 from the 38-bit OCTEON physical address after hole removal. (The hole is between DR0
 *     and DR1. Remove the hole by subtracting 256MB from 38-bit OCTEON L2/DRAM physical addresses >= 512 MB.)
 */
union cvmx_l2c_dut_mapx {
	uint64_t u64;
	struct cvmx_l2c_dut_mapx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_38_63               : 26;
	uint64_t tag                          : 28; /**< The tag value (see Note 1) */
	uint64_t reserved_1_9                 : 9;
	uint64_t valid                        : 1;  /**< The valid bit */
#else
	uint64_t valid                        : 1;
	uint64_t reserved_1_9                 : 9;
	uint64_t tag                          : 28;
	uint64_t reserved_38_63               : 26;
#endif
	} s;
	struct cvmx_l2c_dut_mapx_s cn61xx;
	struct cvmx_l2c_dut_mapx_s cn63xx;
	struct cvmx_l2c_dut_mapx_s cn63xxp1;
	struct cvmx_l2c_dut_mapx_s cn66xx;
	struct cvmx_l2c_dut_mapx_s cn68xx;
	struct cvmx_l2c_dut_mapx_s cn68xxp1;
	struct cvmx_l2c_dut_mapx_s cnf71xx;
};
typedef union cvmx_l2c_dut_mapx cvmx_l2c_dut_mapx_t;

/**
 * cvmx_l2c_err_tdt#
 *
 * L2C_ERR_TDT = L2C TAD DaTa Error Info
 *
 *
 * Notes:
 * (1) If the status bit corresponding to the value of the TYPE field is not set the WAYIDX/SYN fields
 *     are not associated with the errors currently logged by the status bits and should be ignored.
 *     This can occur, for example, because of a race between a write to clear a DBE and a new, lower
 *     priority, SBE error occurring.
 *     If the SBE arrives prior to the DBE clear the WAYIDX/SYN fields
 *     will still be locked, but the new SBE error status bit will still be set.
 *
 * (2) The four types of errors have differing priorities. Priority (from lowest to highest) is SBE,
 *     VSBE, DBE, VDBE. An error will lock the WAYIDX, and SYN fields for other errors of equal or
 *     lower priority until cleared by software. This means that the error information is always
 *     (assuming the TYPE field matches) for the highest priority error logged in the status bits.
 *
 * (3) If VSBE or VDBE are set (and the TYPE field matches), the WAYIDX fields are valid and the
 *     syndrome can be found in L2C_ERR_VBF.
 *
 * (4) The syndrome is recorded for DBE errors, though the utility of the value is not clear.
 */
union cvmx_l2c_err_tdtx {
	uint64_t u64;
	struct cvmx_l2c_err_tdtx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t dbe                          : 1;  /**< L2D Double-Bit error has occurred */
	uint64_t sbe                          : 1;  /**< L2D Single-Bit error has occurred */
	uint64_t vdbe                         : 1;  /**< VBF Double-Bit error has occurred */
	uint64_t vsbe                         : 1;  /**< VBF Single-Bit error has occurred */
	uint64_t syn                          : 10; /**< L2D syndrome (valid only for SBE/DBE, not VSBE/VDBE) */
	uint64_t reserved_22_49               : 28;
	uint64_t wayidx                       : 18; /**< Way, index, OW of the L2 block containing the error */
	uint64_t reserved_2_3                 : 2;
	uint64_t type                         : 2;  /**< The type of error the WAYIDX,SYN were latched for.
                                                         0 - VSBE
                                                         1 - VDBE
                                                         2 - SBE
                                                         3 - DBE */
#else
	uint64_t type                         : 2;
	uint64_t reserved_2_3                 : 2;
	uint64_t wayidx                       : 18;
	uint64_t reserved_22_49               : 28;
	uint64_t syn                          : 10;
	uint64_t vsbe                         : 1;
	uint64_t vdbe                         : 1;
	uint64_t sbe                          : 1;
	uint64_t dbe                          : 1;
#endif
	} s;
	struct cvmx_l2c_err_tdtx_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t dbe                          : 1;  /**< L2D Double-Bit error has occurred */
	uint64_t sbe                          : 1;  /**< L2D Single-Bit error has occurred */
	uint64_t vdbe                         : 1;  /**< VBF Double-Bit error has occurred */
	uint64_t vsbe                         : 1;  /**< VBF Single-Bit error has occurred */
	uint64_t syn                          : 10; /**< L2D syndrome (valid only for SBE/DBE, not VSBE/VDBE) */
	uint64_t reserved_20_49               : 30;
	uint64_t wayidx                       : 16; /**< Way, index, OW of the L2 block containing the error */
	uint64_t reserved_2_3                 : 2;
	uint64_t type                         : 2;  /**< The type of error the WAYIDX,SYN were latched for.
                                                         0 - VSBE
                                                         1 - VDBE
                                                         2 - SBE
                                                         3 - DBE */
#else
	uint64_t type                         : 2;
	uint64_t reserved_2_3                 : 2;
	uint64_t wayidx                       : 16;
	uint64_t reserved_20_49               : 30;
	uint64_t syn                          : 10;
	uint64_t vsbe                         : 1;
	uint64_t vdbe                         : 1;
	uint64_t sbe                          : 1;
	uint64_t dbe                          : 1;
#endif
	} cn61xx;
	struct cvmx_l2c_err_tdtx_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t dbe                          : 1;  /**< L2D Double-Bit error has occurred */
	uint64_t sbe                          : 1;  /**< L2D Single-Bit error has occurred */
	uint64_t vdbe                         : 1;  /**< VBF Double-Bit error has occurred */
	uint64_t vsbe                         : 1;  /**< VBF Single-Bit error has occurred */
	uint64_t syn                          : 10; /**< L2D syndrome (valid only for SBE/DBE, not VSBE/VDBE) */
	uint64_t reserved_21_49               : 29;
	uint64_t wayidx                       : 17; /**< Way, index, OW of the L2 block containing the error */
	uint64_t reserved_2_3                 : 2;
	uint64_t type                         : 2;  /**< The type of error the WAYIDX,SYN were latched for.
                                                         0 - VSBE
                                                         1 - VDBE
                                                         2 - SBE
                                                         3 - DBE */
#else
	uint64_t type                         : 2;
	uint64_t reserved_2_3                 : 2;
	uint64_t wayidx                       : 17;
	uint64_t reserved_21_49               : 29;
	uint64_t syn                          : 10;
	uint64_t vsbe                         : 1;
	uint64_t vdbe                         : 1;
	uint64_t sbe                          : 1;
	uint64_t dbe                          : 1;
#endif
	} cn63xx;
	struct cvmx_l2c_err_tdtx_cn63xx cn63xxp1;
	struct cvmx_l2c_err_tdtx_cn63xx cn66xx;
	struct cvmx_l2c_err_tdtx_s cn68xx;
	struct cvmx_l2c_err_tdtx_s cn68xxp1;
	struct cvmx_l2c_err_tdtx_cn61xx cnf71xx;
};
typedef union cvmx_l2c_err_tdtx cvmx_l2c_err_tdtx_t;

/**
 * cvmx_l2c_err_ttg#
 *
 * L2C_ERR_TTG = L2C TAD TaG Error Info
 *
 *
 * Notes:
 * (1) The priority of errors (highest to lowest) is DBE, SBE, NOWAY. An error will lock the SYN, and
 *     WAYIDX fields for equal or lower priority errors until cleared by software.
 *
 * (2) The syndrome is recorded for DBE errors, though the utility of the value is not clear.
 *
 * (3) A NOWAY error does not change the value of the SYN field, and leaves WAYIDX[20:17]
 *     unpredictable. WAYIDX[16:7] is the L2 block index associated with the command which had no way
 *     to allocate.
 *
 * (4) If the status bit corresponding to the value of the TYPE field is not set the WAYIDX/SYN fields
 *     are not associated with the errors currently logged by the status bits and should be ignored.
 *     This can occur, for example, because of a race between a write to clear a DBE and a new, lower
 *     priority, SBE error occurring. If the SBE arrives prior to the DBE clear the WAYIDX/SYN fields
 *     will still be locked, but the new SBE error status bit will still be set.
 */
union cvmx_l2c_err_ttgx {
	uint64_t u64;
	struct cvmx_l2c_err_ttgx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t dbe                          : 1;  /**< Double-Bit ECC error */
	uint64_t sbe                          : 1;  /**< Single-Bit ECC error */
	uint64_t noway                        : 1;  /**< No way was available for allocation.
                                                         L2C sets NOWAY during its processing of a
                                                         transaction whenever it needed/wanted to allocate
                                                         a WAY in the L2 cache, but was unable to. NOWAY==1
                                                         is (generally) not an indication that L2C failed to
                                                         complete transactions. Rather, it is a hint of
                                                         possible performance degradation. (For example, L2C
                                                         must read-modify-write DRAM for every transaction
                                                         that updates some, but not all, of the bytes in a
                                                         cache block, misses in the L2 cache, and cannot
                                                         allocate a WAY.) There is one "failure" case where
                                                         L2C will set NOWAY: when it cannot leave a block
                                                         locked in the L2 cache as part of a LCKL2
                                                         transaction. */
	uint64_t reserved_56_60               : 5;
	uint64_t syn                          : 6;  /**< Syndrome for the single-bit error */
	uint64_t reserved_22_49               : 28;
	uint64_t wayidx                       : 15; /**< Way and index of the L2 block containing the error */
	uint64_t reserved_2_6                 : 5;
	uint64_t type                         : 2;  /**< The type of error the WAYIDX,SYN were latched for.
                                                         0 - not valid
                                                         1 - NOWAY
                                                         2 - SBE
                                                         3 - DBE */
#else
	uint64_t type                         : 2;
	uint64_t reserved_2_6                 : 5;
	uint64_t wayidx                       : 15;
	uint64_t reserved_22_49               : 28;
	uint64_t syn                          : 6;
	uint64_t reserved_56_60               : 5;
	uint64_t noway                        : 1;
	uint64_t sbe                          : 1;
	uint64_t dbe                          : 1;
#endif
	} s;
	struct cvmx_l2c_err_ttgx_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t dbe                          : 1;  /**< Double-Bit ECC error */
	uint64_t sbe                          : 1;  /**< Single-Bit ECC error */
	uint64_t noway                        : 1;  /**< No way was available for allocation.
                                                         L2C sets NOWAY during its processing of a
                                                         transaction whenever it needed/wanted to allocate
                                                         a WAY in the L2 cache, but was unable to. NOWAY==1
                                                         is (generally) not an indication that L2C failed to
                                                         complete transactions. Rather, it is a hint of
                                                         possible performance degradation. (For example, L2C
                                                         must read-modify-write DRAM for every transaction
                                                         that updates some, but not all, of the bytes in a
                                                         cache block, misses in the L2 cache, and cannot
                                                         allocate a WAY.) There is one "failure" case where
                                                         L2C will set NOWAY: when it cannot leave a block
                                                         locked in the L2 cache as part of a LCKL2
                                                         transaction. */
	uint64_t reserved_56_60               : 5;
	uint64_t syn                          : 6;  /**< Syndrome for the single-bit error */
	uint64_t reserved_20_49               : 30;
	uint64_t wayidx                       : 13; /**< Way and index of the L2 block containing the error */
	uint64_t reserved_2_6                 : 5;
	uint64_t type                         : 2;  /**< The type of error the WAYIDX,SYN were latched for.
                                                         0 - not valid
                                                         1 - NOWAY
                                                         2 - SBE
                                                         3 - DBE */
#else
	uint64_t type                         : 2;
	uint64_t reserved_2_6                 : 5;
	uint64_t wayidx                       : 13;
	uint64_t reserved_20_49               : 30;
	uint64_t syn                          : 6;
	uint64_t reserved_56_60               : 5;
	uint64_t noway                        : 1;
	uint64_t sbe                          : 1;
	uint64_t dbe                          : 1;
#endif
	} cn61xx;
	struct cvmx_l2c_err_ttgx_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t dbe                          : 1;  /**< Double-Bit ECC error */
	uint64_t sbe                          : 1;  /**< Single-Bit ECC error */
	uint64_t noway                        : 1;  /**< No way was available for allocation.
                                                         L2C sets NOWAY during its processing of a
                                                         transaction whenever it needed/wanted to allocate
                                                         a WAY in the L2 cache, but was unable to. NOWAY==1
                                                         is (generally) not an indication that L2C failed to
                                                         complete transactions. Rather, it is a hint of
                                                         possible performance degradation. (For example, L2C
                                                         must read-modify-write DRAM for every transaction
                                                         that updates some, but not all, of the bytes in a
                                                         cache block, misses in the L2 cache, and cannot
                                                         allocate a WAY.) There is one "failure" case where
                                                         L2C will set NOWAY: when it cannot leave a block
                                                         locked in the L2 cache as part of a LCKL2
                                                         transaction. */
	uint64_t reserved_56_60               : 5;
	uint64_t syn                          : 6;  /**< Syndrome for the single-bit error */
	uint64_t reserved_21_49               : 29;
	uint64_t wayidx                       : 14; /**< Way and index of the L2 block containing the error */
	uint64_t reserved_2_6                 : 5;
	uint64_t type                         : 2;  /**< The type of error the WAYIDX,SYN were latched for.
                                                         0 - not valid
                                                         1 - NOWAY
                                                         2 - SBE
                                                         3 - DBE */
#else
	uint64_t type                         : 2;
	uint64_t reserved_2_6                 : 5;
	uint64_t wayidx                       : 14;
	uint64_t reserved_21_49               : 29;
	uint64_t syn                          : 6;
	uint64_t reserved_56_60               : 5;
	uint64_t noway                        : 1;
	uint64_t sbe                          : 1;
	uint64_t dbe                          : 1;
#endif
	} cn63xx;
	struct cvmx_l2c_err_ttgx_cn63xx cn63xxp1;
	struct cvmx_l2c_err_ttgx_cn63xx cn66xx;
	struct cvmx_l2c_err_ttgx_s cn68xx;
	struct cvmx_l2c_err_ttgx_s cn68xxp1;
	struct cvmx_l2c_err_ttgx_cn61xx cnf71xx;
};
typedef union cvmx_l2c_err_ttgx cvmx_l2c_err_ttgx_t;

/**
 * cvmx_l2c_err_vbf#
 *
 * L2C_ERR_VBF = L2C VBF Error Info
 *
 *
 * Notes:
 * (1) The way/index information is stored in L2C_ERR_TDT, assuming no later interrupt occurred to
 *     overwrite the information. See the notes associated with L2C_ERR_TDT for full details.
 *
 * (2) The first VSBE will lock the register for other VSBE's. A VDBE, however, will overwrite a
 *     previously logged VSBE. Once a VDBE has been logged all later errors will not be logged.
 *     This
 *     means that if VDBE is set the information in the register is for the VDBE, if VDBE is clear and
 *     VSBE is set the register contains information about the VSBE.
 *
 * (3) The syndrome is recorded for VDBE errors, though the utility of the value is not clear.
 *
 * (4) If the status bit corresponding to the value of the TYPE field is not set the SYN field is not
 *     associated with the errors currently logged by the status bits and should be ignored. This can
 *     occur, for example, because of a race between a write to clear a VDBE and a new, lower priority,
 *     VSBE error occurring. If the VSBE arrives prior to the VDBE clear the SYN field will still be
 *     locked, but the new VSBE error status bit will still be set.
 */
union cvmx_l2c_err_vbfx {
	uint64_t u64;
	struct cvmx_l2c_err_vbfx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_62_63               : 2;
	uint64_t vdbe                         : 1;  /**< VBF Double-Bit error has occurred */
	uint64_t vsbe                         : 1;  /**< VBF Single-Bit error has occurred */
	uint64_t vsyn                         : 10; /**< VBF syndrome (valid only if VSBE/VDBE is set) */
	uint64_t reserved_2_49                : 48;
	uint64_t type                         : 2;  /**< The type of error the SYN were latched for.
                                                         0 - VSBE
                                                         1 - VDBE */
#else
	uint64_t type                         : 2;
	uint64_t reserved_2_49                : 48;
	uint64_t vsyn                         : 10;
	uint64_t vsbe                         : 1;
	uint64_t vdbe                         : 1;
	uint64_t reserved_62_63               : 2;
#endif
	} s;
	struct cvmx_l2c_err_vbfx_s cn61xx;
	struct cvmx_l2c_err_vbfx_s cn63xx;
	struct cvmx_l2c_err_vbfx_s cn63xxp1;
	struct cvmx_l2c_err_vbfx_s cn66xx;
	struct cvmx_l2c_err_vbfx_s cn68xx;
	struct cvmx_l2c_err_vbfx_s cn68xxp1;
	struct cvmx_l2c_err_vbfx_s cnf71xx;
};
typedef union cvmx_l2c_err_vbfx cvmx_l2c_err_vbfx_t;

/**
 * cvmx_l2c_err_xmc
 *
 * L2C_ERR_XMC = L2C XMC request error
 *
 * Description: records error information for HOLE*, BIG* and VRT* interrupts.
 *
 * Notes:
 * (1) The first BIGWR/HOLEWR/VRT* interrupt will lock the register until L2C_INT_REG[6:1] are
 *     cleared.
 *
 * (2) ADDR<15:0> will always be zero for VRT* interrupts.
 *
 * (3) ADDR is the 38-bit OCTEON physical address after hole removal. (The hole is between DR0
 *     and DR1. Remove the hole by subtracting 256MB from all 38-bit OCTEON L2/DRAM physical addresses
 *     >= 512 MB.)
 *
 * (4) For 63xx pass 2.0 and all 68xx ADDR<15:0> will ALWAYS be zero.
 */
union cvmx_l2c_err_xmc {
	uint64_t u64;
	struct cvmx_l2c_err_xmc_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t cmd                          : 6;  /**< XMC command or request causing error */
	uint64_t reserved_54_57               : 4;
	uint64_t sid                          : 6;  /**< XMC sid of request causing error */
	uint64_t reserved_38_47               : 10;
	uint64_t addr                         : 38; /**< XMC address causing the error (see Notes 2 and 3) */
#else
	uint64_t addr                         : 38;
	uint64_t reserved_38_47               : 10;
	uint64_t sid                          : 6;
	uint64_t reserved_54_57               : 4;
	uint64_t cmd                          : 6;
#endif
	} s;
	struct cvmx_l2c_err_xmc_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t cmd                          : 6;  /**< XMC command or request causing error */
	uint64_t reserved_52_57               : 6;
	uint64_t sid                          : 4;  /**< XMC sid of request causing error */
	uint64_t reserved_38_47               : 10;
	uint64_t addr                         : 38; /**< XMC address causing the error (see Notes 2 and 3) */
#else
	uint64_t addr                         : 38;
	uint64_t reserved_38_47               : 10;
	uint64_t sid                          : 4;
	uint64_t reserved_52_57               : 6;
	uint64_t cmd                          : 6;
#endif
	} cn61xx;
	struct cvmx_l2c_err_xmc_cn61xx cn63xx;
	struct cvmx_l2c_err_xmc_cn61xx cn63xxp1;
	struct cvmx_l2c_err_xmc_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t cmd                          : 6;  /**< XMC command or request causing error */
	uint64_t reserved_53_57               : 5;
	uint64_t sid                          : 5;  /**< XMC sid of request causing error */
	uint64_t reserved_38_47               : 10;
	uint64_t
	         addr                         : 38; /**< XMC address causing the error (see Notes 2 and 3) */
#else
	uint64_t addr                         : 38;
	uint64_t reserved_38_47               : 10;
	uint64_t sid                          : 5;
	uint64_t reserved_53_57               : 5;
	uint64_t cmd                          : 6;
#endif
	} cn66xx;
	struct cvmx_l2c_err_xmc_s cn68xx;
	struct cvmx_l2c_err_xmc_s cn68xxp1;
	struct cvmx_l2c_err_xmc_cn61xx cnf71xx;
};
typedef union cvmx_l2c_err_xmc cvmx_l2c_err_xmc_t;

/**
 * cvmx_l2c_grpwrr0
 *
 * L2C_GRPWRR0 = L2C PP Weighted Round \#0 Register
 *
 * Description: Defines Weighted rounds(32) for Group PLC0,PLC1
 *
 * Notes:
 * - Starvation of a group 'could' occur, unless SW takes the precaution to ensure that each GROUP
 *   participates in at least 1(of 32) rounds (ie: At least 1 bit(of 32) should be clear).
 */
union cvmx_l2c_grpwrr0 {
	uint64_t u64;
	struct cvmx_l2c_grpwrr0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t plc1rmsk                     : 32; /**< PLC1 Group#1 Weighted Round Mask
                                                         Each bit represents 1 of 32 rounds
                                                         for Group \#1's participation. When a 'round' bit is
                                                         set, Group#1 is 'masked' and DOES NOT participate.
                                                         When a 'round' bit is clear, Group#1 WILL
                                                         participate in the arbitration for this round. */
	uint64_t plc0rmsk                     : 32; /**< PLC Group#0 Weighted Round Mask
                                                         Each bit represents 1 of 32 rounds
                                                         for Group \#0's participation. When a 'round' bit is
                                                         set, Group#0 is 'masked' and DOES NOT participate.
                                                         When a 'round' bit is clear, Group#0 WILL
                                                         participate in the arbitration for this round. */
#else
	uint64_t plc0rmsk                     : 32;
	uint64_t plc1rmsk                     : 32;
#endif
	} s;
	struct cvmx_l2c_grpwrr0_s cn52xx;
	struct cvmx_l2c_grpwrr0_s cn52xxp1;
	struct cvmx_l2c_grpwrr0_s cn56xx;
	struct cvmx_l2c_grpwrr0_s cn56xxp1;
};
typedef union cvmx_l2c_grpwrr0 cvmx_l2c_grpwrr0_t;

/**
 * cvmx_l2c_grpwrr1
 *
 * L2C_GRPWRR1 = L2C PP Weighted Round \#1 Register
 *
 * Description: Defines Weighted Rounds(32) for Group PLC2,ILC
 *
 * Notes:
 * - Starvation of a group 'could' occur, unless SW takes the precaution to ensure that each GROUP
 *   participates in at least 1(of 32) rounds (ie: At least 1 bit(of 32) should be clear).
 */
union cvmx_l2c_grpwrr1 {
	uint64_t u64;
	struct cvmx_l2c_grpwrr1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t ilcrmsk                      : 32; /**< ILC (IOB) Weighted Round Mask
                                                         Each bit represents 1 of 32 rounds
                                                         for IOB participation. When a 'round' bit is
                                                         set, IOB is 'masked' and DOES NOT participate.
                                                         When a 'round' bit is clear, IOB WILL
                                                         participate in the arbitration for this round. */
	uint64_t plc2rmsk                     : 32; /**< PLC Group#2 Weighted Round Mask
                                                         Each bit represents 1 of 32 rounds
                                                         for Group \#2's participation. When a 'round' bit is
                                                         set, Group#2 is 'masked' and DOES NOT participate.
                                                         When a 'round' bit is clear, Group#2 WILL
                                                         participate in the arbitration for this round. */
#else
	uint64_t plc2rmsk                     : 32;
	uint64_t ilcrmsk                      : 32;
#endif
	} s;
	struct cvmx_l2c_grpwrr1_s cn52xx;
	struct cvmx_l2c_grpwrr1_s cn52xxp1;
	struct cvmx_l2c_grpwrr1_s cn56xx;
	struct cvmx_l2c_grpwrr1_s cn56xxp1;
};
typedef union cvmx_l2c_grpwrr1 cvmx_l2c_grpwrr1_t;

/**
 * cvmx_l2c_int_en
 *
 * L2C_INT_EN = L2C Global Interrupt Enable Register
 *
 * Description:
 */
union cvmx_l2c_int_en {
	uint64_t u64;
	struct cvmx_l2c_int_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_9_63                : 55;
	uint64_t lck2ena                      : 1;  /**< L2 Tag Lock Error2 Interrupt Enable bit
                                                         NOTE: This is the 'same' bit as L2T_ERR[LCK_INTENA2] */
	uint64_t lckena                       : 1;  /**< L2 Tag Lock Error Interrupt Enable bit
                                                         NOTE: This is the 'same' bit as L2T_ERR[LCK_INTENA] */
	uint64_t l2ddeden                     : 1;  /**< L2 Data ECC Double Error Detect(DED) Interrupt Enable bit
                                                         When set, allows interrupts to be reported on double bit
                                                         (uncorrectable) errors from the L2 Data Arrays.
                                                         NOTE: This is the 'same' bit as L2D_ERR[DED_INTENA] */
	uint64_t l2dsecen                     : 1;  /**< L2 Data ECC Single Error Correct(SEC) Interrupt Enable bit
                                                         When set, allows interrupts to be reported on single bit
                                                         (correctable) errors from the L2 Data Arrays.
                                                         NOTE: This is the 'same' bit as L2D_ERR[SEC_INTENA] */
	uint64_t l2tdeden                     : 1;  /**< L2 Tag ECC Double Error Detect(DED) Interrupt
                                                         NOTE: This is the 'same' bit as L2T_ERR[DED_INTENA] */
	uint64_t l2tsecen                     : 1;  /**< L2 Tag ECC Single Error Correct(SEC) Interrupt
                                                         Enable bit. When set, allows interrupts to be
                                                         reported on single bit (correctable) errors from
                                                         the L2 Tag Arrays.
                                                         NOTE: This is the 'same' bit as L2T_ERR[SEC_INTENA] */
	uint64_t oob3en                       : 1;  /**< DMA Out of Bounds Interrupt Enable Range#3 */
	uint64_t oob2en                       : 1;  /**< DMA Out of Bounds Interrupt Enable Range#2 */
	uint64_t oob1en                       : 1;  /**< DMA Out of Bounds Interrupt Enable Range#1 */
#else
	uint64_t oob1en                       : 1;
	uint64_t oob2en                       : 1;
	uint64_t oob3en                       : 1;
	uint64_t l2tsecen                     : 1;
	uint64_t l2tdeden                     : 1;
	uint64_t l2dsecen                     : 1;
	uint64_t l2ddeden                     : 1;
	uint64_t lckena                       : 1;
	uint64_t lck2ena                      : 1;
	uint64_t reserved_9_63                : 55;
#endif
	} s;
	struct cvmx_l2c_int_en_s cn52xx;
	struct cvmx_l2c_int_en_s cn52xxp1;
	struct cvmx_l2c_int_en_s cn56xx;
	struct cvmx_l2c_int_en_s cn56xxp1;
};
typedef union cvmx_l2c_int_en cvmx_l2c_int_en_t;

/**
 * cvmx_l2c_int_ena
 *
 * L2C_INT_ENA = L2C Interrupt Enable
 *
 */
union cvmx_l2c_int_ena {
	uint64_t u64;
	struct cvmx_l2c_int_ena_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63                : 56;
	uint64_t bigrd                        : 1;  /**< Read reference past MAXDRAM enable */
	uint64_t bigwr                        : 1;  /**< Write reference past MAXDRAM enable */
	uint64_t vrtpe                        : 1;  /**< Virtualization memory parity error */
	uint64_t vrtadrng                     : 1;  /**< Address outside of virtualization range enable */
	uint64_t vrtidrng                     : 1;  /**< Virtualization ID out of range enable */
	uint64_t vrtwr                        : 1;  /**< Virtualization ID prevented a write enable */
	uint64_t holewr                       : 1;  /**< Write reference to 256MB hole enable */
	uint64_t holerd                       : 1;  /**< Read reference to 256MB hole enable */
#else
	uint64_t holerd                       : 1;
	uint64_t holewr                       : 1;
	uint64_t vrtwr                        : 1;
	uint64_t vrtidrng                     : 1;
	uint64_t vrtadrng                     : 1;
	uint64_t vrtpe                        : 1;
	uint64_t bigwr                        : 1;
	uint64_t bigrd                        : 1;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	struct cvmx_l2c_int_ena_s cn61xx;
	struct cvmx_l2c_int_ena_s cn63xx;
	struct cvmx_l2c_int_ena_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_6_63                : 58;
	uint64_t vrtpe                        : 1;  /**< Virtualization memory parity error */
	uint64_t vrtadrng                     : 1;  /**< Address outside of virtualization range enable */
	uint64_t vrtidrng                     : 1;  /**< Virtualization ID out of range enable */
	uint64_t vrtwr                        : 1;  /**< Virtualization ID prevented a write enable */
	uint64_t holewr                       : 1;  /**< Write reference to 256MB hole enable */
	uint64_t holerd                       : 1;  /**< Read reference to 256MB hole enable */
#else
	uint64_t holerd                       : 1;
	uint64_t holewr                       : 1;
	uint64_t vrtwr                        : 1;
	uint64_t vrtidrng                     : 1;
	uint64_t vrtadrng                     : 1;
	uint64_t vrtpe                        : 1;
	uint64_t reserved_6_63                : 58;
#endif
	} cn63xxp1;
	struct cvmx_l2c_int_ena_s cn66xx;
	struct cvmx_l2c_int_ena_s cn68xx;
	struct cvmx_l2c_int_ena_s cn68xxp1;
	struct cvmx_l2c_int_ena_s cnf71xx;
};
typedef union cvmx_l2c_int_ena cvmx_l2c_int_ena_t;

/**
 * cvmx_l2c_int_reg
 *
 * L2C_INT_REG = L2C Interrupt Register
 *
 */
union cvmx_l2c_int_reg {
	uint64_t u64;
	struct cvmx_l2c_int_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_20_63               : 44;
	uint64_t tad3                         : 1;  /**< When set, the enabled interrupt is in
                                                         the L2C_TAD3_INT CSR */
	uint64_t tad2                         : 1;  /**< When set, the enabled interrupt is in
                                                         the L2C_TAD2_INT CSR */
	uint64_t tad1                         : 1;  /**< When set, the enabled interrupt is in
                                                         the L2C_TAD1_INT CSR */
	uint64_t tad0                         : 1;  /**< When set, the enabled interrupt is in
                                                         the L2C_TAD0_INT CSR */
	uint64_t reserved_8_15                : 8;
	uint64_t bigrd                        : 1;  /**< Read reference past L2C_BIG_CTL[MAXDRAM] occurred */
	uint64_t bigwr                        : 1;  /**< Write reference past L2C_BIG_CTL[MAXDRAM] occurred */
	uint64_t vrtpe                        : 1;  /**< L2C_VRT_MEM read found a parity error
                                                         Whenever an L2C_VRT_MEM read finds a parity error,
                                                         that L2C_VRT_MEM cannot cause stores to
be blocked. 3941 Software should correct the error. */ 3942 uint64_t vrtadrng : 1; /**< Address outside of virtualization range 3943 Set when a L2C_VRT_CTL[MEMSZ] violation blocked a 3944 store. 3945 L2C_VRT_CTL[OOBERR] must be set for L2C to set this. */ 3946 uint64_t vrtidrng : 1; /**< Virtualization ID out of range 3947 Set when a L2C_VRT_CTL[NUMID] violation blocked a 3948 store. */ 3949 uint64_t vrtwr : 1; /**< Virtualization ID prevented a write 3950 Set when L2C_VRT_MEM blocked a store. */ 3951 uint64_t holewr : 1; /**< Write reference to 256MB hole occurred */ 3952 uint64_t holerd : 1; /**< Read reference to 256MB hole occurred */ 3953#else 3954 uint64_t holerd : 1; 3955 uint64_t holewr : 1; 3956 uint64_t vrtwr : 1; 3957 uint64_t vrtidrng : 1; 3958 uint64_t vrtadrng : 1; 3959 uint64_t vrtpe : 1; 3960 uint64_t bigwr : 1; 3961 uint64_t bigrd : 1; 3962 uint64_t reserved_8_15 : 8; 3963 uint64_t tad0 : 1; 3964 uint64_t tad1 : 1; 3965 uint64_t tad2 : 1; 3966 uint64_t tad3 : 1; 3967 uint64_t reserved_20_63 : 44; 3968#endif 3969 } s; 3970 struct cvmx_l2c_int_reg_cn61xx { 3971#ifdef __BIG_ENDIAN_BITFIELD 3972 uint64_t reserved_17_63 : 47; 3973 uint64_t tad0 : 1; /**< When set, the enabled interrupt is in 3974 the L2C_TAD0_INT CSR */ 3975 uint64_t reserved_8_15 : 8; 3976 uint64_t bigrd : 1; /**< Read reference past L2C_BIG_CTL[MAXDRAM] occurred */ 3977 uint64_t bigwr : 1; /**< Write reference past L2C_BIG_CTL[MAXDRAM] occurred */ 3978 uint64_t vrtpe : 1; /**< L2C_VRT_MEM read found a parity error 3979 Whenever an L2C_VRT_MEM read finds a parity error, 3980 that L2C_VRT_MEM cannot cause stores to be blocked. 3981 Software should correct the error. */ 3982 uint64_t vrtadrng : 1; /**< Address outside of virtualization range 3983 Set when a L2C_VRT_CTL[MEMSZ] violation blocked a 3984 store. 3985 L2C_VRT_CTL[OOBERR] must be set for L2C to set this. 
*/ 3986 uint64_t vrtidrng : 1; /**< Virtualization ID out of range 3987 Set when a L2C_VRT_CTL[NUMID] violation blocked a 3988 store. */ 3989 uint64_t vrtwr : 1; /**< Virtualization ID prevented a write 3990 Set when L2C_VRT_MEM blocked a store. */ 3991 uint64_t holewr : 1; /**< Write reference to 256MB hole occurred */ 3992 uint64_t holerd : 1; /**< Read reference to 256MB hole occurred */ 3993#else 3994 uint64_t holerd : 1; 3995 uint64_t holewr : 1; 3996 uint64_t vrtwr : 1; 3997 uint64_t vrtidrng : 1; 3998 uint64_t vrtadrng : 1; 3999 uint64_t vrtpe : 1; 4000 uint64_t bigwr : 1; 4001 uint64_t bigrd : 1; 4002 uint64_t reserved_8_15 : 8; 4003 uint64_t tad0 : 1; 4004 uint64_t reserved_17_63 : 47; 4005#endif 4006 } cn61xx; 4007 struct cvmx_l2c_int_reg_cn61xx cn63xx; 4008 struct cvmx_l2c_int_reg_cn63xxp1 { 4009#ifdef __BIG_ENDIAN_BITFIELD 4010 uint64_t reserved_17_63 : 47; 4011 uint64_t tad0 : 1; /**< When set, the enabled interrupt is in either 4012 the L2C_ERR_TDT0 or L2C_ERR_TTG0 CSR */ 4013 uint64_t reserved_6_15 : 10; 4014 uint64_t vrtpe : 1; /**< L2C_VRT_MEM read found a parity error 4015 Whenever an L2C_VRT_MEM read finds a parity error, 4016 that L2C_VRT_MEM cannot cause stores to be blocked. 4017 Software should correct the error. */ 4018 uint64_t vrtadrng : 1; /**< Address outside of virtualization range 4019 Set when a L2C_VRT_CTL[MEMSZ] violation blocked a 4020 store. 4021 L2C_VRT_CTL[OOBERR] must be set for L2C to set this. */ 4022 uint64_t vrtidrng : 1; /**< Virtualization ID out of range 4023 Set when a L2C_VRT_CTL[NUMID] violation blocked a 4024 store. */ 4025 uint64_t vrtwr : 1; /**< Virtualization ID prevented a write 4026 Set when L2C_VRT_MEM blocked a store. 
*/ 4027 uint64_t holewr : 1; /**< Write reference to 256MB hole occurred */ 4028 uint64_t holerd : 1; /**< Read reference to 256MB hole occurred */ 4029#else 4030 uint64_t holerd : 1; 4031 uint64_t holewr : 1; 4032 uint64_t vrtwr : 1; 4033 uint64_t vrtidrng : 1; 4034 uint64_t vrtadrng : 1; 4035 uint64_t vrtpe : 1; 4036 uint64_t reserved_6_15 : 10; 4037 uint64_t tad0 : 1; 4038 uint64_t reserved_17_63 : 47; 4039#endif 4040 } cn63xxp1; 4041 struct cvmx_l2c_int_reg_cn61xx cn66xx; 4042 struct cvmx_l2c_int_reg_s cn68xx; 4043 struct cvmx_l2c_int_reg_s cn68xxp1; 4044 struct cvmx_l2c_int_reg_cn61xx cnf71xx; 4045}; 4046typedef union cvmx_l2c_int_reg cvmx_l2c_int_reg_t; 4047 4048/** 4049 * cvmx_l2c_int_stat 4050 * 4051 * L2C_INT_STAT = L2C Global Interrupt Status Register 4052 * 4053 * Description: 4054 */ 4055union cvmx_l2c_int_stat { 4056 uint64_t u64; 4057 struct cvmx_l2c_int_stat_s { 4058#ifdef __BIG_ENDIAN_BITFIELD 4059 uint64_t reserved_9_63 : 55; 4060 uint64_t lck2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n 4061 could not find an available/unlocked set (for 4062 replacement). 4063 Most likely, this is a result of SW mixing SET 4064 PARTITIONING with ADDRESS LOCKING. If SW allows 4065 another PP to LOCKDOWN all SETs available to PP#n, 4066 then a Rd/Wr Miss from PP#n will be unable 4067 to determine a 'valid' replacement set (since LOCKED 4068 addresses should NEVER be replaced). 4069 If such an event occurs, the HW will select the smallest 4070 available SET(specified by UMSK'x)' as the replacement 4071 set, and the address is unlocked. 4072 NOTE: This is the 'same' bit as L2T_ERR[LCKERR2] */ 4073 uint64_t lck : 1; /**< SW attempted to LOCK DOWN the last available set of 4074 the INDEX (which is ignored by HW - but reported to SW). 4075 The LDD(L1 load-miss) for the LOCK operation is completed 4076 successfully, however the address is NOT locked. 4077 NOTE: 'Available' sets takes the L2C_SPAR*[UMSK*] 4078 into account. 
For example, if diagnostic PPx has
	                       UMSKx defined to only use SETs [1:0], and SET1 had
	                       been previously LOCKED, then an attempt to LOCK the
	                       last available SET0 would result in a LCKERR. (This
	                       is to ensure that at least 1 SET at each INDEX is
	                       not LOCKED for general use by other PPs).
	                       NOTE: This is the 'same' bit as L2T_ERR[LCKERR] */
	uint64_t l2dded : 1; /**< L2D Double Error detected (DED)
	                          NOTE: This is the 'same' bit as L2D_ERR[DED_ERR] */
	uint64_t l2dsec : 1; /**< L2D Single Error corrected (SEC)
	                          NOTE: This is the 'same' bit as L2D_ERR[SEC_ERR] */
	uint64_t l2tded : 1; /**< L2T Double Bit Error detected (DED)
	                          During every L2 Tag Probe, all 8 sets Tag's (at a
	                          given index) are checked for double bit errors(DBEs).
	                          This bit is set if ANY of the 8 sets contains a DBE.
	                          DBEs also generated an interrupt(if enabled).
	                          NOTE: This is the 'same' bit as L2T_ERR[DED_ERR] */
	uint64_t l2tsec : 1; /**< L2T Single Bit Error corrected (SEC) status
	                          During every L2 Tag Probe, all 8 sets Tag's (at a
	                          given index) are checked for single bit errors(SBEs).
	                          This bit is set if ANY of the 8 sets contains an SBE.
	                          SBEs are auto corrected in HW and generate an
	                          interrupt(if enabled).
	                          NOTE: This is the 'same' bit as L2T_ERR[SEC_ERR] */
	uint64_t oob3 : 1; /**< DMA Out of Bounds Interrupt Status Range#3 */
	uint64_t oob2 : 1; /**< DMA Out of Bounds Interrupt Status Range#2 */
	uint64_t oob1 : 1; /**< DMA Out of Bounds Interrupt Status Range#1 */
#else
	uint64_t oob1 : 1;
	uint64_t oob2 : 1;
	uint64_t oob3 : 1;
	uint64_t l2tsec : 1;
	uint64_t l2tded : 1;
	uint64_t l2dsec : 1;
	uint64_t l2dded : 1;
	uint64_t lck : 1;
	uint64_t lck2 : 1;
	uint64_t reserved_9_63 : 55;
#endif
	} s;
	struct cvmx_l2c_int_stat_s cn52xx;
	struct cvmx_l2c_int_stat_s cn52xxp1;
	struct cvmx_l2c_int_stat_s cn56xx;
	struct cvmx_l2c_int_stat_s cn56xxp1;
};
typedef union cvmx_l2c_int_stat cvmx_l2c_int_stat_t;

/**
 * cvmx_l2c_ioc#_pfc
 *
 * L2C_IOC_PFC = L2C IOC Performance Counter(s)
 *
 */
union cvmx_l2c_iocx_pfc {
	uint64_t u64;
	struct cvmx_l2c_iocx_pfc_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t count : 64; /**< Current counter value */
#else
	uint64_t count : 64;
#endif
	} s;
	struct cvmx_l2c_iocx_pfc_s cn61xx;
	struct cvmx_l2c_iocx_pfc_s cn63xx;
	struct cvmx_l2c_iocx_pfc_s cn63xxp1;
	struct cvmx_l2c_iocx_pfc_s cn66xx;
	struct cvmx_l2c_iocx_pfc_s cn68xx;
	struct cvmx_l2c_iocx_pfc_s cn68xxp1;
	struct cvmx_l2c_iocx_pfc_s cnf71xx;
};
typedef union cvmx_l2c_iocx_pfc cvmx_l2c_iocx_pfc_t;

/**
 * cvmx_l2c_ior#_pfc
 *
 * L2C_IOR_PFC = L2C IOR Performance Counter(s)
 *
 */
union cvmx_l2c_iorx_pfc {
	uint64_t u64;
	struct cvmx_l2c_iorx_pfc_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t count : 64; /**< Current counter value */
#else
	uint64_t count : 64;
#endif
	} s;
	struct cvmx_l2c_iorx_pfc_s cn61xx;
	struct cvmx_l2c_iorx_pfc_s cn63xx;
	struct cvmx_l2c_iorx_pfc_s cn63xxp1;
	struct cvmx_l2c_iorx_pfc_s cn66xx;
struct cvmx_l2c_iorx_pfc_s cn68xx;
	struct cvmx_l2c_iorx_pfc_s cn68xxp1;
	struct cvmx_l2c_iorx_pfc_s cnf71xx;
};
typedef union cvmx_l2c_iorx_pfc cvmx_l2c_iorx_pfc_t;

/**
 * cvmx_l2c_lckbase
 *
 * L2C_LCKBASE = L2C LockDown Base Register
 *
 * Description: L2C LockDown Base Register
 *
 * Notes:
 * (1) SW RESTRICTION \#1: SW must manage the L2 Data Store lockdown space such that at least 1
 * set per cache line remains in the 'unlocked' (normal) state to allow general caching operations.
 * If SW violates this restriction, a status bit is set (LCK_ERR) and an interrupt is posted.
 * [this limits the total lockdown space to 7/8ths of the total L2 data store = 896KB]
 * (2) IOB initiated LDI commands are ignored (only PP initiated LDI/LDD commands are considered
 * for lockdown).
 * (3) To 'unlock' a locked cache line, SW can use the FLUSH-INVAL CSR mechanism (see L2C_DBG[FINV]).
 * (4) LCK_ENA MUST only be activated when debug modes are disabled (L2C_DBG[L2T], L2C_DBG[L2D], L2C_DBG[FINV]).
 */
union cvmx_l2c_lckbase {
	uint64_t u64;
	struct cvmx_l2c_lckbase_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_31_63 : 33;
	uint64_t lck_base : 27; /**< Base Memory block address[33:7]. Specifies the
	                             starting address of the lockdown region. */
	uint64_t reserved_1_3 : 3;
	uint64_t lck_ena : 1; /**< L2 Cache Lock Enable
	                           When the LCK_ENA=1, all LDI(I-stream Load) or
	                           LDD(L1 load-miss) commands issued from the
	                           diagnostic PP (specified by the L2C_DBG[PPNUM]),
	                           which fall within a predefined lockdown address
	                           range (specified by: [lck_base:lck_base+lck_offset])
	                           are LOCKED in the L2 cache. The LOCKED state is
	                           denoted using an explicit L2 Tag bit (L=1).
	                           If the LOCK request L2-Hits (on ANY SET), then data is
	                           returned from the L2 and the hit set is updated to the
	                           LOCKED state.
	                           NOTE: If the Hit Set# is outside the
	                           available sets for a given PP (see UMSK'x'), the
	                           LOCK bit is still SET. If the programmer's intent
	                           is to explicitly LOCK addresses into 'available' sets,
	                           care must be taken to flush-invalidate the cache first
	                           (to avoid such situations). Not following this procedure
	                           can lead to LCKERR2 interrupts.
	                           If the LOCK request L2-Misses, a replacement set is
	                           chosen(from the available sets (UMSK'x').
	                           If the replacement set contains a dirty-victim it is
	                           written back to memory. Memory read data is then written
	                           into the replacement set, and the replacement SET is
	                           updated to the LOCKED state(L=1).
	                           NOTE: SETs that contain LOCKED addresses are
	                           excluded from the replacement set selection algorithm.
	                           NOTE: The LDD command will allocate the DuTag as normal.
	                           NOTE: If L2C_CFG[IDXALIAS]=1, the address is 'aliased' first
	                           before being checked against the lockdown address
	                           range. To ensure an 'aliased' address is properly locked,
	                           it is recommended that SW preload the 'aliased' locked address
	                           into the L2C_LCKBASE[LCK_BASE] register (while keeping
	                           L2C_LCKOFF[LCK_OFFSET]=0).
	                           NOTE: The OCTEON(N3) implementation only supports 16GB(MAX) of
	                           physical memory. Therefore, only byte address[33:0] are used
	                           (ie: address[35:34] are ignored). */
#else
	uint64_t lck_ena : 1;
	uint64_t reserved_1_3 : 3;
	uint64_t lck_base : 27;
	uint64_t reserved_31_63 : 33;
#endif
	} s;
	struct cvmx_l2c_lckbase_s cn30xx;
	struct cvmx_l2c_lckbase_s cn31xx;
	struct cvmx_l2c_lckbase_s cn38xx;
	struct cvmx_l2c_lckbase_s cn38xxp2;
	struct cvmx_l2c_lckbase_s cn50xx;
	struct cvmx_l2c_lckbase_s cn52xx;
	struct cvmx_l2c_lckbase_s cn52xxp1;
	struct cvmx_l2c_lckbase_s cn56xx;
	struct cvmx_l2c_lckbase_s cn56xxp1;
	struct cvmx_l2c_lckbase_s cn58xx;
	struct cvmx_l2c_lckbase_s cn58xxp1;
};
typedef union cvmx_l2c_lckbase cvmx_l2c_lckbase_t;

/**
 * cvmx_l2c_lckoff
 *
 * L2C_LCKOFF = L2C LockDown OFFSET Register
 *
 * Description: L2C LockDown OFFSET Register
 *
 * Notes:
 * (1) The generation of the end lockdown block address will 'wrap'.
 * (2) The minimum granularity for lockdown is 1 cache line (= 128B block)
 */
union cvmx_l2c_lckoff {
	uint64_t u64;
	struct cvmx_l2c_lckoff_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_10_63 : 54;
	uint64_t lck_offset : 10; /**< LockDown block Offset.
Used in determining
	                               the ending block address of the lockdown
	                               region:
	                               End Lockdown block Address[33:7] =
	                               LCK_BASE[33:7]+LCK_OFFSET[9:0] */
#else
	uint64_t lck_offset : 10;
	uint64_t reserved_10_63 : 54;
#endif
	} s;
	struct cvmx_l2c_lckoff_s cn30xx;
	struct cvmx_l2c_lckoff_s cn31xx;
	struct cvmx_l2c_lckoff_s cn38xx;
	struct cvmx_l2c_lckoff_s cn38xxp2;
	struct cvmx_l2c_lckoff_s cn50xx;
	struct cvmx_l2c_lckoff_s cn52xx;
	struct cvmx_l2c_lckoff_s cn52xxp1;
	struct cvmx_l2c_lckoff_s cn56xx;
	struct cvmx_l2c_lckoff_s cn56xxp1;
	struct cvmx_l2c_lckoff_s cn58xx;
	struct cvmx_l2c_lckoff_s cn58xxp1;
};
typedef union cvmx_l2c_lckoff cvmx_l2c_lckoff_t;

/**
 * cvmx_l2c_lfb0
 *
 * L2C_LFB0 = L2C LFB DEBUG 0 Register
 *
 * Description: L2C LFB Contents (Status Bits)
 */
union cvmx_l2c_lfb0 {
	uint64_t u64;
	struct cvmx_l2c_lfb0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t stcpnd : 1; /**< LFB STC Pending Status */
	uint64_t stpnd : 1; /**< LFB ST* Pending Status */
	uint64_t stinv : 1; /**< LFB ST* Invalidate Status */
	uint64_t stcfl : 1; /**< LFB STC=FAIL Status */
	uint64_t vam : 1; /**< Valid Full Address Match Status */
	uint64_t inxt : 4; /**< Next LFB Pointer(invalid if ITL=1) */
	uint64_t itl : 1; /**< LFB Tail of List Indicator */
	uint64_t ihd : 1; /**< LFB Head of List Indicator */
	uint64_t set : 3; /**< SET# used for DS-OP (hit=hset/miss=rset) */
	uint64_t vabnum : 4; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */
	uint64_t sid : 9; /**< LFB Source ID */
	uint64_t cmd : 4; /**< LFB Command */
	uint64_t vld : 1; /**< LFB Valid */
#else
	uint64_t vld : 1;
	uint64_t cmd : 4;
	uint64_t sid : 9;
	uint64_t vabnum : 4;
	uint64_t set : 3;
	uint64_t ihd : 1;
	uint64_t itl : 1;
	uint64_t inxt : 4;
	uint64_t vam : 1;
	uint64_t stcfl : 1;
	uint64_t stinv : 1;
	uint64_t stpnd : 1;
	uint64_t stcpnd : 1;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	/* CN30XX: narrower INXT/SET/VABNUM fields (smaller LFB/set/VAB counts). */
	struct cvmx_l2c_lfb0_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t stcpnd : 1; /**< LFB STC Pending Status */
	uint64_t stpnd : 1; /**< LFB ST* Pending Status */
	uint64_t stinv : 1; /**< LFB ST* Invalidate Status */
	uint64_t stcfl : 1; /**< LFB STC=FAIL Status */
	uint64_t vam : 1; /**< Valid Full Address Match Status */
	uint64_t reserved_25_26 : 2;
	uint64_t inxt : 2; /**< Next LFB Pointer(invalid if ITL=1) */
	uint64_t itl : 1; /**< LFB Tail of List Indicator */
	uint64_t ihd : 1; /**< LFB Head of List Indicator */
	uint64_t reserved_20_20 : 1;
	uint64_t set : 2; /**< SET# used for DS-OP (hit=hset/miss=rset) */
	uint64_t reserved_16_17 : 2;
	uint64_t vabnum : 2; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */
	uint64_t sid : 9; /**< LFB Source ID */
	uint64_t cmd : 4; /**< LFB Command */
	uint64_t vld : 1; /**< LFB Valid */
#else
	uint64_t vld : 1;
	uint64_t cmd : 4;
	uint64_t sid : 9;
	uint64_t vabnum : 2;
	uint64_t reserved_16_17 : 2;
	uint64_t set : 2;
	uint64_t reserved_20_20 : 1;
	uint64_t ihd : 1;
	uint64_t itl : 1;
	uint64_t inxt : 2;
	uint64_t reserved_25_26 : 2;
	uint64_t vam : 1;
	uint64_t stcfl : 1;
	uint64_t stinv : 1;
	uint64_t stpnd : 1;
	uint64_t stcpnd : 1;
	uint64_t reserved_32_63 : 32;
#endif
	} cn30xx;
	struct cvmx_l2c_lfb0_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t stcpnd : 1; /**< LFB STC Pending Status */
	uint64_t stpnd : 1; /**< LFB ST* Pending Status */
	uint64_t stinv : 1; /**< LFB ST* Invalidate Status */
	uint64_t stcfl : 1; /**< LFB STC=FAIL Status */
	uint64_t vam : 1; /**< Valid Full Address Match Status */
	uint64_t reserved_26_26 : 1;
	uint64_t inxt : 3; /**< Next LFB Pointer(invalid if ITL=1) */
	uint64_t itl : 1; /**< LFB Tail of List Indicator */
	uint64_t ihd : 1; /**< LFB Head of List Indicator */
	uint64_t reserved_20_20 : 1;
	uint64_t set : 2; /**< SET# used for DS-OP (hit=hset/miss=rset) */
	uint64_t reserved_17_17 : 1;
	uint64_t vabnum : 3; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */
	uint64_t sid : 9; /**< LFB Source ID */
	uint64_t cmd : 4; /**< LFB Command */
	uint64_t vld : 1; /**< LFB Valid */
#else
	uint64_t vld : 1;
	uint64_t cmd : 4;
	uint64_t sid : 9;
	uint64_t vabnum : 3;
	uint64_t reserved_17_17 : 1;
	uint64_t set : 2;
	uint64_t reserved_20_20 : 1;
	uint64_t ihd : 1;
	uint64_t itl : 1;
	uint64_t inxt : 3;
	uint64_t reserved_26_26 : 1;
	uint64_t vam : 1;
	uint64_t stcfl : 1;
	uint64_t stinv : 1;
	uint64_t stpnd : 1;
	uint64_t stcpnd : 1;
	uint64_t reserved_32_63 : 32;
#endif
	} cn31xx;
	struct cvmx_l2c_lfb0_s cn38xx;
	struct cvmx_l2c_lfb0_s cn38xxp2;
	struct cvmx_l2c_lfb0_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t stcpnd : 1; /**< LFB STC Pending Status */
	uint64_t stpnd : 1; /**< LFB ST* Pending Status */
	uint64_t stinv : 1; /**< LFB ST* Invalidate Status */
	uint64_t stcfl : 1; /**< LFB STC=FAIL Status */
	uint64_t vam : 1; /**< Valid Full Address Match Status */
	uint64_t reserved_26_26 : 1;
	uint64_t inxt : 3; /**< Next LFB Pointer(invalid if ITL=1) */
	uint64_t itl : 1; /**< LFB Tail of List Indicator */
	uint64_t ihd : 1; /**< LFB Head of List Indicator */
	uint64_t set : 3; /**< SET# used for DS-OP (hit=hset/miss=rset) */
	uint64_t reserved_17_17 : 1;
	uint64_t vabnum : 3; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */
	uint64_t sid : 9; /**< LFB Source ID */
	uint64_t cmd : 4; /**< LFB Command */
	uint64_t vld : 1; /**< LFB Valid */
#else
	uint64_t vld : 1;
	uint64_t cmd : 4;
	uint64_t sid : 9;
	uint64_t vabnum : 3;
	uint64_t reserved_17_17 : 1;
	uint64_t set : 3;
	uint64_t ihd : 1;
	uint64_t itl : 1;
	uint64_t inxt : 3;
	uint64_t reserved_26_26 : 1;
	uint64_t vam : 1;
	uint64_t stcfl : 1;
	uint64_t stinv : 1;
	uint64_t stpnd : 1;
	uint64_t stcpnd : 1;
	uint64_t reserved_32_63 : 32;
#endif
	} cn50xx;
	struct cvmx_l2c_lfb0_cn50xx cn52xx;
	struct cvmx_l2c_lfb0_cn50xx cn52xxp1;
	struct cvmx_l2c_lfb0_s cn56xx;
	struct cvmx_l2c_lfb0_s cn56xxp1;
	struct cvmx_l2c_lfb0_s cn58xx;
	struct cvmx_l2c_lfb0_s cn58xxp1;
};
typedef union cvmx_l2c_lfb0 cvmx_l2c_lfb0_t;

/**
 * cvmx_l2c_lfb1
 *
 * L2C_LFB1 = L2C LFB DEBUG 1 Register
 *
 * Description: L2C LFB Contents (Wait Bits)
 */
union cvmx_l2c_lfb1 {
	uint64_t u64;
	struct cvmx_l2c_lfb1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_19_63 : 45;
	uint64_t dsgoing : 1; /**< LFB DS Going (in flight) */
	uint64_t bid : 2; /**< LFB DS Bid# */
	uint64_t wtrsp : 1; /**< LFB Waiting for RSC Response [FILL,STRSP] completion */
	uint64_t wtdw : 1; /**< LFB Waiting for DS-WR completion */
	uint64_t wtdq : 1; /**< LFB Waiting for LFB-DQ */
	uint64_t wtwhp : 1; /**< LFB Waiting for Write-Hit Partial L2 DS-WR completion */
	uint64_t wtwhf : 1; /**< LFB Waiting for Write-Hit Full L2 DS-WR completion */
	uint64_t wtwrm : 1; /**< LFB Waiting for Write-Miss L2 DS-WR completion */
	uint64_t wtstm : 1; /**< LFB Waiting for Write-Miss L2 DS-WR completion */
	uint64_t wtrda : 1; /**< LFB Waiting for Read-Miss L2 DS-WR completion */
	uint64_t wtstdt : 1; /**< LFB Waiting for all ST write Data to arrive on XMD bus */
	uint64_t wtstrsp : 1; /**< LFB Waiting for ST RSC/RSD to be issued on RSP
(with invalidates) */
	uint64_t wtstrsc : 1; /**< LFB Waiting for ST RSC-Only to be issued on RSP
	                           (no-invalidates) */
	uint64_t wtvtm : 1; /**< LFB Waiting for Victim Read L2 DS-RD completion */
	uint64_t wtmfl : 1; /**< LFB Waiting for Memory Fill completion to MRB */
	uint64_t prbrty : 1; /**< Probe-Retry Detected - waiting for probe completion */
	uint64_t wtprb : 1; /**< LFB Waiting for Probe */
	uint64_t vld : 1; /**< LFB Valid */
#else
	uint64_t vld : 1;
	uint64_t wtprb : 1;
	uint64_t prbrty : 1;
	uint64_t wtmfl : 1;
	uint64_t wtvtm : 1;
	uint64_t wtstrsc : 1;
	uint64_t wtstrsp : 1;
	uint64_t wtstdt : 1;
	uint64_t wtrda : 1;
	uint64_t wtstm : 1;
	uint64_t wtwrm : 1;
	uint64_t wtwhf : 1;
	uint64_t wtwhp : 1;
	uint64_t wtdq : 1;
	uint64_t wtdw : 1;
	uint64_t wtrsp : 1;
	uint64_t bid : 2;
	uint64_t dsgoing : 1;
	uint64_t reserved_19_63 : 45;
#endif
	} s;
	struct cvmx_l2c_lfb1_s cn30xx;
	struct cvmx_l2c_lfb1_s cn31xx;
	struct cvmx_l2c_lfb1_s cn38xx;
	struct cvmx_l2c_lfb1_s cn38xxp2;
	struct cvmx_l2c_lfb1_s cn50xx;
	struct cvmx_l2c_lfb1_s cn52xx;
	struct cvmx_l2c_lfb1_s cn52xxp1;
	struct cvmx_l2c_lfb1_s cn56xx;
	struct cvmx_l2c_lfb1_s cn56xxp1;
	struct cvmx_l2c_lfb1_s cn58xx;
	struct cvmx_l2c_lfb1_s cn58xxp1;
};
typedef union cvmx_l2c_lfb1 cvmx_l2c_lfb1_t;

/**
 * cvmx_l2c_lfb2
 *
 * L2C_LFB2 = L2C LFB DEBUG 2 Register
 *
 * Description: L2C LFB Contents Tag/Index
 */
union cvmx_l2c_lfb2 {
	uint64_t u64;
	/* Tag/index split varies per model; the common view is fully reserved. */
	struct cvmx_l2c_lfb2_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_0_63 : 64;
#else
	uint64_t reserved_0_63 : 64;
#endif
	} s;
	struct cvmx_l2c_lfb2_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_27_63 : 37;
	uint64_t lfb_tag : 19; /**< LFB TAG[33:15] */
	uint64_t lfb_idx : 8; /**< LFB IDX[14:7] */
#else
	uint64_t lfb_idx : 8;
	uint64_t lfb_tag : 19;
	uint64_t reserved_27_63 : 37;
#endif
	} cn30xx;
	struct cvmx_l2c_lfb2_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_27_63 : 37;
	uint64_t lfb_tag : 17; /**< LFB TAG[33:16] */
	uint64_t lfb_idx : 10; /**< LFB IDX[15:7] */
#else
	uint64_t lfb_idx : 10;
	uint64_t lfb_tag : 17;
	uint64_t reserved_27_63 : 37;
#endif
	} cn31xx;
	struct cvmx_l2c_lfb2_cn31xx cn38xx;
	struct cvmx_l2c_lfb2_cn31xx cn38xxp2;
	struct cvmx_l2c_lfb2_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_27_63 : 37;
	uint64_t lfb_tag : 20; /**< LFB TAG[33:14] */
	uint64_t lfb_idx : 7; /**< LFB IDX[13:7] */
#else
	uint64_t lfb_idx : 7;
	uint64_t lfb_tag : 20;
	uint64_t reserved_27_63 : 37;
#endif
	} cn50xx;
	struct cvmx_l2c_lfb2_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_27_63 : 37;
	uint64_t lfb_tag : 18; /**< LFB TAG[33:16] */
	uint64_t lfb_idx : 9; /**< LFB IDX[15:7] */
#else
	uint64_t lfb_idx : 9;
	uint64_t lfb_tag : 18;
	uint64_t reserved_27_63 : 37;
#endif
	} cn52xx;
	struct cvmx_l2c_lfb2_cn52xx cn52xxp1;
	struct cvmx_l2c_lfb2_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_27_63 : 37;
	uint64_t lfb_tag : 16; /**< LFB TAG[33:18] */
	uint64_t lfb_idx : 11; /**< LFB IDX[17:7] */
#else
	uint64_t lfb_idx : 11;
	uint64_t lfb_tag : 16;
	uint64_t reserved_27_63 : 37;
#endif
	} cn56xx;
	struct cvmx_l2c_lfb2_cn56xx cn56xxp1;
	struct cvmx_l2c_lfb2_cn56xx cn58xx;
	struct cvmx_l2c_lfb2_cn56xx cn58xxp1;
};
typedef union cvmx_l2c_lfb2 cvmx_l2c_lfb2_t;

/**
 * cvmx_l2c_lfb3
 *
 * L2C_LFB3 = L2C LFB DEBUG 3 Register
 *
 * Description: LFB High Water Mark Register
 */
union cvmx_l2c_lfb3 {
	uint64_t u64;
	struct cvmx_l2c_lfb3_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_5_63 : 59;
	uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable
	                             When clear, all STP/C(store partials) will take 2 cycles
	                             to complete (power-on default).
	                             When set, all STP/C(store partials) will take 4 cycles
	                             to complete.
	                             NOTE: It is recommended to keep this bit ALWAYS ZERO. */
	uint64_t lfb_hwm : 4; /**< LFB High Water Mark
	                           Determines \#of LFB Entries in use before backpressure
	                           is asserted.
	                           HWM=0: 1 LFB Entry available
	                           - ...
	                           HWM=15: 16 LFB Entries available */
#else
	uint64_t lfb_hwm : 4;
	uint64_t stpartdis : 1;
	uint64_t reserved_5_63 : 59;
#endif
	} s;
	/* CN30XX: only 4 LFB entries, so HWM is 2 bits wide. */
	struct cvmx_l2c_lfb3_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_5_63 : 59;
	uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable
	                             When clear, all STP/C(store partials) will take 2 cycles
	                             to complete (power-on default).
	                             When set, all STP/C(store partials) will take 4 cycles
	                             to complete.
	                             NOTE: It is recommended to keep this bit ALWAYS ZERO. */
	uint64_t reserved_2_3 : 2;
	uint64_t lfb_hwm : 2; /**< LFB High Water Mark
	                           Determines \#of LFB Entries in use before backpressure
	                           is asserted.
	                           HWM=0: 1 LFB Entry available
	                           - ...
	                           HWM=3: 4 LFB Entries available */
#else
	uint64_t lfb_hwm : 2;
	uint64_t reserved_2_3 : 2;
	uint64_t stpartdis : 1;
	uint64_t reserved_5_63 : 59;
#endif
	} cn30xx;
	/* CN31XX (and CN50/52XX): 8 LFB entries, 3-bit HWM. */
	struct cvmx_l2c_lfb3_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_5_63 : 59;
	uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable
	                             When clear, all STP/C(store partials) will take 2 cycles
	                             to complete (power-on default).
	                             When set, all STP/C(store partials) will take 4 cycles
	                             to complete.
	                             NOTE: It is recommended to keep this bit ALWAYS ZERO. */
	uint64_t reserved_3_3 : 1;
	uint64_t lfb_hwm : 3; /**< LFB High Water Mark
	                           Determines \#of LFB Entries in use before backpressure
	                           is asserted.
	                           HWM=0: 1 LFB Entry available
	                           - ...
	                           HWM=7: 8 LFB Entries available */
#else
	uint64_t lfb_hwm : 3;
	uint64_t reserved_3_3 : 1;
	uint64_t stpartdis : 1;
	uint64_t reserved_5_63 : 59;
#endif
	} cn31xx;
	struct cvmx_l2c_lfb3_s cn38xx;
	struct cvmx_l2c_lfb3_s cn38xxp2;
	struct cvmx_l2c_lfb3_cn31xx cn50xx;
	struct cvmx_l2c_lfb3_cn31xx cn52xx;
	struct cvmx_l2c_lfb3_cn31xx cn52xxp1;
	struct cvmx_l2c_lfb3_s cn56xx;
	struct cvmx_l2c_lfb3_s cn56xxp1;
	struct cvmx_l2c_lfb3_s cn58xx;
	struct cvmx_l2c_lfb3_s cn58xxp1;
};
typedef union cvmx_l2c_lfb3 cvmx_l2c_lfb3_t;

/**
 * cvmx_l2c_oob
 *
 * L2C_OOB = L2C Out of Bounds Global Enables
 *
 * Description: Defines DMA "Out of Bounds" global enables.
 */
union cvmx_l2c_oob {
	uint64_t u64;
	struct cvmx_l2c_oob_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_2_63 : 62;
	uint64_t dwbena : 1; /**< DMA Out of Bounds Range Checker for DMA DWB
	                          commands (Don't WriteBack).
	                          When enabled, any DMA DWB commands which hit 1-of-3
	                          out of bounds regions will be logged into
	                          L2C_INT_STAT[OOB*] CSRs and the DMA store WILL
	                          NOT occur. If the corresponding L2C_INT_EN[OOB*]
	                          is enabled, an interrupt will also be reported. */
	uint64_t stena : 1; /**< DMA Out of Bounds Range Checker for DMA store
	                         commands (STF/P/T).
	                         When enabled, any DMA store commands (STF/P/T) which
	                         hit 1-of-3 out of bounds regions will be logged into
	                         L2C_INT_STAT[OOB*] CSRs and the DMA store WILL
	                         NOT occur. If the corresponding L2C_INT_EN[OOB*]
	                         is enabled, an interrupt will also be reported.
*/ 4726#else 4727 uint64_t stena : 1; 4728 uint64_t dwbena : 1; 4729 uint64_t reserved_2_63 : 62; 4730#endif 4731 } s; 4732 struct cvmx_l2c_oob_s cn52xx; 4733 struct cvmx_l2c_oob_s cn52xxp1; 4734 struct cvmx_l2c_oob_s cn56xx; 4735 struct cvmx_l2c_oob_s cn56xxp1; 4736}; 4737typedef union cvmx_l2c_oob cvmx_l2c_oob_t; 4738 4739/** 4740 * cvmx_l2c_oob1 4741 * 4742 * L2C_OOB1 = L2C Out of Bounds Range Checker 4743 * 4744 * Description: Defines DMA "Out of Bounds" region \#1. If a DMA initiated write transaction generates an address 4745 * within the specified region, the write is 'ignored' and an interrupt is generated to alert software. 4746 */ 4747union cvmx_l2c_oob1 { 4748 uint64_t u64; 4749 struct cvmx_l2c_oob1_s { 4750#ifdef __BIG_ENDIAN_BITFIELD 4751 uint64_t fadr : 27; /**< DMA initated Memory Range Checker Failing Address 4752 When L2C_INT_STAT[OOB1]=1, this field indicates the 4753 DMA cacheline address. 4754 (addr[33:7] = full cacheline address captured) 4755 NOTE: FADR is locked down until L2C_INT_STAT[OOB1] 4756 is cleared. */ 4757 uint64_t fsrc : 1; /**< DMA Out of Bounds Failing Source Command 4758 When L2C_INT_STAT[OOB1]=1, this field indicates the 4759 type of DMA command. 4760 - 0: ST* (STF/P/T) 4761 - 1: DWB (Don't WriteBack) 4762 NOTE: FSRC is locked down until L2C_INT_STAT[OOB1] 4763 is cleared. */ 4764 uint64_t reserved_34_35 : 2; 4765 uint64_t sadr : 14; /**< DMA initated Memory Range Checker Starting Address 4766 (1MB granularity) */ 4767 uint64_t reserved_14_19 : 6; 4768 uint64_t size : 14; /**< DMA Out of Bounds Range Checker Size 4769 (1MB granularity) 4770 Example: 0: 0MB / 1: 1MB 4771 The range check is for: 4772 (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20) 4773 SW NOTE: SADR+SIZE could be setup to potentially wrap 4774 the 34bit ending bounds address. 
                                                         */
#else
	uint64_t size                         : 14;
	uint64_t reserved_14_19               : 6;
	uint64_t sadr                         : 14;
	uint64_t reserved_34_35               : 2;
	uint64_t fsrc                         : 1;
	uint64_t fadr                         : 27;
#endif
	} s;
	struct cvmx_l2c_oob1_s                cn52xx;
	struct cvmx_l2c_oob1_s                cn52xxp1;
	struct cvmx_l2c_oob1_s                cn56xx;
	struct cvmx_l2c_oob1_s                cn56xxp1;
};
typedef union cvmx_l2c_oob1 cvmx_l2c_oob1_t;

/**
 * cvmx_l2c_oob2
 *
 * L2C_OOB2 = L2C Out of Bounds Range Checker
 *
 * Description: Defines DMA "Out of Bounds" region \#2. If a DMA initiated write transaction generates an address
 * within the specified region, the write is 'ignored' and an interrupt is generated to alert software.
 */
union cvmx_l2c_oob2 {
	uint64_t u64;
	struct cvmx_l2c_oob2_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t fadr                         : 27; /**< DMA initiated Memory Range Checker Failing Address
                                                         When L2C_INT_STAT[OOB2]=1, this field indicates the
                                                         DMA cacheline address.
                                                         (addr[33:7] = full cacheline address captured)
                                                         NOTE: FADR is locked down until L2C_INT_STAT[OOB2]
                                                         is cleared. */
	uint64_t fsrc                         : 1;  /**< DMA Out of Bounds Failing Source Command
                                                         When L2C_INT_STAT[OOB2]=1, this field indicates the
                                                         type of DMA command.
                                                         - 0: ST* (STF/P/T)
                                                         - 1: DWB (Don't WriteBack)
                                                         NOTE: FSRC is locked down until L2C_INT_STAT[OOB2]
                                                         is cleared. */
	uint64_t reserved_34_35               : 2;
	uint64_t sadr                         : 14; /**< DMA initiated Memory Range Checker Starting Address
                                                         (1MB granularity) */
	uint64_t reserved_14_19               : 6;
	uint64_t size                         : 14; /**< DMA Out of Bounds Range Checker Size
                                                         (1MB granularity)
                                                         Example: 0: 0MB / 1: 1MB
                                                         The range check is for:
                                                           (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20)
                                                         SW NOTE: SADR+SIZE could be setup to potentially wrap
                                                         the 34bit ending bounds address.
                                                         */
#else
	uint64_t size                         : 14;
	uint64_t reserved_14_19               : 6;
	uint64_t sadr                         : 14;
	uint64_t reserved_34_35               : 2;
	uint64_t fsrc                         : 1;
	uint64_t fadr                         : 27;
#endif
	} s;
	struct cvmx_l2c_oob2_s                cn52xx;
	struct cvmx_l2c_oob2_s                cn52xxp1;
	struct cvmx_l2c_oob2_s                cn56xx;
	struct cvmx_l2c_oob2_s                cn56xxp1;
};
typedef union cvmx_l2c_oob2 cvmx_l2c_oob2_t;

/**
 * cvmx_l2c_oob3
 *
 * L2C_OOB3 = L2C Out of Bounds Range Checker
 *
 * Description: Defines DMA "Out of Bounds" region \#3. If a DMA initiated write transaction generates an address
 * within the specified region, the write is 'ignored' and an interrupt is generated to alert software.
 */
union cvmx_l2c_oob3 {
	uint64_t u64;
	struct cvmx_l2c_oob3_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t fadr                         : 27; /**< DMA initiated Memory Range Checker Failing Address
                                                         When L2C_INT_STAT[OOB3]=1, this field indicates the
                                                         DMA cacheline address.
                                                         (addr[33:7] = full cacheline address captured)
                                                         NOTE: FADR is locked down until L2C_INT_STAT[OOB3]
                                                         is cleared. */
	uint64_t fsrc                         : 1;  /**< DMA Out of Bounds Failing Source Command
                                                         When L2C_INT_STAT[OOB3]=1, this field indicates the
                                                         type of DMA command.
                                                         - 0: ST* (STF/P/T)
                                                         - 1: DWB (Don't WriteBack)
                                                         NOTE: FSRC is locked down until L2C_INT_STAT[OOB3]
                                                         is cleared. */
	uint64_t reserved_34_35               : 2;
	uint64_t sadr                         : 14; /**< DMA initiated Memory Range Checker Starting Address
                                                         (1MB granularity) */
	uint64_t reserved_14_19               : 6;
	uint64_t size                         : 14; /**< DMA Out of Bounds Range Checker Size
                                                         (1MB granularity)
                                                         Example: 0: 0MB / 1: 1MB
                                                         The range check is for:
                                                           (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20)
                                                         SW NOTE: SADR+SIZE could be setup to potentially wrap
                                                         the 34bit ending bounds address.
*/ 4879#else 4880 uint64_t size : 14; 4881 uint64_t reserved_14_19 : 6; 4882 uint64_t sadr : 14; 4883 uint64_t reserved_34_35 : 2; 4884 uint64_t fsrc : 1; 4885 uint64_t fadr : 27; 4886#endif 4887 } s; 4888 struct cvmx_l2c_oob3_s cn52xx; 4889 struct cvmx_l2c_oob3_s cn52xxp1; 4890 struct cvmx_l2c_oob3_s cn56xx; 4891 struct cvmx_l2c_oob3_s cn56xxp1; 4892}; 4893typedef union cvmx_l2c_oob3 cvmx_l2c_oob3_t; 4894 4895/** 4896 * cvmx_l2c_pfc# 4897 * 4898 * L2C_PFC0 = L2 Performance Counter \#0 4899 * 4900 * Description: 4901 */ 4902union cvmx_l2c_pfcx { 4903 uint64_t u64; 4904 struct cvmx_l2c_pfcx_s { 4905#ifdef __BIG_ENDIAN_BITFIELD 4906 uint64_t reserved_36_63 : 28; 4907 uint64_t pfcnt0 : 36; /**< Performance Counter \#0 */ 4908#else 4909 uint64_t pfcnt0 : 36; 4910 uint64_t reserved_36_63 : 28; 4911#endif 4912 } s; 4913 struct cvmx_l2c_pfcx_s cn30xx; 4914 struct cvmx_l2c_pfcx_s cn31xx; 4915 struct cvmx_l2c_pfcx_s cn38xx; 4916 struct cvmx_l2c_pfcx_s cn38xxp2; 4917 struct cvmx_l2c_pfcx_s cn50xx; 4918 struct cvmx_l2c_pfcx_s cn52xx; 4919 struct cvmx_l2c_pfcx_s cn52xxp1; 4920 struct cvmx_l2c_pfcx_s cn56xx; 4921 struct cvmx_l2c_pfcx_s cn56xxp1; 4922 struct cvmx_l2c_pfcx_s cn58xx; 4923 struct cvmx_l2c_pfcx_s cn58xxp1; 4924}; 4925typedef union cvmx_l2c_pfcx cvmx_l2c_pfcx_t; 4926 4927/** 4928 * cvmx_l2c_pfctl 4929 * 4930 * L2C_PFCTL = L2 Performance Counter Control Register 4931 * 4932 * Description: Controls the actions of the 4 Performance Counters 4933 * 4934 * Notes: 4935 * - There are four 36b performance counter registers which can simultaneously count events. 
 *   Each Counter's event is programmably selected via the corresponding CNTxSEL field:
 *    CNTxSEL[5:0]    Event
 *   -----------------+-----------------------
 *           0        | Cycles
 *           1        | L2 LDI Command Miss (NOTE: Both PP and IOB are capable of generating LDI)
 *           2        | L2 LDI Command Hit (NOTE: Both PP and IOB are capable of generating LDI)
 *           3        | L2 non-LDI Command Miss
 *           4        | L2 non-LDI Command Hit
 *           5        | L2 Miss (total)
 *           6        | L2 Hit (total)
 *           7        | L2 Victim Buffer Hit (Retry Probe)
 *           8        | LFB-NQ Index Conflict
 *           9        | L2 Tag Probe (issued - could be VB-Retried)
 *          10        | L2 Tag Update (completed - note: some CMD types do not update)
 *          11        | L2 Tag Probe Completed (beyond VB-RTY window)
 *          12        | L2 Tag Dirty Victim
 *          13        | L2 Data Store NOP
 *          14        | L2 Data Store READ
 *          15        | L2 Data Store WRITE
 *          16        | Memory Fill Data valid (1 strobe/32B)
 *          17        | Memory Write Request
 *          18        | Memory Read Request
 *          19        | Memory Write Data valid (1 strobe/32B)
 *          20        | XMC NOP (XMC Bus Idle)
 *          21        | XMC LDT (Load-Through Request)
 *          22        | XMC LDI (L2 Load I-Stream Request)
 *          23        | XMC LDD (L2 Load D-stream Request)
 *          24        | XMC STF (L2 Store Full cacheline Request)
 *          25        | XMC STT (L2 Store Through Request)
 *          26        | XMC STP (L2 Store Partial Request)
 *          27        | XMC STC (L2 Store Conditional Request)
 *          28        | XMC DWB (L2 Don't WriteBack Request)
 *          29        | XMC PL2 (L2 Prefetch Request)
 *          30        | XMC PSL1 (L1 Prefetch Request)
 *          31        | XMC IOBLD
 *          32        | XMC IOBST
 *          33        | XMC IOBDMA
 *          34        | XMC IOBRSP
 *          35        | XMD Bus valid (all)
 *          36        | XMD Bus valid (DST=L2C) Memory Data
 *          37        | XMD Bus valid (DST=IOB) REFL Data
 *          38        | XMD Bus valid (DST=PP) IOBRSP Data
 *          39        | RSC NOP
 *          40        | RSC STDN
 *          41        | RSC FILL
 *          42        | RSC REFL
 *          43        | RSC STIN
 *          44        | RSC SCIN
 *          45        | RSC SCFL
 *          46        | RSC SCDN
 *          47        | RSD Data Valid
* 48 | RSD Data Valid (FILL) 4988 * 49 | RSD Data Valid (STRSP) 4989 * 50 | RSD Data Valid (REFL) 4990 * 51 | LRF-REQ (LFB-NQ) 4991 * 52 | DT RD-ALLOC (LDD/PSL1 Commands) 4992 * 53 | DT WR-INVAL (ST* Commands) 4993 */ 4994union cvmx_l2c_pfctl { 4995 uint64_t u64; 4996 struct cvmx_l2c_pfctl_s { 4997#ifdef __BIG_ENDIAN_BITFIELD 4998 uint64_t reserved_36_63 : 28; 4999 uint64_t cnt3rdclr : 1; /**< Performance Counter 3 Read Clear 5000 When set, all CSR reads of the L2C_PFC3 5001 register will auto-clear the counter. This allows 5002 SW to maintain 'cumulative' counters in SW. 5003 NOTE: If the CSR read occurs in the same cycle as 5004 the 'event' to be counted, the counter will 5005 properly reflect the event. */ 5006 uint64_t cnt2rdclr : 1; /**< Performance Counter 2 Read Clear 5007 When set, all CSR reads of the L2C_PFC2 5008 register will auto-clear the counter. This allows 5009 SW to maintain 'cumulative' counters in SW. 5010 NOTE: If the CSR read occurs in the same cycle as 5011 the 'event' to be counted, the counter will 5012 properly reflect the event. */ 5013 uint64_t cnt1rdclr : 1; /**< Performance Counter 1 Read Clear 5014 When set, all CSR reads of the L2C_PFC1 5015 register will auto-clear the counter. This allows 5016 SW to maintain 'cumulative' counters in SW. 5017 NOTE: If the CSR read occurs in the same cycle as 5018 the 'event' to be counted, the counter will 5019 properly reflect the event. */ 5020 uint64_t cnt0rdclr : 1; /**< Performance Counter 0 Read Clear 5021 When set, all CSR reads of the L2C_PFC0 5022 register will 'auto-clear' the counter. This allows 5023 SW to maintain accurate 'cumulative' counters. 5024 NOTE: If the CSR read occurs in the same cycle as 5025 the 'event' to be counted, the counter will 5026 properly reflect the event. */ 5027 uint64_t cnt3ena : 1; /**< Performance Counter 3 Enable 5028 When this bit is set, the performance counter 5029 is enabled. 
*/ 5030 uint64_t cnt3clr : 1; /**< Performance Counter 3 Clear 5031 When the CSR write occurs, if this bit is set, 5032 the performance counter is cleared. Otherwise, 5033 it will resume counting from its current value. */ 5034 uint64_t cnt3sel : 6; /**< Performance Counter 3 Event Selector 5035 (see list of selectable events to count in NOTES) */ 5036 uint64_t cnt2ena : 1; /**< Performance Counter 2 Enable 5037 When this bit is set, the performance counter 5038 is enabled. */ 5039 uint64_t cnt2clr : 1; /**< Performance Counter 2 Clear 5040 When the CSR write occurs, if this bit is set, 5041 the performance counter is cleared. Otherwise, 5042 it will resume counting from its current value. */ 5043 uint64_t cnt2sel : 6; /**< Performance Counter 2 Event Selector 5044 (see list of selectable events to count in NOTES) */ 5045 uint64_t cnt1ena : 1; /**< Performance Counter 1 Enable 5046 When this bit is set, the performance counter 5047 is enabled. */ 5048 uint64_t cnt1clr : 1; /**< Performance Counter 1 Clear 5049 When the CSR write occurs, if this bit is set, 5050 the performance counter is cleared. Otherwise, 5051 it will resume counting from its current value. */ 5052 uint64_t cnt1sel : 6; /**< Performance Counter 1 Event Selector 5053 (see list of selectable events to count in NOTES) */ 5054 uint64_t cnt0ena : 1; /**< Performance Counter 0 Enable 5055 When this bit is set, the performance counter 5056 is enabled. */ 5057 uint64_t cnt0clr : 1; /**< Performance Counter 0 Clear 5058 When the CSR write occurs, if this bit is set, 5059 the performance counter is cleared. Otherwise, 5060 it will resume counting from its current value. 
*/ 5061 uint64_t cnt0sel : 6; /**< Performance Counter 0 Event Selector 5062 (see list of selectable events to count in NOTES) */ 5063#else 5064 uint64_t cnt0sel : 6; 5065 uint64_t cnt0clr : 1; 5066 uint64_t cnt0ena : 1; 5067 uint64_t cnt1sel : 6; 5068 uint64_t cnt1clr : 1; 5069 uint64_t cnt1ena : 1; 5070 uint64_t cnt2sel : 6; 5071 uint64_t cnt2clr : 1; 5072 uint64_t cnt2ena : 1; 5073 uint64_t cnt3sel : 6; 5074 uint64_t cnt3clr : 1; 5075 uint64_t cnt3ena : 1; 5076 uint64_t cnt0rdclr : 1; 5077 uint64_t cnt1rdclr : 1; 5078 uint64_t cnt2rdclr : 1; 5079 uint64_t cnt3rdclr : 1; 5080 uint64_t reserved_36_63 : 28; 5081#endif 5082 } s; 5083 struct cvmx_l2c_pfctl_s cn30xx; 5084 struct cvmx_l2c_pfctl_s cn31xx; 5085 struct cvmx_l2c_pfctl_s cn38xx; 5086 struct cvmx_l2c_pfctl_s cn38xxp2; 5087 struct cvmx_l2c_pfctl_s cn50xx; 5088 struct cvmx_l2c_pfctl_s cn52xx; 5089 struct cvmx_l2c_pfctl_s cn52xxp1; 5090 struct cvmx_l2c_pfctl_s cn56xx; 5091 struct cvmx_l2c_pfctl_s cn56xxp1; 5092 struct cvmx_l2c_pfctl_s cn58xx; 5093 struct cvmx_l2c_pfctl_s cn58xxp1; 5094}; 5095typedef union cvmx_l2c_pfctl cvmx_l2c_pfctl_t; 5096 5097/** 5098 * cvmx_l2c_ppgrp 5099 * 5100 * L2C_PPGRP = L2C PP Group Number 5101 * 5102 * Description: Defines the PP(Packet Processor) PLC Group \# (0,1,2) 5103 */ 5104union cvmx_l2c_ppgrp { 5105 uint64_t u64; 5106 struct cvmx_l2c_ppgrp_s { 5107#ifdef __BIG_ENDIAN_BITFIELD 5108 uint64_t reserved_24_63 : 40; 5109 uint64_t pp11grp : 2; /**< PP11 PLC Group# (0,1,2) */ 5110 uint64_t pp10grp : 2; /**< PP10 PLC Group# (0,1,2) */ 5111 uint64_t pp9grp : 2; /**< PP9 PLC Group# (0,1,2) */ 5112 uint64_t pp8grp : 2; /**< PP8 PLC Group# (0,1,2) */ 5113 uint64_t pp7grp : 2; /**< PP7 PLC Group# (0,1,2) */ 5114 uint64_t pp6grp : 2; /**< PP6 PLC Group# (0,1,2) */ 5115 uint64_t pp5grp : 2; /**< PP5 PLC Group# (0,1,2) */ 5116 uint64_t pp4grp : 2; /**< PP4 PLC Group# (0,1,2) */ 5117 uint64_t pp3grp : 2; /**< PP3 PLC Group# (0,1,2) */ 5118 uint64_t pp2grp : 2; /**< PP2 PLC Group# (0,1,2) */ 
5119 uint64_t pp1grp : 2; /**< PP1 PLC Group# (0,1,2) */ 5120 uint64_t pp0grp : 2; /**< PP0 PLC Group# (0,1,2) */ 5121#else 5122 uint64_t pp0grp : 2; 5123 uint64_t pp1grp : 2; 5124 uint64_t pp2grp : 2; 5125 uint64_t pp3grp : 2; 5126 uint64_t pp4grp : 2; 5127 uint64_t pp5grp : 2; 5128 uint64_t pp6grp : 2; 5129 uint64_t pp7grp : 2; 5130 uint64_t pp8grp : 2; 5131 uint64_t pp9grp : 2; 5132 uint64_t pp10grp : 2; 5133 uint64_t pp11grp : 2; 5134 uint64_t reserved_24_63 : 40; 5135#endif 5136 } s; 5137 struct cvmx_l2c_ppgrp_cn52xx { 5138#ifdef __BIG_ENDIAN_BITFIELD 5139 uint64_t reserved_8_63 : 56; 5140 uint64_t pp3grp : 2; /**< PP3 PLC Group# (0,1,2) */ 5141 uint64_t pp2grp : 2; /**< PP2 PLC Group# (0,1,2) */ 5142 uint64_t pp1grp : 2; /**< PP1 PLC Group# (0,1,2) */ 5143 uint64_t pp0grp : 2; /**< PP0 PLC Group# (0,1,2) */ 5144#else 5145 uint64_t pp0grp : 2; 5146 uint64_t pp1grp : 2; 5147 uint64_t pp2grp : 2; 5148 uint64_t pp3grp : 2; 5149 uint64_t reserved_8_63 : 56; 5150#endif 5151 } cn52xx; 5152 struct cvmx_l2c_ppgrp_cn52xx cn52xxp1; 5153 struct cvmx_l2c_ppgrp_s cn56xx; 5154 struct cvmx_l2c_ppgrp_s cn56xxp1; 5155}; 5156typedef union cvmx_l2c_ppgrp cvmx_l2c_ppgrp_t; 5157 5158/** 5159 * cvmx_l2c_qos_iob# 5160 * 5161 * L2C_QOS_IOB = L2C IOB QOS level 5162 * 5163 * Description: 5164 */ 5165union cvmx_l2c_qos_iobx { 5166 uint64_t u64; 5167 struct cvmx_l2c_qos_iobx_s { 5168#ifdef __BIG_ENDIAN_BITFIELD 5169 uint64_t reserved_7_63 : 57; 5170 uint64_t dwblvl : 3; /**< QOS level for DWB commands. */ 5171 uint64_t reserved_3_3 : 1; 5172 uint64_t lvl : 3; /**< QOS level for non-DWB commands. */ 5173#else 5174 uint64_t lvl : 3; 5175 uint64_t reserved_3_3 : 1; 5176 uint64_t dwblvl : 3; 5177 uint64_t reserved_7_63 : 57; 5178#endif 5179 } s; 5180 struct cvmx_l2c_qos_iobx_cn61xx { 5181#ifdef __BIG_ENDIAN_BITFIELD 5182 uint64_t reserved_6_63 : 58; 5183 uint64_t dwblvl : 2; /**< QOS level for DWB commands. 
*/ 5184 uint64_t reserved_2_3 : 2; 5185 uint64_t lvl : 2; /**< QOS level for non-DWB commands. */ 5186#else 5187 uint64_t lvl : 2; 5188 uint64_t reserved_2_3 : 2; 5189 uint64_t dwblvl : 2; 5190 uint64_t reserved_6_63 : 58; 5191#endif 5192 } cn61xx; 5193 struct cvmx_l2c_qos_iobx_cn61xx cn63xx; 5194 struct cvmx_l2c_qos_iobx_cn61xx cn63xxp1; 5195 struct cvmx_l2c_qos_iobx_cn61xx cn66xx; 5196 struct cvmx_l2c_qos_iobx_s cn68xx; 5197 struct cvmx_l2c_qos_iobx_s cn68xxp1; 5198 struct cvmx_l2c_qos_iobx_cn61xx cnf71xx; 5199}; 5200typedef union cvmx_l2c_qos_iobx cvmx_l2c_qos_iobx_t; 5201 5202/** 5203 * cvmx_l2c_qos_pp# 5204 * 5205 * L2C_QOS_PP = L2C PP QOS level 5206 * 5207 * Description: 5208 */ 5209union cvmx_l2c_qos_ppx { 5210 uint64_t u64; 5211 struct cvmx_l2c_qos_ppx_s { 5212#ifdef __BIG_ENDIAN_BITFIELD 5213 uint64_t reserved_3_63 : 61; 5214 uint64_t lvl : 3; /**< QOS level to use for this PP. */ 5215#else 5216 uint64_t lvl : 3; 5217 uint64_t reserved_3_63 : 61; 5218#endif 5219 } s; 5220 struct cvmx_l2c_qos_ppx_cn61xx { 5221#ifdef __BIG_ENDIAN_BITFIELD 5222 uint64_t reserved_2_63 : 62; 5223 uint64_t lvl : 2; /**< QOS level to use for this PP. 
*/ 5224#else 5225 uint64_t lvl : 2; 5226 uint64_t reserved_2_63 : 62; 5227#endif 5228 } cn61xx; 5229 struct cvmx_l2c_qos_ppx_cn61xx cn63xx; 5230 struct cvmx_l2c_qos_ppx_cn61xx cn63xxp1; 5231 struct cvmx_l2c_qos_ppx_cn61xx cn66xx; 5232 struct cvmx_l2c_qos_ppx_s cn68xx; 5233 struct cvmx_l2c_qos_ppx_s cn68xxp1; 5234 struct cvmx_l2c_qos_ppx_cn61xx cnf71xx; 5235}; 5236typedef union cvmx_l2c_qos_ppx cvmx_l2c_qos_ppx_t; 5237 5238/** 5239 * cvmx_l2c_qos_wgt 5240 * 5241 * L2C_QOS_WGT = L2C QOS weights 5242 * 5243 */ 5244union cvmx_l2c_qos_wgt { 5245 uint64_t u64; 5246 struct cvmx_l2c_qos_wgt_s { 5247#ifdef __BIG_ENDIAN_BITFIELD 5248 uint64_t wgt7 : 8; /**< Weight for QOS level 7 */ 5249 uint64_t wgt6 : 8; /**< Weight for QOS level 6 */ 5250 uint64_t wgt5 : 8; /**< Weight for QOS level 5 */ 5251 uint64_t wgt4 : 8; /**< Weight for QOS level 4 */ 5252 uint64_t wgt3 : 8; /**< Weight for QOS level 3 */ 5253 uint64_t wgt2 : 8; /**< Weight for QOS level 2 */ 5254 uint64_t wgt1 : 8; /**< Weight for QOS level 1 */ 5255 uint64_t wgt0 : 8; /**< Weight for QOS level 0 */ 5256#else 5257 uint64_t wgt0 : 8; 5258 uint64_t wgt1 : 8; 5259 uint64_t wgt2 : 8; 5260 uint64_t wgt3 : 8; 5261 uint64_t wgt4 : 8; 5262 uint64_t wgt5 : 8; 5263 uint64_t wgt6 : 8; 5264 uint64_t wgt7 : 8; 5265#endif 5266 } s; 5267 struct cvmx_l2c_qos_wgt_cn61xx { 5268#ifdef __BIG_ENDIAN_BITFIELD 5269 uint64_t reserved_32_63 : 32; 5270 uint64_t wgt3 : 8; /**< Weight for QOS level 3 */ 5271 uint64_t wgt2 : 8; /**< Weight for QOS level 2 */ 5272 uint64_t wgt1 : 8; /**< Weight for QOS level 1 */ 5273 uint64_t wgt0 : 8; /**< Weight for QOS level 0 */ 5274#else 5275 uint64_t wgt0 : 8; 5276 uint64_t wgt1 : 8; 5277 uint64_t wgt2 : 8; 5278 uint64_t wgt3 : 8; 5279 uint64_t reserved_32_63 : 32; 5280#endif 5281 } cn61xx; 5282 struct cvmx_l2c_qos_wgt_cn61xx cn63xx; 5283 struct cvmx_l2c_qos_wgt_cn61xx cn63xxp1; 5284 struct cvmx_l2c_qos_wgt_cn61xx cn66xx; 5285 struct cvmx_l2c_qos_wgt_s cn68xx; 5286 struct cvmx_l2c_qos_wgt_s cn68xxp1; 
5287 struct cvmx_l2c_qos_wgt_cn61xx cnf71xx; 5288}; 5289typedef union cvmx_l2c_qos_wgt cvmx_l2c_qos_wgt_t; 5290 5291/** 5292 * cvmx_l2c_rsc#_pfc 5293 * 5294 * L2C_RSC_PFC = L2C RSC Performance Counter(s) 5295 * 5296 */ 5297union cvmx_l2c_rscx_pfc { 5298 uint64_t u64; 5299 struct cvmx_l2c_rscx_pfc_s { 5300#ifdef __BIG_ENDIAN_BITFIELD 5301 uint64_t count : 64; /**< Current counter value */ 5302#else 5303 uint64_t count : 64; 5304#endif 5305 } s; 5306 struct cvmx_l2c_rscx_pfc_s cn61xx; 5307 struct cvmx_l2c_rscx_pfc_s cn63xx; 5308 struct cvmx_l2c_rscx_pfc_s cn63xxp1; 5309 struct cvmx_l2c_rscx_pfc_s cn66xx; 5310 struct cvmx_l2c_rscx_pfc_s cn68xx; 5311 struct cvmx_l2c_rscx_pfc_s cn68xxp1; 5312 struct cvmx_l2c_rscx_pfc_s cnf71xx; 5313}; 5314typedef union cvmx_l2c_rscx_pfc cvmx_l2c_rscx_pfc_t; 5315 5316/** 5317 * cvmx_l2c_rsd#_pfc 5318 * 5319 * L2C_RSD_PFC = L2C RSD Performance Counter(s) 5320 * 5321 */ 5322union cvmx_l2c_rsdx_pfc { 5323 uint64_t u64; 5324 struct cvmx_l2c_rsdx_pfc_s { 5325#ifdef __BIG_ENDIAN_BITFIELD 5326 uint64_t count : 64; /**< Current counter value */ 5327#else 5328 uint64_t count : 64; 5329#endif 5330 } s; 5331 struct cvmx_l2c_rsdx_pfc_s cn61xx; 5332 struct cvmx_l2c_rsdx_pfc_s cn63xx; 5333 struct cvmx_l2c_rsdx_pfc_s cn63xxp1; 5334 struct cvmx_l2c_rsdx_pfc_s cn66xx; 5335 struct cvmx_l2c_rsdx_pfc_s cn68xx; 5336 struct cvmx_l2c_rsdx_pfc_s cn68xxp1; 5337 struct cvmx_l2c_rsdx_pfc_s cnf71xx; 5338}; 5339typedef union cvmx_l2c_rsdx_pfc cvmx_l2c_rsdx_pfc_t; 5340 5341/** 5342 * cvmx_l2c_spar0 5343 * 5344 * L2C_SPAR0 = L2 Set Partitioning Register (PP0-3) 5345 * 5346 * Description: L2 Set Partitioning Register 5347 * 5348 * Notes: 5349 * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that 5350 * set for replacement. 
5351 * - There MUST ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation 5352 * - NOTES: When L2C FUSE[136] is blown(CRIP_256K), then SETS#7-4 are SET in all UMSK'x' registers 5353 * When L2C FUSE[137] is blown(CRIP_128K), then SETS#7-2 are SET in all UMSK'x' registers 5354 */ 5355union cvmx_l2c_spar0 { 5356 uint64_t u64; 5357 struct cvmx_l2c_spar0_s { 5358#ifdef __BIG_ENDIAN_BITFIELD 5359 uint64_t reserved_32_63 : 32; 5360 uint64_t umsk3 : 8; /**< PP[3] L2 'DO NOT USE' set partition mask */ 5361 uint64_t umsk2 : 8; /**< PP[2] L2 'DO NOT USE' set partition mask */ 5362 uint64_t umsk1 : 8; /**< PP[1] L2 'DO NOT USE' set partition mask */ 5363 uint64_t umsk0 : 8; /**< PP[0] L2 'DO NOT USE' set partition mask */ 5364#else 5365 uint64_t umsk0 : 8; 5366 uint64_t umsk1 : 8; 5367 uint64_t umsk2 : 8; 5368 uint64_t umsk3 : 8; 5369 uint64_t reserved_32_63 : 32; 5370#endif 5371 } s; 5372 struct cvmx_l2c_spar0_cn30xx { 5373#ifdef __BIG_ENDIAN_BITFIELD 5374 uint64_t reserved_4_63 : 60; 5375 uint64_t umsk0 : 4; /**< PP[0] L2 'DO NOT USE' set partition mask */ 5376#else 5377 uint64_t umsk0 : 4; 5378 uint64_t reserved_4_63 : 60; 5379#endif 5380 } cn30xx; 5381 struct cvmx_l2c_spar0_cn31xx { 5382#ifdef __BIG_ENDIAN_BITFIELD 5383 uint64_t reserved_12_63 : 52; 5384 uint64_t umsk1 : 4; /**< PP[1] L2 'DO NOT USE' set partition mask */ 5385 uint64_t reserved_4_7 : 4; 5386 uint64_t umsk0 : 4; /**< PP[0] L2 'DO NOT USE' set partition mask */ 5387#else 5388 uint64_t umsk0 : 4; 5389 uint64_t reserved_4_7 : 4; 5390 uint64_t umsk1 : 4; 5391 uint64_t reserved_12_63 : 52; 5392#endif 5393 } cn31xx; 5394 struct cvmx_l2c_spar0_s cn38xx; 5395 struct cvmx_l2c_spar0_s cn38xxp2; 5396 struct cvmx_l2c_spar0_cn50xx { 5397#ifdef __BIG_ENDIAN_BITFIELD 5398 uint64_t reserved_16_63 : 48; 5399 uint64_t umsk1 : 8; /**< PP[1] L2 'DO NOT USE' set partition mask */ 5400 uint64_t umsk0 : 8; /**< PP[0] L2 'DO NOT USE' set partition mask */ 5401#else 5402 uint64_t umsk0 : 8; 5403 
uint64_t umsk1 : 8; 5404 uint64_t reserved_16_63 : 48; 5405#endif 5406 } cn50xx; 5407 struct cvmx_l2c_spar0_s cn52xx; 5408 struct cvmx_l2c_spar0_s cn52xxp1; 5409 struct cvmx_l2c_spar0_s cn56xx; 5410 struct cvmx_l2c_spar0_s cn56xxp1; 5411 struct cvmx_l2c_spar0_s cn58xx; 5412 struct cvmx_l2c_spar0_s cn58xxp1; 5413}; 5414typedef union cvmx_l2c_spar0 cvmx_l2c_spar0_t; 5415 5416/** 5417 * cvmx_l2c_spar1 5418 * 5419 * L2C_SPAR1 = L2 Set Partitioning Register (PP4-7) 5420 * 5421 * Description: L2 Set Partitioning Register 5422 * 5423 * Notes: 5424 * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that 5425 * set for replacement. 5426 * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation 5427 * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers 5428 * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers 5429 */ 5430union cvmx_l2c_spar1 { 5431 uint64_t u64; 5432 struct cvmx_l2c_spar1_s { 5433#ifdef __BIG_ENDIAN_BITFIELD 5434 uint64_t reserved_32_63 : 32; 5435 uint64_t umsk7 : 8; /**< PP[7] L2 'DO NOT USE' set partition mask */ 5436 uint64_t umsk6 : 8; /**< PP[6] L2 'DO NOT USE' set partition mask */ 5437 uint64_t umsk5 : 8; /**< PP[5] L2 'DO NOT USE' set partition mask */ 5438 uint64_t umsk4 : 8; /**< PP[4] L2 'DO NOT USE' set partition mask */ 5439#else 5440 uint64_t umsk4 : 8; 5441 uint64_t umsk5 : 8; 5442 uint64_t umsk6 : 8; 5443 uint64_t umsk7 : 8; 5444 uint64_t reserved_32_63 : 32; 5445#endif 5446 } s; 5447 struct cvmx_l2c_spar1_s cn38xx; 5448 struct cvmx_l2c_spar1_s cn38xxp2; 5449 struct cvmx_l2c_spar1_s cn56xx; 5450 struct cvmx_l2c_spar1_s cn56xxp1; 5451 struct cvmx_l2c_spar1_s cn58xx; 5452 struct cvmx_l2c_spar1_s cn58xxp1; 5453}; 5454typedef union cvmx_l2c_spar1 cvmx_l2c_spar1_t; 5455 5456/** 5457 * cvmx_l2c_spar2 5458 * 5459 * L2C_SPAR2 = L2 Set Partitioning Register (PP8-11) 5460 * 5461 * 
Description: L2 Set Partitioning Register 5462 * 5463 * Notes: 5464 * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that 5465 * set for replacement. 5466 * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation 5467 * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers 5468 * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers 5469 */ 5470union cvmx_l2c_spar2 { 5471 uint64_t u64; 5472 struct cvmx_l2c_spar2_s { 5473#ifdef __BIG_ENDIAN_BITFIELD 5474 uint64_t reserved_32_63 : 32; 5475 uint64_t umsk11 : 8; /**< PP[11] L2 'DO NOT USE' set partition mask */ 5476 uint64_t umsk10 : 8; /**< PP[10] L2 'DO NOT USE' set partition mask */ 5477 uint64_t umsk9 : 8; /**< PP[9] L2 'DO NOT USE' set partition mask */ 5478 uint64_t umsk8 : 8; /**< PP[8] L2 'DO NOT USE' set partition mask */ 5479#else 5480 uint64_t umsk8 : 8; 5481 uint64_t umsk9 : 8; 5482 uint64_t umsk10 : 8; 5483 uint64_t umsk11 : 8; 5484 uint64_t reserved_32_63 : 32; 5485#endif 5486 } s; 5487 struct cvmx_l2c_spar2_s cn38xx; 5488 struct cvmx_l2c_spar2_s cn38xxp2; 5489 struct cvmx_l2c_spar2_s cn56xx; 5490 struct cvmx_l2c_spar2_s cn56xxp1; 5491 struct cvmx_l2c_spar2_s cn58xx; 5492 struct cvmx_l2c_spar2_s cn58xxp1; 5493}; 5494typedef union cvmx_l2c_spar2 cvmx_l2c_spar2_t; 5495 5496/** 5497 * cvmx_l2c_spar3 5498 * 5499 * L2C_SPAR3 = L2 Set Partitioning Register (PP12-15) 5500 * 5501 * Description: L2 Set Partitioning Register 5502 * 5503 * Notes: 5504 * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that 5505 * set for replacement. 
5506 * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation 5507 * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers 5508 * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers 5509 */ 5510union cvmx_l2c_spar3 { 5511 uint64_t u64; 5512 struct cvmx_l2c_spar3_s { 5513#ifdef __BIG_ENDIAN_BITFIELD 5514 uint64_t reserved_32_63 : 32; 5515 uint64_t umsk15 : 8; /**< PP[15] L2 'DO NOT USE' set partition mask */ 5516 uint64_t umsk14 : 8; /**< PP[14] L2 'DO NOT USE' set partition mask */ 5517 uint64_t umsk13 : 8; /**< PP[13] L2 'DO NOT USE' set partition mask */ 5518 uint64_t umsk12 : 8; /**< PP[12] L2 'DO NOT USE' set partition mask */ 5519#else 5520 uint64_t umsk12 : 8; 5521 uint64_t umsk13 : 8; 5522 uint64_t umsk14 : 8; 5523 uint64_t umsk15 : 8; 5524 uint64_t reserved_32_63 : 32; 5525#endif 5526 } s; 5527 struct cvmx_l2c_spar3_s cn38xx; 5528 struct cvmx_l2c_spar3_s cn38xxp2; 5529 struct cvmx_l2c_spar3_s cn58xx; 5530 struct cvmx_l2c_spar3_s cn58xxp1; 5531}; 5532typedef union cvmx_l2c_spar3 cvmx_l2c_spar3_t; 5533 5534/** 5535 * cvmx_l2c_spar4 5536 * 5537 * L2C_SPAR4 = L2 Set Partitioning Register (IOB) 5538 * 5539 * Description: L2 Set Partitioning Register 5540 * 5541 * Notes: 5542 * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that 5543 * set for replacement. 
5544 * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation 5545 * - NOTES: When L2C FUSE[136] is blown(CRIP_256K), then SETS#7-4 are SET in all UMSK'x' registers 5546 * When L2C FUSE[137] is blown(CRIP_128K), then SETS#7-2 are SET in all UMSK'x' registers 5547 */ 5548union cvmx_l2c_spar4 { 5549 uint64_t u64; 5550 struct cvmx_l2c_spar4_s { 5551#ifdef __BIG_ENDIAN_BITFIELD 5552 uint64_t reserved_8_63 : 56; 5553 uint64_t umskiob : 8; /**< IOB L2 'DO NOT USE' set partition mask */ 5554#else 5555 uint64_t umskiob : 8; 5556 uint64_t reserved_8_63 : 56; 5557#endif 5558 } s; 5559 struct cvmx_l2c_spar4_cn30xx { 5560#ifdef __BIG_ENDIAN_BITFIELD 5561 uint64_t reserved_4_63 : 60; 5562 uint64_t umskiob : 4; /**< IOB L2 'DO NOT USE' set partition mask */ 5563#else 5564 uint64_t umskiob : 4; 5565 uint64_t reserved_4_63 : 60; 5566#endif 5567 } cn30xx; 5568 struct cvmx_l2c_spar4_cn30xx cn31xx; 5569 struct cvmx_l2c_spar4_s cn38xx; 5570 struct cvmx_l2c_spar4_s cn38xxp2; 5571 struct cvmx_l2c_spar4_s cn50xx; 5572 struct cvmx_l2c_spar4_s cn52xx; 5573 struct cvmx_l2c_spar4_s cn52xxp1; 5574 struct cvmx_l2c_spar4_s cn56xx; 5575 struct cvmx_l2c_spar4_s cn56xxp1; 5576 struct cvmx_l2c_spar4_s cn58xx; 5577 struct cvmx_l2c_spar4_s cn58xxp1; 5578}; 5579typedef union cvmx_l2c_spar4 cvmx_l2c_spar4_t; 5580 5581/** 5582 * cvmx_l2c_tad#_ecc0 5583 * 5584 * L2C_TAD_ECC0 = L2C ECC logging 5585 * 5586 * Description: holds the syndromes for a L2D read generated from L2C_XMC_CMD 5587 */ 5588union cvmx_l2c_tadx_ecc0 { 5589 uint64_t u64; 5590 struct cvmx_l2c_tadx_ecc0_s { 5591#ifdef __BIG_ENDIAN_BITFIELD 5592 uint64_t reserved_58_63 : 6; 5593 uint64_t ow3ecc : 10; /**< ECC for OW3 of cache block */ 5594 uint64_t reserved_42_47 : 6; 5595 uint64_t ow2ecc : 10; /**< ECC for OW2 of cache block */ 5596 uint64_t reserved_26_31 : 6; 5597 uint64_t ow1ecc : 10; /**< ECC for OW1 of cache block */ 5598 uint64_t reserved_10_15 : 6; 5599 uint64_t ow0ecc : 10; /**< ECC for 
OW0 of cache block */
#else
    uint64_t ow0ecc : 10;
    uint64_t reserved_10_15 : 6;
    uint64_t ow1ecc : 10;
    uint64_t reserved_26_31 : 6;
    uint64_t ow2ecc : 10;
    uint64_t reserved_42_47 : 6;
    uint64_t ow3ecc : 10;
    uint64_t reserved_58_63 : 6;
#endif
    } s;
    struct cvmx_l2c_tadx_ecc0_s cn61xx;
    struct cvmx_l2c_tadx_ecc0_s cn63xx;
    struct cvmx_l2c_tadx_ecc0_s cn63xxp1;
    struct cvmx_l2c_tadx_ecc0_s cn66xx;
    struct cvmx_l2c_tadx_ecc0_s cn68xx;
    struct cvmx_l2c_tadx_ecc0_s cn68xxp1;
    struct cvmx_l2c_tadx_ecc0_s cnf71xx;
};
typedef union cvmx_l2c_tadx_ecc0 cvmx_l2c_tadx_ecc0_t;

/**
 * cvmx_l2c_tad#_ecc1
 *
 * L2C_TAD_ECC1 = L2C ECC logging
 *
 * Description: holds the syndromes for a L2D read generated from L2C_XMC_CMD
 */
union cvmx_l2c_tadx_ecc1 {
    uint64_t u64;
    struct cvmx_l2c_tadx_ecc1_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_58_63 : 6;
    uint64_t ow7ecc : 10; /**< ECC for OW7 of cache block */
    uint64_t reserved_42_47 : 6;
    uint64_t ow6ecc : 10; /**< ECC for OW6 of cache block */
    uint64_t reserved_26_31 : 6;
    uint64_t ow5ecc : 10; /**< ECC for OW5 of cache block */
    uint64_t reserved_10_15 : 6;
    uint64_t ow4ecc : 10; /**< ECC for OW4 of cache block */
#else
    uint64_t ow4ecc : 10;
    uint64_t reserved_10_15 : 6;
    uint64_t ow5ecc : 10;
    uint64_t reserved_26_31 : 6;
    uint64_t ow6ecc : 10;
    uint64_t reserved_42_47 : 6;
    uint64_t ow7ecc : 10;
    uint64_t reserved_58_63 : 6;
#endif
    } s;
    struct cvmx_l2c_tadx_ecc1_s cn61xx;
    struct cvmx_l2c_tadx_ecc1_s cn63xx;
    struct cvmx_l2c_tadx_ecc1_s cn63xxp1;
    struct cvmx_l2c_tadx_ecc1_s cn66xx;
    struct cvmx_l2c_tadx_ecc1_s cn68xx;
    struct cvmx_l2c_tadx_ecc1_s cn68xxp1;
    struct cvmx_l2c_tadx_ecc1_s cnf71xx;
};
typedef union cvmx_l2c_tadx_ecc1 cvmx_l2c_tadx_ecc1_t;

/**
 * cvmx_l2c_tad#_ien
 *
 * L2C_TAD_IEN = L2C TAD Interrupt Enable
 *
 */
union cvmx_l2c_tadx_ien {
    uint64_t u64;
    struct cvmx_l2c_tadx_ien_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_9_63 : 55;
    uint64_t wrdislmc : 1; /**< Illegal Write to Disabled LMC Error enable
                                Enables L2C_TADX_INT[WRDISLMC] to
                                assert L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t rddislmc : 1; /**< Illegal Read to Disabled LMC Error enable
                                Enables L2C_TADX_INT[RDDISLMC] to
                                assert L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t noway : 1; /**< No way available interrupt enable
                             Enables L2C_ERR_TTGX[NOWAY]/L2C_TADX_INT[NOWAY] to
                             assert L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t vbfdbe : 1; /**< VBF Double-Bit Error enable
                              Enables L2C_ERR_TDTX[VDBE]/L2C_TADX_INT[VBFDBE] to
                              assert L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t vbfsbe : 1; /**< VBF Single-Bit Error enable
                              Enables L2C_ERR_TDTX[VSBE]/L2C_TADX_INT[VBFSBE] to
                              assert L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t tagdbe : 1; /**< TAG Double-Bit Error enable
                              Enables L2C_ERR_TTGX[DBE]/L2C_TADX_INT[TAGDBE] to
                              assert L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t tagsbe : 1; /**< TAG Single-Bit Error enable
                              Enables L2C_ERR_TTGX[SBE]/L2C_TADX_INT[TAGSBE] to
                              assert L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t l2ddbe : 1; /**< L2D Double-Bit Error enable
                              Enables L2C_ERR_TDTX[DBE]/L2C_TADX_INT[L2DDBE] to
                              assert L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t l2dsbe : 1; /**< L2D Single-Bit Error enable
                              Enables L2C_ERR_TDTX[SBE]/L2C_TADX_INT[L2DSBE] to
                              assert L2C_INT_REG[TADX] (and cause an interrupt) */
#else
    uint64_t l2dsbe : 1;
    uint64_t l2ddbe : 1;
    uint64_t tagsbe : 1;
    uint64_t tagdbe : 1;
    uint64_t vbfsbe : 1;
    uint64_t vbfdbe : 1;
    uint64_t noway : 1;
    uint64_t rddislmc : 1;
    uint64_t wrdislmc : 1;
    uint64_t reserved_9_63 : 55;
#endif
    } s;
    struct cvmx_l2c_tadx_ien_s cn61xx;
    struct cvmx_l2c_tadx_ien_s cn63xx;
    struct cvmx_l2c_tadx_ien_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_7_63 : 57;
    uint64_t noway : 1; /**< No way available interrupt enable
                             Enables L2C_ERR_TTGX[NOWAY] to assert
                             L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t vbfdbe : 1; /**< VBF Double-Bit Error enable
                              Enables L2C_ERR_TDTX[VDBE] to assert
                              L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t vbfsbe : 1; /**< VBF Single-Bit Error enable
                              Enables L2C_ERR_TDTX[VSBE] to assert
                              L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t tagdbe : 1; /**< TAG Double-Bit Error enable
                              Enables L2C_ERR_TTGX[DBE] to assert
                              L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t tagsbe : 1; /**< TAG Single-Bit Error enable
                              Enables L2C_ERR_TTGX[SBE] to assert
                              L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t l2ddbe : 1; /**< L2D Double-Bit Error enable
                              Enables L2C_ERR_TDTX[DBE] to assert
                              L2C_INT_REG[TADX] (and cause an interrupt) */
    uint64_t l2dsbe : 1; /**< L2D Single-Bit Error enable
                              Enables L2C_ERR_TDTX[SBE] to assert
                              L2C_INT_REG[TADX] (and cause an interrupt) */
#else
    uint64_t l2dsbe : 1;
    uint64_t l2ddbe : 1;
    uint64_t tagsbe : 1;
    uint64_t tagdbe : 1;
    uint64_t vbfsbe : 1;
    uint64_t vbfdbe : 1;
    uint64_t noway : 1;
    uint64_t reserved_7_63 : 57;
#endif
    } cn63xxp1;
    struct cvmx_l2c_tadx_ien_s cn66xx;
    struct cvmx_l2c_tadx_ien_s cn68xx;
    struct cvmx_l2c_tadx_ien_s cn68xxp1;
    struct cvmx_l2c_tadx_ien_s cnf71xx;
};
typedef union cvmx_l2c_tadx_ien cvmx_l2c_tadx_ien_t;

/**
 * cvmx_l2c_tad#_int
 *
 * L2C_TAD_INT = L2C TAD Interrupt Register (not present in pass 1 O63)
 *
 *
 * Notes:
 * L2C_TAD_IEN 
is the interrupt enable register corresponding to this register.
 *
 */
union cvmx_l2c_tadx_int {
    uint64_t u64;
    struct cvmx_l2c_tadx_int_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_9_63 : 55;
    uint64_t wrdislmc : 1; /**< Illegal Write to Disabled LMC Error
                                A DRAM write arrived before the LMC(s) were enabled */
    uint64_t rddislmc : 1; /**< Illegal Read to Disabled LMC Error
                                A DRAM read arrived before the LMC(s) were enabled */
    uint64_t noway : 1; /**< No way available interrupt
                             Shadow copy of L2C_ERR_TTGX[NOWAY]
                             Writes of 1 also clear L2C_ERR_TTGX[NOWAY] */
    uint64_t vbfdbe : 1; /**< VBF Double-Bit Error
                              Shadow copy of L2C_ERR_TDTX[VDBE]
                              Writes of 1 also clear L2C_ERR_TDTX[VDBE] */
    uint64_t vbfsbe : 1; /**< VBF Single-Bit Error
                              Shadow copy of L2C_ERR_TDTX[VSBE]
                              Writes of 1 also clear L2C_ERR_TDTX[VSBE] */
    uint64_t tagdbe : 1; /**< TAG Double-Bit Error
                              Shadow copy of L2C_ERR_TTGX[DBE]
                              Writes of 1 also clear L2C_ERR_TTGX[DBE] */
    uint64_t tagsbe : 1; /**< TAG Single-Bit Error
                              Shadow copy of L2C_ERR_TTGX[SBE]
                              Writes of 1 also clear L2C_ERR_TTGX[SBE] */
    uint64_t l2ddbe : 1; /**< L2D Double-Bit Error
                              Shadow copy of L2C_ERR_TDTX[DBE]
                              Writes of 1 also clear L2C_ERR_TDTX[DBE] */
    uint64_t l2dsbe : 1; /**< L2D Single-Bit Error
                              Shadow copy of L2C_ERR_TDTX[SBE]
                              Writes of 1 also clear L2C_ERR_TDTX[SBE] */
#else
    uint64_t l2dsbe : 1;
    uint64_t l2ddbe : 1;
    uint64_t tagsbe : 1;
    uint64_t tagdbe : 1;
    uint64_t vbfsbe : 1;
    uint64_t vbfdbe : 1;
    uint64_t noway : 1;
    uint64_t rddislmc : 1;
    uint64_t wrdislmc : 1;
    uint64_t reserved_9_63 : 55;
#endif
    } s;
    struct cvmx_l2c_tadx_int_s cn61xx;
    struct cvmx_l2c_tadx_int_s cn63xx;
    struct cvmx_l2c_tadx_int_s cn66xx;
    struct cvmx_l2c_tadx_int_s cn68xx;
    struct cvmx_l2c_tadx_int_s cn68xxp1;
    struct cvmx_l2c_tadx_int_s cnf71xx;
};
typedef union cvmx_l2c_tadx_int cvmx_l2c_tadx_int_t;

/**
 * cvmx_l2c_tad#_pfc0
 *
 * L2C_TAD_PFC0 = L2C TAD Performance Counter 0
 *
 */
union cvmx_l2c_tadx_pfc0 {
    uint64_t u64;
    struct cvmx_l2c_tadx_pfc0_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t count : 64; /**< Current counter value */
#else
    uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_tadx_pfc0_s cn61xx;
    struct cvmx_l2c_tadx_pfc0_s cn63xx;
    struct cvmx_l2c_tadx_pfc0_s cn63xxp1;
    struct cvmx_l2c_tadx_pfc0_s cn66xx;
    struct cvmx_l2c_tadx_pfc0_s cn68xx;
    struct cvmx_l2c_tadx_pfc0_s cn68xxp1;
    struct cvmx_l2c_tadx_pfc0_s cnf71xx;
};
typedef union cvmx_l2c_tadx_pfc0 cvmx_l2c_tadx_pfc0_t;

/**
 * cvmx_l2c_tad#_pfc1
 *
 * L2C_TAD_PFC1 = L2C TAD Performance Counter 1
 *
 */
union cvmx_l2c_tadx_pfc1 {
    uint64_t u64;
    struct cvmx_l2c_tadx_pfc1_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t count : 64; /**< Current counter value */
#else
    uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_tadx_pfc1_s cn61xx;
    struct cvmx_l2c_tadx_pfc1_s cn63xx;
    struct cvmx_l2c_tadx_pfc1_s cn63xxp1;
    struct cvmx_l2c_tadx_pfc1_s cn66xx;
    struct cvmx_l2c_tadx_pfc1_s cn68xx;
    struct cvmx_l2c_tadx_pfc1_s cn68xxp1;
    struct cvmx_l2c_tadx_pfc1_s cnf71xx;
};
typedef union cvmx_l2c_tadx_pfc1 cvmx_l2c_tadx_pfc1_t;

/**
 * cvmx_l2c_tad#_pfc2
 *
 * L2C_TAD_PFC2 = L2C TAD Performance Counter 2
 *
 */
union cvmx_l2c_tadx_pfc2 {
    uint64_t u64;
    struct cvmx_l2c_tadx_pfc2_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t count : 64; /**< Current counter value */
#else
    uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_tadx_pfc2_s cn61xx;
    struct cvmx_l2c_tadx_pfc2_s cn63xx;
    struct cvmx_l2c_tadx_pfc2_s cn63xxp1;
    struct cvmx_l2c_tadx_pfc2_s cn66xx;
    struct cvmx_l2c_tadx_pfc2_s cn68xx;
    struct cvmx_l2c_tadx_pfc2_s cn68xxp1;
    struct cvmx_l2c_tadx_pfc2_s cnf71xx;
};
typedef union cvmx_l2c_tadx_pfc2 cvmx_l2c_tadx_pfc2_t;

/**
 * cvmx_l2c_tad#_pfc3
 *
 * L2C_TAD_PFC3 = L2C TAD Performance Counter 3
 *
 */
union cvmx_l2c_tadx_pfc3 {
    uint64_t u64;
    struct cvmx_l2c_tadx_pfc3_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t count : 64; /**< Current counter value */
#else
    uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_tadx_pfc3_s cn61xx;
    struct cvmx_l2c_tadx_pfc3_s cn63xx;
    struct cvmx_l2c_tadx_pfc3_s cn63xxp1;
    struct cvmx_l2c_tadx_pfc3_s cn66xx;
    struct cvmx_l2c_tadx_pfc3_s cn68xx;
    struct cvmx_l2c_tadx_pfc3_s cn68xxp1;
    struct cvmx_l2c_tadx_pfc3_s cnf71xx;
};
typedef union cvmx_l2c_tadx_pfc3 cvmx_l2c_tadx_pfc3_t;

/**
 * cvmx_l2c_tad#_prf
 *
 * L2C_TAD_PRF = L2C TAD Performance Counter Control
 *
 *
 * Notes:
 * (1) All four counters are equivalent and can use any of the defined selects. 
 *
 * (2) the CNTnSEL legal values are:
 *     0x00 -- Nothing (disabled)
 *     0x01 -- L2 Tag Hit
 *     0x02 -- L2 Tag Miss
 *     0x03 -- L2 Tag NoAlloc (forced no-allocate)
 *     0x04 -- L2 Victim
 *     0x05 -- SC Fail
 *     0x06 -- SC Pass
 *     0x07 -- LFB Occupancy (each cycle adds \# of LFBs valid)
 *     0x08 -- LFB Wait LFB (each cycle adds \# LFBs waiting for other LFBs)
 *     0x09 -- LFB Wait VAB (each cycle adds \# LFBs waiting for VAB)
 *     0x80 -- Quad 0 index bus inuse
 *     0x81 -- Quad 0 read data bus inuse
 *     0x82 -- Quad 0 \# banks inuse (0-4/cycle)
 *     0x83 -- Quad 0 wdat flops inuse (0-4/cycle)
 *     0x90 -- Quad 1 index bus inuse
 *     0x91 -- Quad 1 read data bus inuse
 *     0x92 -- Quad 1 \# banks inuse (0-4/cycle)
 *     0x93 -- Quad 1 wdat flops inuse (0-4/cycle)
 *     0xA0 -- Quad 2 index bus inuse
 *     0xA1 -- Quad 2 read data bus inuse
 *     0xA2 -- Quad 2 \# banks inuse (0-4/cycle)
 *     0xA3 -- Quad 2 wdat flops inuse (0-4/cycle)
 *     0xB0 -- Quad 3 index bus inuse
 *     0xB1 -- Quad 3 read data bus inuse
 *     0xB2 -- Quad 3 \# banks inuse (0-4/cycle)
 *     0xB3 -- Quad 3 wdat flops inuse (0-4/cycle)
 */
union cvmx_l2c_tadx_prf {
    uint64_t u64;
    struct cvmx_l2c_tadx_prf_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_32_63 : 32;
    uint64_t cnt3sel : 8; /**< Selects event to count for L2C_TAD_PFC3 */
    uint64_t cnt2sel : 8; /**< Selects event to count for L2C_TAD_PFC2 */
    uint64_t cnt1sel : 8; /**< Selects event to count for L2C_TAD_PFC1 */
    uint64_t cnt0sel : 8; /**< Selects event to count for L2C_TAD_PFC0 */
#else
    uint64_t cnt0sel : 8;
    uint64_t cnt1sel : 8;
    uint64_t cnt2sel : 8;
    uint64_t cnt3sel : 8;
    uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_l2c_tadx_prf_s cn61xx;
    struct cvmx_l2c_tadx_prf_s cn63xx;
    struct cvmx_l2c_tadx_prf_s cn63xxp1;
    struct cvmx_l2c_tadx_prf_s cn66xx;
    struct cvmx_l2c_tadx_prf_s cn68xx;
    struct cvmx_l2c_tadx_prf_s cn68xxp1;
    struct cvmx_l2c_tadx_prf_s cnf71xx;
};
typedef union cvmx_l2c_tadx_prf cvmx_l2c_tadx_prf_t;

/**
 * cvmx_l2c_tad#_tag
 *
 * L2C_TAD_TAG = L2C tag data
 *
 * Description: holds the tag information for LTGL2I and STGL2I commands
 *
 * Notes:
 * (1) For 63xx TAG[35] must be written zero for STGL2I's or operation is undefined. During normal
 *     operation, TAG[35] will also read 0.
 *
 * (2) If setting the LOCK bit, the USE bit should also be set or operation is undefined.
 *
 * (3) The tag is the corresponding bits from the L2C+LMC internal L2/DRAM byte address.
 */
union cvmx_l2c_tadx_tag {
    uint64_t u64;
    struct cvmx_l2c_tadx_tag_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_46_63 : 18;
    uint64_t ecc : 6; /**< The tag ECC */
    uint64_t reserved_36_39 : 4;
    uint64_t tag : 19; /**< The tag (see notes 1 and 3) */
    uint64_t reserved_4_16 : 13;
    uint64_t use : 1; /**< The LRU use bit */
    uint64_t valid : 1; /**< The valid bit */
    uint64_t dirty : 1; /**< The dirty bit */
    uint64_t lock : 1; /**< The lock bit */
#else
    uint64_t lock : 1;
    uint64_t dirty : 1;
    uint64_t valid : 1;
    uint64_t use : 1;
    uint64_t reserved_4_16 : 13;
    uint64_t tag : 19;
    uint64_t reserved_36_39 : 4;
    uint64_t ecc : 6;
    uint64_t reserved_46_63 : 18;
#endif
    } s;
    struct cvmx_l2c_tadx_tag_s cn61xx;
    struct cvmx_l2c_tadx_tag_s cn63xx;
    struct cvmx_l2c_tadx_tag_s cn63xxp1;
    struct cvmx_l2c_tadx_tag_s cn66xx;
    struct cvmx_l2c_tadx_tag_s cn68xx;
    struct cvmx_l2c_tadx_tag_s cn68xxp1;
    struct cvmx_l2c_tadx_tag_s cnf71xx;
};
typedef union cvmx_l2c_tadx_tag cvmx_l2c_tadx_tag_t;

/**
 * cvmx_l2c_ver_id
 *
 * L2C_VER_ID = L2C Virtualization ID Error Register
 *
 * Description: records virtualization IDs associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts.
 */
union cvmx_l2c_ver_id {
    uint64_t u64;
    struct cvmx_l2c_ver_id_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t mask : 64; /**< Mask of virtualization IDs which had a
                             HOLEWR/BIGWR/VRTWR error */
#else
    uint64_t mask : 64;
#endif
    } s;
    struct cvmx_l2c_ver_id_s cn61xx;
    struct cvmx_l2c_ver_id_s cn63xx;
    struct cvmx_l2c_ver_id_s cn63xxp1;
    struct cvmx_l2c_ver_id_s cn66xx;
    struct cvmx_l2c_ver_id_s cn68xx;
    struct cvmx_l2c_ver_id_s cn68xxp1;
    struct cvmx_l2c_ver_id_s cnf71xx;
};
typedef union cvmx_l2c_ver_id cvmx_l2c_ver_id_t;

/**
 * cvmx_l2c_ver_iob
 *
 * L2C_VER_IOB = L2C Virtualization ID IOB Error Register
 *
 * Description: records IOBs associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts.
 */
union cvmx_l2c_ver_iob {
    uint64_t u64;
    struct cvmx_l2c_ver_iob_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_2_63 : 62;
    uint64_t mask : 2; /**< Mask of IOBs which had a HOLEWR/BIGWR/VRTWR error */
#else
    uint64_t mask : 2;
    uint64_t reserved_2_63 : 62;
#endif
    } s;
    struct cvmx_l2c_ver_iob_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_1_63 : 63;
    uint64_t mask : 1; /**< Mask of IOBs which had a HOLEWR/BIGWR/VRTWR error */
#else
    uint64_t mask : 1;
    uint64_t reserved_1_63 : 63;
#endif
    } cn61xx;
    struct cvmx_l2c_ver_iob_cn61xx cn63xx;
    struct cvmx_l2c_ver_iob_cn61xx cn63xxp1;
    struct cvmx_l2c_ver_iob_cn61xx cn66xx;
    struct cvmx_l2c_ver_iob_s cn68xx;
    struct cvmx_l2c_ver_iob_s cn68xxp1;
    struct cvmx_l2c_ver_iob_cn61xx cnf71xx;
};
typedef union cvmx_l2c_ver_iob cvmx_l2c_ver_iob_t;

/**
 * cvmx_l2c_ver_msc
 *
 * L2C_VER_MSC = L2C Virtualization Miscellaneous Error Register (not in 63xx pass 1.x)
 *
 * Description: records type of 
command associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts
 */
union cvmx_l2c_ver_msc {
    uint64_t u64;
    struct cvmx_l2c_ver_msc_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_2_63 : 62;
    uint64_t invl2 : 1; /**< If set, a INVL2 caused HOLEWR/BIGWR/VRT* to set */
    uint64_t dwb : 1; /**< If set, a DWB caused HOLEWR/BIGWR/VRT* to set */
#else
    uint64_t dwb : 1;
    uint64_t invl2 : 1;
    uint64_t reserved_2_63 : 62;
#endif
    } s;
    struct cvmx_l2c_ver_msc_s cn61xx;
    struct cvmx_l2c_ver_msc_s cn63xx;
    struct cvmx_l2c_ver_msc_s cn66xx;
    struct cvmx_l2c_ver_msc_s cn68xx;
    struct cvmx_l2c_ver_msc_s cn68xxp1;
    struct cvmx_l2c_ver_msc_s cnf71xx;
};
typedef union cvmx_l2c_ver_msc cvmx_l2c_ver_msc_t;

/**
 * cvmx_l2c_ver_pp
 *
 * L2C_VER_PP = L2C Virtualization ID PP Error Register
 *
 * Description: records PPs associated with HOLEWR/BIGWR/VRTWR/VRTIDRNG/VRTADRNG interrupts.
 */
union cvmx_l2c_ver_pp {
    uint64_t u64;
    struct cvmx_l2c_ver_pp_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_32_63 : 32;
    uint64_t mask : 32; /**< Mask of PPs which had a HOLEWR/BIGWR/VRTWR error */
#else
    uint64_t mask : 32;
    uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_l2c_ver_pp_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_4_63 : 60;
    uint64_t mask : 4; /**< Mask of PPs which had a HOLEWR/BIGWR/VRTWR error */
#else
    uint64_t mask : 4;
    uint64_t reserved_4_63 : 60;
#endif
    } cn61xx;
    struct cvmx_l2c_ver_pp_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_6_63 : 58;
    uint64_t mask : 6; /**< Mask of PPs which had a HOLEWR/BIGWR/VRTWR error */
#else
    uint64_t mask : 6;
    uint64_t reserved_6_63 : 58;
#endif
    } cn63xx;
    struct cvmx_l2c_ver_pp_cn63xx cn63xxp1;
    struct cvmx_l2c_ver_pp_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_10_63 : 54;
    uint64_t mask : 10; /**< Mask of PPs which had a HOLEWR/BIGWR/VRTWR error */
#else
    uint64_t mask : 10;
    uint64_t reserved_10_63 : 54;
#endif
    } cn66xx;
    struct cvmx_l2c_ver_pp_s cn68xx;
    struct cvmx_l2c_ver_pp_s cn68xxp1;
    struct cvmx_l2c_ver_pp_cn61xx cnf71xx;
};
typedef union cvmx_l2c_ver_pp cvmx_l2c_ver_pp_t;

/**
 * cvmx_l2c_virtid_iob#
 *
 * L2C_VIRTID_IOB = L2C IOB virtualization ID
 *
 * Description:
 */
union cvmx_l2c_virtid_iobx {
    uint64_t u64;
    struct cvmx_l2c_virtid_iobx_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_14_63 : 50;
    uint64_t dwbid : 6; /**< Virtualization ID to use for DWB commands */
    uint64_t reserved_6_7 : 2;
    uint64_t id : 6; /**< Virtualization ID to use for non-DWB commands */
#else
    uint64_t id : 6;
    uint64_t reserved_6_7 : 2;
    uint64_t dwbid : 6;
    uint64_t reserved_14_63 : 50;
#endif
    } s;
    struct cvmx_l2c_virtid_iobx_s cn61xx;
    struct cvmx_l2c_virtid_iobx_s cn63xx;
    struct cvmx_l2c_virtid_iobx_s cn63xxp1;
    struct cvmx_l2c_virtid_iobx_s cn66xx;
    struct cvmx_l2c_virtid_iobx_s cn68xx;
    struct cvmx_l2c_virtid_iobx_s cn68xxp1;
    struct cvmx_l2c_virtid_iobx_s cnf71xx;
};
typedef union cvmx_l2c_virtid_iobx cvmx_l2c_virtid_iobx_t;

/**
 * cvmx_l2c_virtid_pp#
 *
 * L2C_VIRTID_PP = L2C PP virtualization ID
 *
 * Description:
 */
union cvmx_l2c_virtid_ppx {
    uint64_t u64;
    struct cvmx_l2c_virtid_ppx_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_6_63 : 58;
    uint64_t id : 6; /**< Virtualization ID to use for this PP. */
#else
    uint64_t id : 6;
    uint64_t reserved_6_63 : 58;
#endif
    } s;
    struct cvmx_l2c_virtid_ppx_s cn61xx;
    struct cvmx_l2c_virtid_ppx_s cn63xx;
    struct cvmx_l2c_virtid_ppx_s cn63xxp1;
    struct cvmx_l2c_virtid_ppx_s cn66xx;
    struct cvmx_l2c_virtid_ppx_s cn68xx;
    struct cvmx_l2c_virtid_ppx_s cn68xxp1;
    struct cvmx_l2c_virtid_ppx_s cnf71xx;
};
typedef union cvmx_l2c_virtid_ppx cvmx_l2c_virtid_ppx_t;

/**
 * cvmx_l2c_vrt_ctl
 *
 * L2C_VRT_CTL = L2C Virtualization control register
 *
 */
union cvmx_l2c_vrt_ctl {
    uint64_t u64;
    struct cvmx_l2c_vrt_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_9_63 : 55;
    uint64_t ooberr : 1; /**< Whether out of bounds writes are an error
                              Determines virtualization hardware behavior for
                              a store to an L2/DRAM address larger than
                              indicated by MEMSZ. If OOBERR is set, all these
                              stores (from any virtualization ID) are blocked. If
                              OOBERR is clear, none of these stores are blocked. 
*/
    uint64_t reserved_7_7 : 1;
    uint64_t memsz : 3; /**< Memory space coverage of L2C_VRT_MEM (encoded)
                             0 = 1GB
                             1 = 2GB
                             2 = 4GB
                             3 = 8GB
                             4 = 16GB
                             5 = 32GB
                             6 = 64GB (**reserved in 63xx**)
                             7 = 128GB (**reserved in 63xx**) */
    uint64_t numid : 3; /**< Number of allowed virtualization IDs (encoded)
                             0 = 2
                             1 = 4
                             2 = 8
                             3 = 16
                             4 = 32
                             5 = 64
                             6,7 illegal
                             Violations of this limit causes
                             L2C to set L2C_INT_REG[VRTIDRNG]. */
    uint64_t enable : 1; /**< Global virtualization enable
                              When ENABLE is clear, stores are never blocked by
                              the L2C virtualization hardware and none of NUMID,
                              MEMSZ, OOBERR are used. */
#else
    uint64_t enable : 1;
    uint64_t numid : 3;
    uint64_t memsz : 3;
    uint64_t reserved_7_7 : 1;
    uint64_t ooberr : 1;
    uint64_t reserved_9_63 : 55;
#endif
    } s;
    struct cvmx_l2c_vrt_ctl_s cn61xx;
    struct cvmx_l2c_vrt_ctl_s cn63xx;
    struct cvmx_l2c_vrt_ctl_s cn63xxp1;
    struct cvmx_l2c_vrt_ctl_s cn66xx;
    struct cvmx_l2c_vrt_ctl_s cn68xx;
    struct cvmx_l2c_vrt_ctl_s cn68xxp1;
    struct cvmx_l2c_vrt_ctl_s cnf71xx;
};
typedef union cvmx_l2c_vrt_ctl cvmx_l2c_vrt_ctl_t;

/**
 * cvmx_l2c_vrt_mem#
 *
 * L2C_VRT_MEM = L2C Virtualization Memory
 *
 * Description: Virtualization memory mapped region. There are 1024 32b
 * byte-parity protected entries.
 *
 * Notes:
 * When a DATA bit is set in L2C_VRT_MEM when L2C virtualization is enabled, L2C
 * prevents the selected virtual machine from storing to the selected L2/DRAM region.
 * L2C uses L2C_VRT_MEM to block stores when:
 *    - L2C_VRT_CTL[ENABLE] is set, and
 *    - the address of the store exists in L2C+LMC internal L2/DRAM Address space
 *      and is within the L2C_VRT_CTL[MEMSZ] bounds, and
 *    - the virtID of the store is within the L2C_VRT_CTL[NUMID] bounds
 *
 * L2C_VRT_MEM is never used for these L2C transactions which are always allowed:
 *    - L2C CMI L2/DRAM transactions that cannot modify L2/DRAM, and
 *    - any L2/DRAM transaction originated from L2C_XMC_CMD
 *
 * L2C_VRT_MEM contains one DATA bit per L2C+LMC internal L2/DRAM region and virtID indicating whether the store
 * to the region is allowed. The granularity of the checking is the region size, which is:
 *     2 ^^ (L2C_VRT_CTL[NUMID]+L2C_VRT_CTL[MEMSZ]+16)
 * which ranges from a minimum of 64KB to a maximum of 256MB, depending on the size
 * of L2/DRAM that is protected and the number of virtual machines.
 *
 * The L2C_VRT_MEM DATA bit that L2C uses is:
 *
 *     l2c_vrt_mem_bit_index = address >> (L2C_VRT_CTL[MEMSZ]+L2C_VRT_CTL[NUMID]+16); // address is a byte address
 *     l2c_vrt_mem_bit_index = l2c_vrt_mem_bit_index | (virtID << (14-L2C_VRT_CTL[NUMID]));
 *
 *     L2C_VRT_MEM(l2c_vrt_mem_bit_index >> 5)[DATA<l2c_vrt_mem_bit_index & 0x1F>] is used
 *
 * A specific example:
 *
 *     L2C_VRT_CTL[NUMID]=2 (i.e. 8 virtual machine ID's used)
 *     L2C_VRT_CTL[MEMSZ]=4 (i.e. L2C_VRT_MEM covers 16 GB)
 *
 *     L2/DRAM region size (granularity) is 4MB
 *
 *     l2c_vrt_mem_bit_index<14:12> = virtID<2:0>
 *     l2c_vrt_mem_bit_index<11:0> = address<33:22>
 *
 *     For L2/DRAM physical address 0x51000000 with virtID=5:
 *        L2C_VRT_MEM648[DATA<4>] determines when the store is allowed (648 is decimal, not hex)
 */
union cvmx_l2c_vrt_memx {
    uint64_t u64;
    struct cvmx_l2c_vrt_memx_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_36_63 : 28;
    uint64_t parity : 4; /**< Parity to write into (or read from) the
                              virtualization memory.
                              PARITY<i> is the even parity of DATA<(i*8)+7:i*8> */
    uint64_t data : 32; /**< Data to write into (or read from) the
                             virtualization memory. */
#else
    uint64_t data : 32;
    uint64_t parity : 4;
    uint64_t reserved_36_63 : 28;
#endif
    } s;
    struct cvmx_l2c_vrt_memx_s cn61xx;
    struct cvmx_l2c_vrt_memx_s cn63xx;
    struct cvmx_l2c_vrt_memx_s cn63xxp1;
    struct cvmx_l2c_vrt_memx_s cn66xx;
    struct cvmx_l2c_vrt_memx_s cn68xx;
    struct cvmx_l2c_vrt_memx_s cn68xxp1;
    struct cvmx_l2c_vrt_memx_s cnf71xx;
};
typedef union cvmx_l2c_vrt_memx cvmx_l2c_vrt_memx_t;

/**
 * cvmx_l2c_wpar_iob#
 *
 * L2C_WPAR_IOB = L2C IOB way partitioning
 *
 *
 * Notes:
 * (1) The read value of MASK will include bits set because of the L2C cripple fuses.
 *
 */
union cvmx_l2c_wpar_iobx {
    uint64_t u64;
    struct cvmx_l2c_wpar_iobx_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_16_63 : 48;
    uint64_t mask : 16; /**< Way partitioning mask. 
(1 means do not use) */
#else
    uint64_t mask : 16;
    uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_l2c_wpar_iobx_s cn61xx;
    struct cvmx_l2c_wpar_iobx_s cn63xx;
    struct cvmx_l2c_wpar_iobx_s cn63xxp1;
    struct cvmx_l2c_wpar_iobx_s cn66xx;
    struct cvmx_l2c_wpar_iobx_s cn68xx;
    struct cvmx_l2c_wpar_iobx_s cn68xxp1;
    struct cvmx_l2c_wpar_iobx_s cnf71xx;
};
typedef union cvmx_l2c_wpar_iobx cvmx_l2c_wpar_iobx_t;

/**
 * cvmx_l2c_wpar_pp#
 *
 * L2C_WPAR_PP = L2C PP way partitioning
 *
 *
 * Notes:
 * (1) The read value of MASK will include bits set because of the L2C cripple fuses.
 *
 */
union cvmx_l2c_wpar_ppx {
    uint64_t u64;
    struct cvmx_l2c_wpar_ppx_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_16_63 : 48;
    uint64_t mask : 16; /**< Way partitioning mask. (1 means do not use) */
#else
    uint64_t mask : 16;
    uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_l2c_wpar_ppx_s cn61xx;
    struct cvmx_l2c_wpar_ppx_s cn63xx;
    struct cvmx_l2c_wpar_ppx_s cn63xxp1;
    struct cvmx_l2c_wpar_ppx_s cn66xx;
    struct cvmx_l2c_wpar_ppx_s cn68xx;
    struct cvmx_l2c_wpar_ppx_s cn68xxp1;
    struct cvmx_l2c_wpar_ppx_s cnf71xx;
};
typedef union cvmx_l2c_wpar_ppx cvmx_l2c_wpar_ppx_t;

/**
 * cvmx_l2c_xmc#_pfc
 *
 * L2C_XMC_PFC = L2C XMC Performance Counter(s)
 *
 */
union cvmx_l2c_xmcx_pfc {
    uint64_t u64;
    struct cvmx_l2c_xmcx_pfc_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t count : 64; /**< Current counter value */
#else
    uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_xmcx_pfc_s cn61xx;
    struct cvmx_l2c_xmcx_pfc_s cn63xx;
    struct cvmx_l2c_xmcx_pfc_s cn63xxp1;
    struct cvmx_l2c_xmcx_pfc_s cn66xx;
    struct cvmx_l2c_xmcx_pfc_s cn68xx;
    struct cvmx_l2c_xmcx_pfc_s cn68xxp1;
    struct cvmx_l2c_xmcx_pfc_s cnf71xx;
};
typedef union cvmx_l2c_xmcx_pfc cvmx_l2c_xmcx_pfc_t;

/**
 * cvmx_l2c_xmc_cmd
 *
 * L2C_XMC_CMD = L2C XMC command register
 *
 *
 * Notes:
 * (1) the XMC command chosen MUST NOT be a IOB destined command or operation is UNDEFINED.
 *
 * (2) the XMC command will have sid forced to IOB, did forced to L2C, no virtualization checks
 *     performed (always pass), and xmdmsk forced to 0. Note that this implies that commands which
 *     REQUIRE an XMD cycle (STP,STC,SAA,FAA,FAS) should not be used or the results are unpredictable.
 *     The sid=IOB means that the way partitioning used for the command is L2C_WPAR_IOB.
 *     None of L2C_QOS_IOB, L2C_QOS_PP, L2C_VIRTID_IOB, L2C_VIRTID_PP are used for these commands.
 *
 * (3) any responses generated by the XMC command will be forced to PP7 (a non-existent PP) effectively
 *     causing them to be ignored. Generated STINs, however, will correctly invalidate the required
 *     PPs.
 *
 * (4) any L2D read generated by the XMC command will record the syndrome information in
 *     L2C_TAD_ECC0/1. If ECC is disabled prior to the CSR write this provides the ability to read the
 *     ECC bits directly. If ECC is not disabled this should log 0's (assuming no ECC errors were
 *     found in the block).
 *
 * (5) A write which arrives while the INUSE bit is set will block until the INUSE bit clears. This
 *     gives software 2 options when needing to issue a stream of writes to L2C_XMC_CMD: polling on the
 *     INUSE bit, or allowing HW to handle the interlock -- at the expense of locking up the RSL bus
 *     for potentially tens of cycles at a time while waiting for an available LFB/VAB entry.
 *
 * (6) The address written to L2C_XMC_CMD is a 38-bit OCTEON physical address. L2C performs hole removal and
 *     index aliasing (if enabled) on the written address and uses that for the command. This hole
 *     removed/index aliased 38-bit address is what is returned on a read of the L2C_XMC_CMD register.
 */
union cvmx_l2c_xmc_cmd {
    uint64_t u64;
    struct cvmx_l2c_xmc_cmd_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t inuse : 1; /**< Set to 1 by HW upon receiving a write, cleared when
                             command has issued (not necessarily completed, but
                             ordered relative to other traffic) and HW can accept
                             another command. */
    uint64_t cmd : 6; /**< Command to use for simulated XMC request
                           a new request can be accepted */
    uint64_t reserved_38_56 : 19;
    uint64_t addr : 38; /**< Address to use for simulated XMC request (see Note 6) */
#else
    uint64_t addr : 38;
    uint64_t reserved_38_56 : 19;
    uint64_t cmd : 6;
    uint64_t inuse : 1;
#endif
    } s;
    struct cvmx_l2c_xmc_cmd_s cn61xx;
    struct cvmx_l2c_xmc_cmd_s cn63xx;
    struct cvmx_l2c_xmc_cmd_s cn63xxp1;
    struct cvmx_l2c_xmc_cmd_s cn66xx;
    struct cvmx_l2c_xmc_cmd_s cn68xx;
    struct cvmx_l2c_xmc_cmd_s cn68xxp1;
    struct cvmx_l2c_xmc_cmd_s cnf71xx;
};
typedef union cvmx_l2c_xmc_cmd cvmx_l2c_xmc_cmd_t;

/**
 * cvmx_l2c_xmd#_pfc
 *
 * L2C_XMD_PFC = L2C XMD Performance Counter(s)
 *
 */
union cvmx_l2c_xmdx_pfc {
    uint64_t u64;
    struct cvmx_l2c_xmdx_pfc_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t count : 64; /**< Current counter value */
#else
    uint64_t count : 64;
#endif
    } s;
    struct cvmx_l2c_xmdx_pfc_s cn61xx;
    struct cvmx_l2c_xmdx_pfc_s cn63xx;
    struct cvmx_l2c_xmdx_pfc_s cn63xxp1;
    struct cvmx_l2c_xmdx_pfc_s cn66xx;
    struct cvmx_l2c_xmdx_pfc_s cn68xx;
    struct cvmx_l2c_xmdx_pfc_s cn68xxp1;
    struct cvmx_l2c_xmdx_pfc_s cnf71xx;
};
typedef union cvmx_l2c_xmdx_pfc cvmx_l2c_xmdx_pfc_t;

#endif /* __CVMX_L2C_DEFS_H__ */