1/***********************license start*************** 2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights 3 * reserved. 4 * 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 18 * * Neither the name of Cavium Networks nor the names of 19 * its contributors may be used to endorse or promote products 20 * derived from this software without specific prior written 21 * permission. 22 23 * This Software, including technical data, may be subject to U.S. export control 24 * laws, including the U.S. Export Administration Act and its associated 25 * regulations, and may be subject to export or import regulations in other 26 * countries. 27 28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR 30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO 31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR 32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM 33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, 34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF 35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR 36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR 37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
 ***********************license end**************************************/


/**
 * cvmx-smix-defs.h
 *
 * Configuration and status register (CSR) type definitions for
 * Octeon smix.
 *
 * This file is auto generated. Do not edit.
 *
 * <hr>$Revision$<hr>
 *
 */
#ifndef __CVMX_SMIX_TYPEDEFS_H__
#define __CVMX_SMIX_TYPEDEFS_H__

#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Compute the CSR address of SMI(offset)_CLK.  With address checking
 * enabled, warn when the SMI bus index is out of range for the running
 * chip model (only CN52XX/CN56XX/CN63XX accept offset 1 here).
 */
static inline uint64_t CVMX_SMIX_CLK(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_SMIX_CLK(%lu) is invalid on this chip\n", offset);
	/* SMI buses are spaced 256 bytes apart; the mask clamps the index. */
	return CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 1) * 256;
}
#else
#define CVMX_SMIX_CLK(offset) (CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 1) * 256)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Compute the CSR address of SMI(offset)_CMD, with the same per-model
 * bus-index validation as CVMX_SMIX_CLK above.
 */
static inline uint64_t CVMX_SMIX_CMD(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_SMIX_CMD(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 1) * 256;
}
#else
#define CVMX_SMIX_CMD(offset) (CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 1) * 256)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Compute the CSR address of SMI(offset)_EN, with the same per-model
 * bus-index validation as CVMX_SMIX_CLK above.
 */
static inline uint64_t CVMX_SMIX_EN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_SMIX_EN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 1) * 256;
}
#else
#define CVMX_SMIX_EN(offset) (CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 1) * 256)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Compute the CSR address of SMI(offset)_RD_DAT, with the same per-model
 * bus-index validation as CVMX_SMIX_CLK above.
 */
static inline uint64_t CVMX_SMIX_RD_DAT(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_SMIX_RD_DAT(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 1) * 256;
}
#else
#define CVMX_SMIX_RD_DAT(offset) (CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 1) * 256)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/*
 * Compute the CSR address of SMI(offset)_WR_DAT, with the same per-model
 * bus-index validation as CVMX_SMIX_CLK above.
 */
static inline uint64_t CVMX_SMIX_WR_DAT(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_SMIX_WR_DAT(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 1) * 256;
}
#else
#define CVMX_SMIX_WR_DAT(offset) (CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 1) * 256)
#endif

/**
 * cvmx_smi#_clk
 *
 * SMI_CLK = Clock Control Register
 *
 */
union cvmx_smix_clk
{
	uint64_t u64;
	/* Layout used by CN50XX, CN52XX, CN56XX and CN63XX (adds MODE bit). */
	struct cvmx_smix_clk_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_25_63 : 39;
	uint64_t mode : 1;        /**< IEEE operating mode
	                               0=Clause 22 compliant
	                               1=Clause 45 compliant */
	uint64_t reserved_21_23 : 3;
	uint64_t sample_hi : 5;   /**< When to sample read data (extended bits) */
	uint64_t sample_mode : 1; /**< Read Data sampling mode
	                               According to the 802.3 spec, on reads, the STA
	                               transitions MDC and the PHY drives MDIO with
	                               some delay relative to that edge.  This is edge1.
	                               The STA then samples MDIO on the next rising edge
	                               of MDC.  This is edge2.  Octeon can sample the
	                               read data relative to either edge.
	                               0=[SAMPLE_HI,SAMPLE] specify the sample time
	                                 relative to edge2
	                               1=[SAMPLE_HI,SAMPLE] specify the sample time
	                                 relative to edge1 */
	uint64_t reserved_14_14 : 1;
	uint64_t clk_idle : 1;    /**< Do not toggle MDC on idle cycles */
	uint64_t preamble : 1;    /**< Send PREAMBLE on SMI transaction
	                               PREAMBLE must be set 1 when MODE=1 in order
	                               for the receiving PHY to correctly frame the
	                               transaction. */
	uint64_t sample : 4;      /**< When to sample read data
	                               (number of eclks after the rising edge of mdc)
	                               ( [SAMPLE_HI,SAMPLE] > 1 )
	                               ( [SAMPLE_HI, SAMPLE] + 3 <= 2*PHASE ) */
	uint64_t phase : 8;       /**< MDC Clock Phase
	                               (number of eclks that make up an mdc phase)
	                               (PHASE > 2) */
#else
	uint64_t phase : 8;
	uint64_t sample : 4;
	uint64_t preamble : 1;
	uint64_t clk_idle : 1;
	uint64_t reserved_14_14 : 1;
	uint64_t sample_mode : 1;
	uint64_t sample_hi : 5;
	uint64_t reserved_21_23 : 3;
	uint64_t mode : 1;
	uint64_t reserved_25_63 : 39;
#endif
	} s;
	/* Older layout (no MODE bit) used by CN30XX/CN31XX/CN38XX/CN58XX. */
	struct cvmx_smix_clk_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_21_63 : 43;
	uint64_t sample_hi : 5;   /**< When to sample read data (extended bits) */
	uint64_t sample_mode : 1; /**< Read Data sampling mode
	                               According to the 802.3 spec, on reads, the STA
	                               transitions MDC and the PHY drives MDIO with
	                               some delay relative to that edge.  This is edge1.
	                               The STA then samples MDIO on the next rising edge
	                               of MDC.  This is edge2.  Octeon can sample the
	                               read data relative to either edge.
	                               0=[SAMPLE_HI,SAMPLE] specify the sample time
	                                 relative to edge2
	                               1=[SAMPLE_HI,SAMPLE] specify the sample time
	                                 relative to edge1 */
	uint64_t reserved_14_14 : 1;
	uint64_t clk_idle : 1;    /**< Do not toggle MDC on idle cycles */
	uint64_t preamble : 1;    /**< Send PREAMBLE on SMI transaction */
	uint64_t sample : 4;      /**< When to sample read data
	                               (number of eclks after the rising edge of mdc)
	                               ( [SAMPLE_HI,SAMPLE] > 1 )
	                               ( [SAMPLE_HI, SAMPLE] + 3 <= 2*PHASE ) */
	uint64_t phase : 8;       /**< MDC Clock Phase
	                               (number of eclks that make up an mdc phase)
	                               (PHASE > 2) */
#else
	uint64_t phase : 8;
	uint64_t sample : 4;
	uint64_t preamble : 1;
	uint64_t clk_idle : 1;
	uint64_t reserved_14_14 : 1;
	uint64_t sample_mode : 1;
	uint64_t sample_hi : 5;
	uint64_t reserved_21_63 : 43;
#endif
	} cn30xx;
	struct cvmx_smix_clk_cn30xx cn31xx;
	struct cvmx_smix_clk_cn30xx cn38xx;
	struct cvmx_smix_clk_cn30xx cn38xxp2;
	struct cvmx_smix_clk_s cn50xx;
	struct cvmx_smix_clk_s cn52xx;
	struct cvmx_smix_clk_s cn52xxp1;
	struct cvmx_smix_clk_s cn56xx;
	struct cvmx_smix_clk_s cn56xxp1;
	struct cvmx_smix_clk_cn30xx cn58xx;
	struct cvmx_smix_clk_cn30xx cn58xxp1;
	struct cvmx_smix_clk_s cn63xx;
	struct cvmx_smix_clk_s cn63xxp1;
};
typedef union cvmx_smix_clk cvmx_smix_clk_t;

/**
 * cvmx_smi#_cmd
 *
 * SMI_CMD = Force a Read/Write command to the PHY
 *
 *
 * Notes:
 * Writes to this register will create SMI xactions.  Software will poll on
 * (depending on the xaction type).
 *
 */
union cvmx_smix_cmd
{
	uint64_t u64;
	/* Layout with 2-bit opcode (Clause 22 and Clause 45 capable chips). */
	struct cvmx_smix_cmd_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_18_63 : 46;
	uint64_t phy_op : 2;  /**< PHY Opcode depending on SMI_CLK[MODE]
	                           SMI_CLK[MODE] == 0 (<=1Gbs / Clause 22)
	                             x0=write
	                             x1=read
	                           SMI_CLK[MODE] == 1 (>1Gbs / Clause 45)
	                             00=address
	                             01=write
	                             11=read
	                             10=post-read-increment-address */
	uint64_t reserved_13_15 : 3;
	uint64_t phy_adr : 5; /**< PHY Address */
	uint64_t reserved_5_7 : 3;
	uint64_t reg_adr : 5; /**< PHY Register Offset */
#else
	uint64_t reg_adr : 5;
	uint64_t reserved_5_7 : 3;
	uint64_t phy_adr : 5;
	uint64_t reserved_13_15 : 3;
	uint64_t phy_op : 2;
	uint64_t reserved_18_63 : 46;
#endif
	} s;
	/* Older layout with 1-bit opcode (Clause 22 only chips). */
	struct cvmx_smix_cmd_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_17_63 : 47;
	uint64_t phy_op : 1;  /**< PHY Opcode
	                           0=write
	                           1=read */
	uint64_t reserved_13_15 : 3;
	uint64_t phy_adr : 5; /**< PHY Address */
	uint64_t reserved_5_7 : 3;
	uint64_t reg_adr : 5; /**< PHY Register Offset */
#else
	uint64_t reg_adr : 5;
	uint64_t reserved_5_7 : 3;
	uint64_t phy_adr : 5;
	uint64_t reserved_13_15 : 3;
	uint64_t phy_op : 1;
	uint64_t reserved_17_63 : 47;
#endif
	} cn30xx;
	struct cvmx_smix_cmd_cn30xx cn31xx;
	struct cvmx_smix_cmd_cn30xx cn38xx;
	struct cvmx_smix_cmd_cn30xx cn38xxp2;
	struct cvmx_smix_cmd_s cn50xx;
	struct cvmx_smix_cmd_s cn52xx;
	struct cvmx_smix_cmd_s cn52xxp1;
	struct cvmx_smix_cmd_s cn56xx;
	struct cvmx_smix_cmd_s cn56xxp1;
	struct cvmx_smix_cmd_cn30xx cn58xx;
	struct cvmx_smix_cmd_cn30xx cn58xxp1;
	struct cvmx_smix_cmd_s cn63xx;
	struct cvmx_smix_cmd_s cn63xxp1;
};
typedef union cvmx_smix_cmd cvmx_smix_cmd_t;

/**
 * cvmx_smi#_en
 *
 * SMI_EN = Enable the SMI interface
 *
 */
union cvmx_smix_en
{
	uint64_t u64;
	/* Same single-bit layout on every supported chip model. */
	struct cvmx_smix_en_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_1_63 : 63;
	uint64_t en : 1; /**< Interface enable
	                      0=SMI Interface is down / no transactions, no MDC
	                      1=SMI Interface is up */
#else
	uint64_t en : 1;
	uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_smix_en_s cn30xx;
	struct cvmx_smix_en_s cn31xx;
	struct cvmx_smix_en_s cn38xx;
	struct cvmx_smix_en_s cn38xxp2;
	struct cvmx_smix_en_s cn50xx;
	struct cvmx_smix_en_s cn52xx;
	struct cvmx_smix_en_s cn52xxp1;
	struct cvmx_smix_en_s cn56xx;
	struct cvmx_smix_en_s cn56xxp1;
	struct cvmx_smix_en_s cn58xx;
	struct cvmx_smix_en_s cn58xxp1;
	struct cvmx_smix_en_s cn63xx;
	struct cvmx_smix_en_s cn63xxp1;
};
typedef union cvmx_smix_en cvmx_smix_en_t;

/**
 * cvmx_smi#_rd_dat
 *
 * SMI_RD_DAT = SMI Read Data
 *
 *
 * Notes:
 * VAL will assert when the read xaction completes.  A read to this register
 * will clear VAL.  PENDING indicates that an SMI RD transaction is in flight.
 */
union cvmx_smix_rd_dat
{
	uint64_t u64;
	/* Same layout on every supported chip model. */
	struct cvmx_smix_rd_dat_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_18_63 : 46;
	uint64_t pending : 1; /**< Read Xaction Pending */
	uint64_t val : 1;     /**< Read Data Valid */
	uint64_t dat : 16;    /**< Read Data */
#else
	uint64_t dat : 16;
	uint64_t val : 1;
	uint64_t pending : 1;
	uint64_t reserved_18_63 : 46;
#endif
	} s;
	struct cvmx_smix_rd_dat_s cn30xx;
	struct cvmx_smix_rd_dat_s cn31xx;
	struct cvmx_smix_rd_dat_s cn38xx;
	struct cvmx_smix_rd_dat_s cn38xxp2;
	struct cvmx_smix_rd_dat_s cn50xx;
	struct cvmx_smix_rd_dat_s cn52xx;
	struct cvmx_smix_rd_dat_s cn52xxp1;
	struct cvmx_smix_rd_dat_s cn56xx;
	struct cvmx_smix_rd_dat_s cn56xxp1;
	struct cvmx_smix_rd_dat_s cn58xx;
	struct cvmx_smix_rd_dat_s cn58xxp1;
	struct cvmx_smix_rd_dat_s cn63xx;
	struct cvmx_smix_rd_dat_s cn63xxp1;
};
typedef union cvmx_smix_rd_dat cvmx_smix_rd_dat_t;

/**
 * cvmx_smi#_wr_dat
 *
 * SMI_WR_DAT = SMI Write Data
 *
 *
 * Notes:
 * VAL will assert when the write xaction completes.  A read to this register
 * will clear VAL.  PENDING indicates that an SMI WR transaction is in flight.
 */
union cvmx_smix_wr_dat
{
	uint64_t u64;
	/* Same layout on every supported chip model. */
	struct cvmx_smix_wr_dat_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_18_63 : 46;
	uint64_t pending : 1; /**< Write Xaction Pending */
	uint64_t val : 1;     /**< Write Data Valid */
	uint64_t dat : 16;    /**< Write Data */
#else
	uint64_t dat : 16;
	uint64_t val : 1;
	uint64_t pending : 1;
	uint64_t reserved_18_63 : 46;
#endif
	} s;
	struct cvmx_smix_wr_dat_s cn30xx;
	struct cvmx_smix_wr_dat_s cn31xx;
	struct cvmx_smix_wr_dat_s cn38xx;
	struct cvmx_smix_wr_dat_s cn38xxp2;
	struct cvmx_smix_wr_dat_s cn50xx;
	struct cvmx_smix_wr_dat_s cn52xx;
	struct cvmx_smix_wr_dat_s cn52xxp1;
	struct cvmx_smix_wr_dat_s cn56xx;
	struct cvmx_smix_wr_dat_s cn56xxp1;
	struct cvmx_smix_wr_dat_s cn58xx;
	struct cvmx_smix_wr_dat_s cn58xxp1;
	struct cvmx_smix_wr_dat_s cn63xx;
	struct cvmx_smix_wr_dat_s cn63xxp1;
};
typedef union cvmx_smix_wr_dat cvmx_smix_wr_dat_t;

#endif