1215976Sjmallett/***********************license start*************** 2232812Sjmallett * Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights 3215976Sjmallett * reserved. 4215976Sjmallett * 5215976Sjmallett * 6215976Sjmallett * Redistribution and use in source and binary forms, with or without 7215976Sjmallett * modification, are permitted provided that the following conditions are 8215976Sjmallett * met: 9215976Sjmallett * 10215976Sjmallett * * Redistributions of source code must retain the above copyright 11215976Sjmallett * notice, this list of conditions and the following disclaimer. 12215976Sjmallett * 13215976Sjmallett * * Redistributions in binary form must reproduce the above 14215976Sjmallett * copyright notice, this list of conditions and the following 15215976Sjmallett * disclaimer in the documentation and/or other materials provided 16215976Sjmallett * with the distribution. 17215976Sjmallett 18232812Sjmallett * * Neither the name of Cavium Inc. nor the names of 19215976Sjmallett * its contributors may be used to endorse or promote products 20215976Sjmallett * derived from this software without specific prior written 21215976Sjmallett * permission. 22215976Sjmallett 23215976Sjmallett * This Software, including technical data, may be subject to U.S. export control 24215976Sjmallett * laws, including the U.S. Export Administration Act and its associated 25215976Sjmallett * regulations, and may be subject to export or import regulations in other 26215976Sjmallett * countries. 27215976Sjmallett 28215976Sjmallett * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 29232812Sjmallett * AND WITH ALL FAULTS AND CAVIUM INC. 
MAKES NO PROMISES, REPRESENTATIONS OR 30215976Sjmallett * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO 31215976Sjmallett * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR 32215976Sjmallett * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM 33215976Sjmallett * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, 34215976Sjmallett * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF 35215976Sjmallett * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR 36215976Sjmallett * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR 37215976Sjmallett * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 38215976Sjmallett ***********************license end**************************************/ 39215976Sjmallett 40215976Sjmallett 41215976Sjmallett/** 42215976Sjmallett * cvmx-lmcx-defs.h 43215976Sjmallett * 44215976Sjmallett * Configuration and status register (CSR) type definitions for 45215976Sjmallett * Octeon lmcx. 46215976Sjmallett * 47215976Sjmallett * This file is auto generated. Do not edit. 
48215976Sjmallett * 49215976Sjmallett * <hr>$Revision$<hr> 50215976Sjmallett * 51215976Sjmallett */ 52232812Sjmallett#ifndef __CVMX_LMCX_DEFS_H__ 53232812Sjmallett#define __CVMX_LMCX_DEFS_H__ 54215976Sjmallett 55215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 56215976Sjmallettstatic inline uint64_t CVMX_LMCX_BIST_CTL(unsigned long block_id) 57215976Sjmallett{ 58215976Sjmallett if (!( 59215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 60215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 61215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))))) 62215976Sjmallett cvmx_warn("CVMX_LMCX_BIST_CTL(%lu) is invalid on this chip\n", block_id); 63215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull; 64215976Sjmallett} 65215976Sjmallett#else 66215976Sjmallett#define CVMX_LMCX_BIST_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull) 67215976Sjmallett#endif 68215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 69215976Sjmallettstatic inline uint64_t CVMX_LMCX_BIST_RESULT(unsigned long block_id) 70215976Sjmallett{ 71215976Sjmallett if (!( 72215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 73215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 74215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))))) 75215976Sjmallett cvmx_warn("CVMX_LMCX_BIST_RESULT(%lu) is invalid on this chip\n", block_id); 76215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull; 77215976Sjmallett} 78215976Sjmallett#else 79215976Sjmallett#define CVMX_LMCX_BIST_RESULT(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull) 80215976Sjmallett#endif 81215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 82215976Sjmallettstatic inline uint64_t CVMX_LMCX_CHAR_CTL(unsigned long block_id) 83215976Sjmallett{ 84215976Sjmallett if (!( 
85232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 86232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 87232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 88232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 89232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 90215976Sjmallett cvmx_warn("CVMX_LMCX_CHAR_CTL(%lu) is invalid on this chip\n", block_id); 91232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000220ull) + ((block_id) & 3) * 0x1000000ull; 92215976Sjmallett} 93215976Sjmallett#else 94232812Sjmallett#define CVMX_LMCX_CHAR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000220ull) + ((block_id) & 3) * 0x1000000ull) 95215976Sjmallett#endif 96215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 97215976Sjmallettstatic inline uint64_t CVMX_LMCX_CHAR_MASK0(unsigned long block_id) 98215976Sjmallett{ 99215976Sjmallett if (!( 100232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 101232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 102232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 103232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 104232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 105215976Sjmallett cvmx_warn("CVMX_LMCX_CHAR_MASK0(%lu) is invalid on this chip\n", block_id); 106232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000228ull) + ((block_id) & 3) * 0x1000000ull; 107215976Sjmallett} 108215976Sjmallett#else 109232812Sjmallett#define CVMX_LMCX_CHAR_MASK0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000228ull) + ((block_id) & 3) * 0x1000000ull) 110215976Sjmallett#endif 111215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 112215976Sjmallettstatic inline uint64_t CVMX_LMCX_CHAR_MASK1(unsigned long block_id) 113215976Sjmallett{ 114215976Sjmallett if (!( 115232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 
116232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 117232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 118232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 119232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 120215976Sjmallett cvmx_warn("CVMX_LMCX_CHAR_MASK1(%lu) is invalid on this chip\n", block_id); 121232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000230ull) + ((block_id) & 3) * 0x1000000ull; 122215976Sjmallett} 123215976Sjmallett#else 124232812Sjmallett#define CVMX_LMCX_CHAR_MASK1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000230ull) + ((block_id) & 3) * 0x1000000ull) 125215976Sjmallett#endif 126215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 127215976Sjmallettstatic inline uint64_t CVMX_LMCX_CHAR_MASK2(unsigned long block_id) 128215976Sjmallett{ 129215976Sjmallett if (!( 130232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 131232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 132232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 133232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 134232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 135215976Sjmallett cvmx_warn("CVMX_LMCX_CHAR_MASK2(%lu) is invalid on this chip\n", block_id); 136232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000238ull) + ((block_id) & 3) * 0x1000000ull; 137215976Sjmallett} 138215976Sjmallett#else 139232812Sjmallett#define CVMX_LMCX_CHAR_MASK2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000238ull) + ((block_id) & 3) * 0x1000000ull) 140215976Sjmallett#endif 141215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 142215976Sjmallettstatic inline uint64_t CVMX_LMCX_CHAR_MASK3(unsigned long block_id) 143215976Sjmallett{ 144215976Sjmallett if (!( 145232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 146232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) 
|| 147232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 148232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 149232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 150215976Sjmallett cvmx_warn("CVMX_LMCX_CHAR_MASK3(%lu) is invalid on this chip\n", block_id); 151232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000240ull) + ((block_id) & 3) * 0x1000000ull; 152215976Sjmallett} 153215976Sjmallett#else 154232812Sjmallett#define CVMX_LMCX_CHAR_MASK3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000240ull) + ((block_id) & 3) * 0x1000000ull) 155215976Sjmallett#endif 156215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 157215976Sjmallettstatic inline uint64_t CVMX_LMCX_CHAR_MASK4(unsigned long block_id) 158215976Sjmallett{ 159215976Sjmallett if (!( 160232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 161232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 162232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 163232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 164232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 165215976Sjmallett cvmx_warn("CVMX_LMCX_CHAR_MASK4(%lu) is invalid on this chip\n", block_id); 166232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000318ull) + ((block_id) & 3) * 0x1000000ull; 167215976Sjmallett} 168215976Sjmallett#else 169232812Sjmallett#define CVMX_LMCX_CHAR_MASK4(block_id) (CVMX_ADD_IO_SEG(0x0001180088000318ull) + ((block_id) & 3) * 0x1000000ull) 170215976Sjmallett#endif 171215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 172215976Sjmallettstatic inline uint64_t CVMX_LMCX_COMP_CTL(unsigned long block_id) 173215976Sjmallett{ 174215976Sjmallett if (!( 175215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 176215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 177215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) 
|| 178215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 179215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 180215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 181215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 182215976Sjmallett cvmx_warn("CVMX_LMCX_COMP_CTL(%lu) is invalid on this chip\n", block_id); 183215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull; 184215976Sjmallett} 185215976Sjmallett#else 186215976Sjmallett#define CVMX_LMCX_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull) 187215976Sjmallett#endif 188215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 189215976Sjmallettstatic inline uint64_t CVMX_LMCX_COMP_CTL2(unsigned long block_id) 190215976Sjmallett{ 191215976Sjmallett if (!( 192232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 193232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 194232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 195232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 196232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 197215976Sjmallett cvmx_warn("CVMX_LMCX_COMP_CTL2(%lu) is invalid on this chip\n", block_id); 198232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880001B8ull) + ((block_id) & 3) * 0x1000000ull; 199215976Sjmallett} 200215976Sjmallett#else 201232812Sjmallett#define CVMX_LMCX_COMP_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B8ull) + ((block_id) & 3) * 0x1000000ull) 202215976Sjmallett#endif 203215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 204215976Sjmallettstatic inline uint64_t CVMX_LMCX_CONFIG(unsigned long block_id) 205215976Sjmallett{ 206215976Sjmallett if (!( 207232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 208232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 
209232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 210232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 211232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 212215976Sjmallett cvmx_warn("CVMX_LMCX_CONFIG(%lu) is invalid on this chip\n", block_id); 213232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000188ull) + ((block_id) & 3) * 0x1000000ull; 214215976Sjmallett} 215215976Sjmallett#else 216232812Sjmallett#define CVMX_LMCX_CONFIG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000188ull) + ((block_id) & 3) * 0x1000000ull) 217215976Sjmallett#endif 218215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 219215976Sjmallettstatic inline uint64_t CVMX_LMCX_CONTROL(unsigned long block_id) 220215976Sjmallett{ 221215976Sjmallett if (!( 222232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 223232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 224232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 225232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 226232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 227215976Sjmallett cvmx_warn("CVMX_LMCX_CONTROL(%lu) is invalid on this chip\n", block_id); 228232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000190ull) + ((block_id) & 3) * 0x1000000ull; 229215976Sjmallett} 230215976Sjmallett#else 231232812Sjmallett#define CVMX_LMCX_CONTROL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000190ull) + ((block_id) & 3) * 0x1000000ull) 232215976Sjmallett#endif 233215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 234215976Sjmallettstatic inline uint64_t CVMX_LMCX_CTL(unsigned long block_id) 235215976Sjmallett{ 236215976Sjmallett if (!( 237215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 238215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 239215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 240215976Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 241215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 242215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 243215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 244215976Sjmallett cvmx_warn("CVMX_LMCX_CTL(%lu) is invalid on this chip\n", block_id); 245215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull; 246215976Sjmallett} 247215976Sjmallett#else 248215976Sjmallett#define CVMX_LMCX_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull) 249215976Sjmallett#endif 250215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 251215976Sjmallettstatic inline uint64_t CVMX_LMCX_CTL1(unsigned long block_id) 252215976Sjmallett{ 253215976Sjmallett if (!( 254215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 255215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 256215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 257215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 258215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 259215976Sjmallett cvmx_warn("CVMX_LMCX_CTL1(%lu) is invalid on this chip\n", block_id); 260215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull; 261215976Sjmallett} 262215976Sjmallett#else 263215976Sjmallett#define CVMX_LMCX_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull) 264215976Sjmallett#endif 265215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 266215976Sjmallettstatic inline uint64_t CVMX_LMCX_DCLK_CNT(unsigned long block_id) 267215976Sjmallett{ 268215976Sjmallett if (!( 269232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 270232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 271232812Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 272232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 273232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 274215976Sjmallett cvmx_warn("CVMX_LMCX_DCLK_CNT(%lu) is invalid on this chip\n", block_id); 275232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880001E0ull) + ((block_id) & 3) * 0x1000000ull; 276215976Sjmallett} 277215976Sjmallett#else 278232812Sjmallett#define CVMX_LMCX_DCLK_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E0ull) + ((block_id) & 3) * 0x1000000ull) 279215976Sjmallett#endif 280215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 281215976Sjmallettstatic inline uint64_t CVMX_LMCX_DCLK_CNT_HI(unsigned long block_id) 282215976Sjmallett{ 283215976Sjmallett if (!( 284215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 285215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 286215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 287215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 288215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 289215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 290215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 291215976Sjmallett cvmx_warn("CVMX_LMCX_DCLK_CNT_HI(%lu) is invalid on this chip\n", block_id); 292215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull; 293215976Sjmallett} 294215976Sjmallett#else 295215976Sjmallett#define CVMX_LMCX_DCLK_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull) 296215976Sjmallett#endif 297215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 298215976Sjmallettstatic inline uint64_t CVMX_LMCX_DCLK_CNT_LO(unsigned long block_id) 299215976Sjmallett{ 300215976Sjmallett if (!( 301215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 
302215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 303215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 304215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 305215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 306215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 307215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 308215976Sjmallett cvmx_warn("CVMX_LMCX_DCLK_CNT_LO(%lu) is invalid on this chip\n", block_id); 309215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull; 310215976Sjmallett} 311215976Sjmallett#else 312215976Sjmallett#define CVMX_LMCX_DCLK_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull) 313215976Sjmallett#endif 314215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 315215976Sjmallettstatic inline uint64_t CVMX_LMCX_DCLK_CTL(unsigned long block_id) 316215976Sjmallett{ 317215976Sjmallett if (!( 318215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))))) 319215976Sjmallett cvmx_warn("CVMX_LMCX_DCLK_CTL(%lu) is invalid on this chip\n", block_id); 320215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull; 321215976Sjmallett} 322215976Sjmallett#else 323215976Sjmallett#define CVMX_LMCX_DCLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull) 324215976Sjmallett#endif 325215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 326215976Sjmallettstatic inline uint64_t CVMX_LMCX_DDR2_CTL(unsigned long block_id) 327215976Sjmallett{ 328215976Sjmallett if (!( 329215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 330215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 331215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 332215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 
333215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 334215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 335215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 336215976Sjmallett cvmx_warn("CVMX_LMCX_DDR2_CTL(%lu) is invalid on this chip\n", block_id); 337215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull; 338215976Sjmallett} 339215976Sjmallett#else 340215976Sjmallett#define CVMX_LMCX_DDR2_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull) 341215976Sjmallett#endif 342215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 343215976Sjmallettstatic inline uint64_t CVMX_LMCX_DDR_PLL_CTL(unsigned long block_id) 344215976Sjmallett{ 345215976Sjmallett if (!( 346232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 347232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 348232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 349232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 350232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 351215976Sjmallett cvmx_warn("CVMX_LMCX_DDR_PLL_CTL(%lu) is invalid on this chip\n", block_id); 352232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000258ull) + ((block_id) & 3) * 0x1000000ull; 353215976Sjmallett} 354215976Sjmallett#else 355232812Sjmallett#define CVMX_LMCX_DDR_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000258ull) + ((block_id) & 3) * 0x1000000ull) 356215976Sjmallett#endif 357215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 358215976Sjmallettstatic inline uint64_t CVMX_LMCX_DELAY_CFG(unsigned long block_id) 359215976Sjmallett{ 360215976Sjmallett if (!( 361215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 362215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 363215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) 
|| 364215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 365215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 366215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 367215976Sjmallett cvmx_warn("CVMX_LMCX_DELAY_CFG(%lu) is invalid on this chip\n", block_id); 368215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull; 369215976Sjmallett} 370215976Sjmallett#else 371215976Sjmallett#define CVMX_LMCX_DELAY_CFG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull) 372215976Sjmallett#endif 373215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 374215976Sjmallettstatic inline uint64_t CVMX_LMCX_DIMMX_PARAMS(unsigned long offset, unsigned long block_id) 375215976Sjmallett{ 376215976Sjmallett if (!( 377232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset <= 1)) && ((block_id == 0)))) || 378232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id == 0)))) || 379232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0)))) || 380232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 1)) && ((block_id <= 3)))) || 381232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset <= 1)) && ((block_id == 0)))))) 382215976Sjmallett cvmx_warn("CVMX_LMCX_DIMMX_PARAMS(%lu,%lu) is invalid on this chip\n", offset, block_id); 383232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8; 384215976Sjmallett} 385215976Sjmallett#else 386232812Sjmallett#define CVMX_LMCX_DIMMX_PARAMS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8) 387215976Sjmallett#endif 388215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 389215976Sjmallettstatic inline uint64_t CVMX_LMCX_DIMM_CTL(unsigned long block_id) 390215976Sjmallett{ 391215976Sjmallett if (!( 392232812Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 393232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 394232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 395232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 396232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 397215976Sjmallett cvmx_warn("CVMX_LMCX_DIMM_CTL(%lu) is invalid on this chip\n", block_id); 398232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000310ull) + ((block_id) & 3) * 0x1000000ull; 399215976Sjmallett} 400215976Sjmallett#else 401232812Sjmallett#define CVMX_LMCX_DIMM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000310ull) + ((block_id) & 3) * 0x1000000ull) 402215976Sjmallett#endif 403215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 404215976Sjmallettstatic inline uint64_t CVMX_LMCX_DLL_CTL(unsigned long block_id) 405215976Sjmallett{ 406215976Sjmallett if (!( 407215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 408215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))))) 409215976Sjmallett cvmx_warn("CVMX_LMCX_DLL_CTL(%lu) is invalid on this chip\n", block_id); 410215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull; 411215976Sjmallett} 412215976Sjmallett#else 413215976Sjmallett#define CVMX_LMCX_DLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull) 414215976Sjmallett#endif 415215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 416215976Sjmallettstatic inline uint64_t CVMX_LMCX_DLL_CTL2(unsigned long block_id) 417215976Sjmallett{ 418215976Sjmallett if (!( 419232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 420232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 421232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 422232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 423232812Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 424215976Sjmallett cvmx_warn("CVMX_LMCX_DLL_CTL2(%lu) is invalid on this chip\n", block_id); 425232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880001C8ull) + ((block_id) & 3) * 0x1000000ull; 426215976Sjmallett} 427215976Sjmallett#else 428232812Sjmallett#define CVMX_LMCX_DLL_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001C8ull) + ((block_id) & 3) * 0x1000000ull) 429215976Sjmallett#endif 430215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 431215976Sjmallettstatic inline uint64_t CVMX_LMCX_DLL_CTL3(unsigned long block_id) 432215976Sjmallett{ 433215976Sjmallett if (!( 434232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 435232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 436232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 437232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 438232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 439215976Sjmallett cvmx_warn("CVMX_LMCX_DLL_CTL3(%lu) is invalid on this chip\n", block_id); 440232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000218ull) + ((block_id) & 3) * 0x1000000ull; 441215976Sjmallett} 442215976Sjmallett#else 443232812Sjmallett#define CVMX_LMCX_DLL_CTL3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000218ull) + ((block_id) & 3) * 0x1000000ull) 444215976Sjmallett#endif 445215976Sjmallettstatic inline uint64_t CVMX_LMCX_DUAL_MEMCFG(unsigned long block_id) 446215976Sjmallett{ 447232812Sjmallett switch(cvmx_get_octeon_family()) { 448232812Sjmallett case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: 449232812Sjmallett case OCTEON_CN50XX & OCTEON_FAMILY_MASK: 450232812Sjmallett case OCTEON_CN58XX & OCTEON_FAMILY_MASK: 451232812Sjmallett case OCTEON_CN66XX & OCTEON_FAMILY_MASK: 452232812Sjmallett case OCTEON_CN52XX & OCTEON_FAMILY_MASK: 453232812Sjmallett case OCTEON_CN61XX & OCTEON_FAMILY_MASK: 454232812Sjmallett case OCTEON_CN63XX & OCTEON_FAMILY_MASK: 
455232812Sjmallett if ((block_id == 0)) 456232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 0) * 0x60000000ull; 457232812Sjmallett break; 458232812Sjmallett case OCTEON_CN56XX & OCTEON_FAMILY_MASK: 459232812Sjmallett if ((block_id <= 1)) 460232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 1) * 0x60000000ull; 461232812Sjmallett break; 462232812Sjmallett case OCTEON_CN68XX & OCTEON_FAMILY_MASK: 463232812Sjmallett if ((block_id <= 3)) 464232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 3) * 0x1000000ull; 465232812Sjmallett break; 466232812Sjmallett } 467232812Sjmallett cvmx_warn("CVMX_LMCX_DUAL_MEMCFG (block_id = %lu) not supported on this chip\n", block_id); 468232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 0) * 0x60000000ull; 469215976Sjmallett} 470215976Sjmallettstatic inline uint64_t CVMX_LMCX_ECC_SYND(unsigned long block_id) 471215976Sjmallett{ 472232812Sjmallett switch(cvmx_get_octeon_family()) { 473232812Sjmallett case OCTEON_CN30XX & OCTEON_FAMILY_MASK: 474232812Sjmallett case OCTEON_CN50XX & OCTEON_FAMILY_MASK: 475232812Sjmallett case OCTEON_CN38XX & OCTEON_FAMILY_MASK: 476232812Sjmallett case OCTEON_CN31XX & OCTEON_FAMILY_MASK: 477232812Sjmallett case OCTEON_CN58XX & OCTEON_FAMILY_MASK: 478232812Sjmallett case OCTEON_CN66XX & OCTEON_FAMILY_MASK: 479232812Sjmallett case OCTEON_CN52XX & OCTEON_FAMILY_MASK: 480232812Sjmallett case OCTEON_CN61XX & OCTEON_FAMILY_MASK: 481232812Sjmallett case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: 482232812Sjmallett case OCTEON_CN63XX & OCTEON_FAMILY_MASK: 483232812Sjmallett if ((block_id == 0)) 484232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 0) * 0x60000000ull; 485232812Sjmallett break; 486232812Sjmallett case OCTEON_CN56XX & OCTEON_FAMILY_MASK: 487232812Sjmallett if ((block_id <= 1)) 488232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 1) * 0x60000000ull; 
489232812Sjmallett break; 490232812Sjmallett case OCTEON_CN68XX & OCTEON_FAMILY_MASK: 491232812Sjmallett if ((block_id <= 3)) 492232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 3) * 0x1000000ull; 493232812Sjmallett break; 494232812Sjmallett } 495232812Sjmallett cvmx_warn("CVMX_LMCX_ECC_SYND (block_id = %lu) not supported on this chip\n", block_id); 496232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 0) * 0x60000000ull; 497215976Sjmallett} 498215976Sjmallettstatic inline uint64_t CVMX_LMCX_FADR(unsigned long block_id) 499215976Sjmallett{ 500232812Sjmallett switch(cvmx_get_octeon_family()) { 501232812Sjmallett case OCTEON_CN30XX & OCTEON_FAMILY_MASK: 502232812Sjmallett case OCTEON_CN50XX & OCTEON_FAMILY_MASK: 503232812Sjmallett case OCTEON_CN38XX & OCTEON_FAMILY_MASK: 504232812Sjmallett case OCTEON_CN31XX & OCTEON_FAMILY_MASK: 505232812Sjmallett case OCTEON_CN58XX & OCTEON_FAMILY_MASK: 506232812Sjmallett case OCTEON_CN66XX & OCTEON_FAMILY_MASK: 507232812Sjmallett case OCTEON_CN52XX & OCTEON_FAMILY_MASK: 508232812Sjmallett case OCTEON_CN61XX & OCTEON_FAMILY_MASK: 509232812Sjmallett case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: 510232812Sjmallett case OCTEON_CN63XX & OCTEON_FAMILY_MASK: 511232812Sjmallett if ((block_id == 0)) 512232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 0) * 0x60000000ull; 513232812Sjmallett break; 514232812Sjmallett case OCTEON_CN56XX & OCTEON_FAMILY_MASK: 515232812Sjmallett if ((block_id <= 1)) 516232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 1) * 0x60000000ull; 517232812Sjmallett break; 518232812Sjmallett case OCTEON_CN68XX & OCTEON_FAMILY_MASK: 519232812Sjmallett if ((block_id <= 3)) 520232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 3) * 0x1000000ull; 521232812Sjmallett break; 522232812Sjmallett } 523232812Sjmallett cvmx_warn("CVMX_LMCX_FADR (block_id = %lu) not supported on this chip\n", 
block_id); 524232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 0) * 0x60000000ull; 525215976Sjmallett} 526215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 527215976Sjmallettstatic inline uint64_t CVMX_LMCX_IFB_CNT(unsigned long block_id) 528215976Sjmallett{ 529215976Sjmallett if (!( 530232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 531232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 532232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 533232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 534232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 535215976Sjmallett cvmx_warn("CVMX_LMCX_IFB_CNT(%lu) is invalid on this chip\n", block_id); 536232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880001D0ull) + ((block_id) & 3) * 0x1000000ull; 537215976Sjmallett} 538215976Sjmallett#else 539232812Sjmallett#define CVMX_LMCX_IFB_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D0ull) + ((block_id) & 3) * 0x1000000ull) 540215976Sjmallett#endif 541215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 542215976Sjmallettstatic inline uint64_t CVMX_LMCX_IFB_CNT_HI(unsigned long block_id) 543215976Sjmallett{ 544215976Sjmallett if (!( 545215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 546215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 547215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 548215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 549215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 550215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 551215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 552215976Sjmallett cvmx_warn("CVMX_LMCX_IFB_CNT_HI(%lu) is invalid on this chip\n", block_id); 553215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull; 
554215976Sjmallett} 555215976Sjmallett#else 556215976Sjmallett#define CVMX_LMCX_IFB_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull) 557215976Sjmallett#endif 558215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 559215976Sjmallettstatic inline uint64_t CVMX_LMCX_IFB_CNT_LO(unsigned long block_id) 560215976Sjmallett{ 561215976Sjmallett if (!( 562215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 563215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 564215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 565215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 566215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 567215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 568215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 569215976Sjmallett cvmx_warn("CVMX_LMCX_IFB_CNT_LO(%lu) is invalid on this chip\n", block_id); 570215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull; 571215976Sjmallett} 572215976Sjmallett#else 573215976Sjmallett#define CVMX_LMCX_IFB_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull) 574215976Sjmallett#endif 575215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 576215976Sjmallettstatic inline uint64_t CVMX_LMCX_INT(unsigned long block_id) 577215976Sjmallett{ 578215976Sjmallett if (!( 579232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 580232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 581232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 582232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 583232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 584215976Sjmallett cvmx_warn("CVMX_LMCX_INT(%lu) is invalid on this chip\n", block_id); 585232812Sjmallett return 
CVMX_ADD_IO_SEG(0x00011800880001F0ull) + ((block_id) & 3) * 0x1000000ull; 586215976Sjmallett} 587215976Sjmallett#else 588232812Sjmallett#define CVMX_LMCX_INT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F0ull) + ((block_id) & 3) * 0x1000000ull) 589215976Sjmallett#endif 590215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 591215976Sjmallettstatic inline uint64_t CVMX_LMCX_INT_EN(unsigned long block_id) 592215976Sjmallett{ 593215976Sjmallett if (!( 594232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 595232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 596232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 597232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 598232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 599215976Sjmallett cvmx_warn("CVMX_LMCX_INT_EN(%lu) is invalid on this chip\n", block_id); 600232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880001E8ull) + ((block_id) & 3) * 0x1000000ull; 601215976Sjmallett} 602215976Sjmallett#else 603232812Sjmallett#define CVMX_LMCX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E8ull) + ((block_id) & 3) * 0x1000000ull) 604215976Sjmallett#endif 605215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 606215976Sjmallettstatic inline uint64_t CVMX_LMCX_MEM_CFG0(unsigned long block_id) 607215976Sjmallett{ 608215976Sjmallett if (!( 609215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 610215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 611215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 612215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 613215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 614215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 615215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 616215976Sjmallett cvmx_warn("CVMX_LMCX_MEM_CFG0(%lu) is invalid 
on this chip\n", block_id); 617215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000000ull) + ((block_id) & 1) * 0x60000000ull; 618215976Sjmallett} 619215976Sjmallett#else 620215976Sjmallett#define CVMX_LMCX_MEM_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000000ull) + ((block_id) & 1) * 0x60000000ull) 621215976Sjmallett#endif 622215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 623215976Sjmallettstatic inline uint64_t CVMX_LMCX_MEM_CFG1(unsigned long block_id) 624215976Sjmallett{ 625215976Sjmallett if (!( 626215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 627215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 628215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 629215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 630215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 631215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 632215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 633215976Sjmallett cvmx_warn("CVMX_LMCX_MEM_CFG1(%lu) is invalid on this chip\n", block_id); 634215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000008ull) + ((block_id) & 1) * 0x60000000ull; 635215976Sjmallett} 636215976Sjmallett#else 637215976Sjmallett#define CVMX_LMCX_MEM_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000008ull) + ((block_id) & 1) * 0x60000000ull) 638215976Sjmallett#endif 639215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 640215976Sjmallettstatic inline uint64_t CVMX_LMCX_MODEREG_PARAMS0(unsigned long block_id) 641215976Sjmallett{ 642215976Sjmallett if (!( 643232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 644232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 645232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 646232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 647232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && 
((block_id == 0))))) 648215976Sjmallett cvmx_warn("CVMX_LMCX_MODEREG_PARAMS0(%lu) is invalid on this chip\n", block_id); 649232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880001A8ull) + ((block_id) & 3) * 0x1000000ull; 650215976Sjmallett} 651215976Sjmallett#else 652232812Sjmallett#define CVMX_LMCX_MODEREG_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A8ull) + ((block_id) & 3) * 0x1000000ull) 653215976Sjmallett#endif 654215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 655215976Sjmallettstatic inline uint64_t CVMX_LMCX_MODEREG_PARAMS1(unsigned long block_id) 656215976Sjmallett{ 657215976Sjmallett if (!( 658232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 659232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 660232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 661232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 662232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 663215976Sjmallett cvmx_warn("CVMX_LMCX_MODEREG_PARAMS1(%lu) is invalid on this chip\n", block_id); 664232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000260ull) + ((block_id) & 3) * 0x1000000ull; 665215976Sjmallett} 666215976Sjmallett#else 667232812Sjmallett#define CVMX_LMCX_MODEREG_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000260ull) + ((block_id) & 3) * 0x1000000ull) 668215976Sjmallett#endif 669215976Sjmallettstatic inline uint64_t CVMX_LMCX_NXM(unsigned long block_id) 670215976Sjmallett{ 671232812Sjmallett switch(cvmx_get_octeon_family()) { 672232812Sjmallett case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: 673232812Sjmallett case OCTEON_CN61XX & OCTEON_FAMILY_MASK: 674232812Sjmallett case OCTEON_CN66XX & OCTEON_FAMILY_MASK: 675232812Sjmallett case OCTEON_CN52XX & OCTEON_FAMILY_MASK: 676232812Sjmallett case OCTEON_CN58XX & OCTEON_FAMILY_MASK: 677232812Sjmallett case OCTEON_CN63XX & OCTEON_FAMILY_MASK: 678232812Sjmallett if ((block_id == 0)) 679232812Sjmallett return 
CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 0) * 0x60000000ull; 680232812Sjmallett break; 681232812Sjmallett case OCTEON_CN56XX & OCTEON_FAMILY_MASK: 682232812Sjmallett if ((block_id <= 1)) 683232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 1) * 0x60000000ull; 684232812Sjmallett break; 685232812Sjmallett case OCTEON_CN68XX & OCTEON_FAMILY_MASK: 686232812Sjmallett if ((block_id <= 3)) 687232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 3) * 0x1000000ull; 688232812Sjmallett break; 689232812Sjmallett } 690232812Sjmallett cvmx_warn("CVMX_LMCX_NXM (block_id = %lu) not supported on this chip\n", block_id); 691232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 0) * 0x60000000ull; 692215976Sjmallett} 693215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 694215976Sjmallettstatic inline uint64_t CVMX_LMCX_OPS_CNT(unsigned long block_id) 695215976Sjmallett{ 696215976Sjmallett if (!( 697232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 698232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 699232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 700232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 701232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 702215976Sjmallett cvmx_warn("CVMX_LMCX_OPS_CNT(%lu) is invalid on this chip\n", block_id); 703232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880001D8ull) + ((block_id) & 3) * 0x1000000ull; 704215976Sjmallett} 705215976Sjmallett#else 706232812Sjmallett#define CVMX_LMCX_OPS_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D8ull) + ((block_id) & 3) * 0x1000000ull) 707215976Sjmallett#endif 708215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 709215976Sjmallettstatic inline uint64_t CVMX_LMCX_OPS_CNT_HI(unsigned long block_id) 710215976Sjmallett{ 711215976Sjmallett if (!( 712215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && 
((block_id == 0))) || 713215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 714215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 715215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 716215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 717215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 718215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 719215976Sjmallett cvmx_warn("CVMX_LMCX_OPS_CNT_HI(%lu) is invalid on this chip\n", block_id); 720215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000060ull) + ((block_id) & 1) * 0x60000000ull; 721215976Sjmallett} 722215976Sjmallett#else 723215976Sjmallett#define CVMX_LMCX_OPS_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000060ull) + ((block_id) & 1) * 0x60000000ull) 724215976Sjmallett#endif 725215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 726215976Sjmallettstatic inline uint64_t CVMX_LMCX_OPS_CNT_LO(unsigned long block_id) 727215976Sjmallett{ 728215976Sjmallett if (!( 729215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 730215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 731215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 732215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 733215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 734215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 735215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 736215976Sjmallett cvmx_warn("CVMX_LMCX_OPS_CNT_LO(%lu) is invalid on this chip\n", block_id); 737215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000058ull) + ((block_id) & 1) * 0x60000000ull; 738215976Sjmallett} 739215976Sjmallett#else 740215976Sjmallett#define CVMX_LMCX_OPS_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000058ull) + ((block_id) & 1) * 0x60000000ull) 741215976Sjmallett#endif 
742215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 743215976Sjmallettstatic inline uint64_t CVMX_LMCX_PHY_CTL(unsigned long block_id) 744215976Sjmallett{ 745215976Sjmallett if (!( 746232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 747232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 748232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 749232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 750232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 751215976Sjmallett cvmx_warn("CVMX_LMCX_PHY_CTL(%lu) is invalid on this chip\n", block_id); 752232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000210ull) + ((block_id) & 3) * 0x1000000ull; 753215976Sjmallett} 754215976Sjmallett#else 755232812Sjmallett#define CVMX_LMCX_PHY_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000210ull) + ((block_id) & 3) * 0x1000000ull) 756215976Sjmallett#endif 757215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 758215976Sjmallettstatic inline uint64_t CVMX_LMCX_PLL_BWCTL(unsigned long block_id) 759215976Sjmallett{ 760215976Sjmallett if (!( 761215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 762215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 763215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))))) 764215976Sjmallett cvmx_warn("CVMX_LMCX_PLL_BWCTL(%lu) is invalid on this chip\n", block_id); 765215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000040ull); 766215976Sjmallett} 767215976Sjmallett#else 768215976Sjmallett#define CVMX_LMCX_PLL_BWCTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000040ull)) 769215976Sjmallett#endif 770215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 771215976Sjmallettstatic inline uint64_t CVMX_LMCX_PLL_CTL(unsigned long block_id) 772215976Sjmallett{ 773215976Sjmallett if (!( 774215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 775215976Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 776215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 777215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 778215976Sjmallett cvmx_warn("CVMX_LMCX_PLL_CTL(%lu) is invalid on this chip\n", block_id); 779215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800880000A8ull) + ((block_id) & 1) * 0x60000000ull; 780215976Sjmallett} 781215976Sjmallett#else 782215976Sjmallett#define CVMX_LMCX_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A8ull) + ((block_id) & 1) * 0x60000000ull) 783215976Sjmallett#endif 784215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 785215976Sjmallettstatic inline uint64_t CVMX_LMCX_PLL_STATUS(unsigned long block_id) 786215976Sjmallett{ 787215976Sjmallett if (!( 788215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 789215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 790215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 791215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 792215976Sjmallett cvmx_warn("CVMX_LMCX_PLL_STATUS(%lu) is invalid on this chip\n", block_id); 793215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800880000B0ull) + ((block_id) & 1) * 0x60000000ull; 794215976Sjmallett} 795215976Sjmallett#else 796215976Sjmallett#define CVMX_LMCX_PLL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B0ull) + ((block_id) & 1) * 0x60000000ull) 797215976Sjmallett#endif 798215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 799215976Sjmallettstatic inline uint64_t CVMX_LMCX_READ_LEVEL_CTL(unsigned long block_id) 800215976Sjmallett{ 801215976Sjmallett if (!( 802215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 803215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))))) 804215976Sjmallett cvmx_warn("CVMX_LMCX_READ_LEVEL_CTL(%lu) is invalid on this chip\n", block_id); 805215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000140ull) + 
((block_id) & 1) * 0x60000000ull; 806215976Sjmallett} 807215976Sjmallett#else 808215976Sjmallett#define CVMX_LMCX_READ_LEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000140ull) + ((block_id) & 1) * 0x60000000ull) 809215976Sjmallett#endif 810215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 811215976Sjmallettstatic inline uint64_t CVMX_LMCX_READ_LEVEL_DBG(unsigned long block_id) 812215976Sjmallett{ 813215976Sjmallett if (!( 814215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 815215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))))) 816215976Sjmallett cvmx_warn("CVMX_LMCX_READ_LEVEL_DBG(%lu) is invalid on this chip\n", block_id); 817215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000148ull) + ((block_id) & 1) * 0x60000000ull; 818215976Sjmallett} 819215976Sjmallett#else 820215976Sjmallett#define CVMX_LMCX_READ_LEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000148ull) + ((block_id) & 1) * 0x60000000ull) 821215976Sjmallett#endif 822215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 823215976Sjmallettstatic inline uint64_t CVMX_LMCX_READ_LEVEL_RANKX(unsigned long offset, unsigned long block_id) 824215976Sjmallett{ 825215976Sjmallett if (!( 826215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) || 827215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))))) 828215976Sjmallett cvmx_warn("CVMX_LMCX_READ_LEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id); 829215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000100ull) + (((offset) & 3) + ((block_id) & 1) * 0xC000000ull) * 8; 830215976Sjmallett} 831215976Sjmallett#else 832215976Sjmallett#define CVMX_LMCX_READ_LEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000100ull) + (((offset) & 3) + ((block_id) & 1) * 0xC000000ull) * 8) 833215976Sjmallett#endif 834215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 835215976Sjmallettstatic inline uint64_t CVMX_LMCX_RESET_CTL(unsigned long 
block_id) 836215976Sjmallett{ 837215976Sjmallett if (!( 838232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 839232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 840232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 841232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 842232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 843215976Sjmallett cvmx_warn("CVMX_LMCX_RESET_CTL(%lu) is invalid on this chip\n", block_id); 844232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000180ull) + ((block_id) & 3) * 0x1000000ull; 845215976Sjmallett} 846215976Sjmallett#else 847232812Sjmallett#define CVMX_LMCX_RESET_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000180ull) + ((block_id) & 3) * 0x1000000ull) 848215976Sjmallett#endif 849215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 850215976Sjmallettstatic inline uint64_t CVMX_LMCX_RLEVEL_CTL(unsigned long block_id) 851215976Sjmallett{ 852215976Sjmallett if (!( 853232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 854232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 855232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 856232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 857232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 858215976Sjmallett cvmx_warn("CVMX_LMCX_RLEVEL_CTL(%lu) is invalid on this chip\n", block_id); 859232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880002A0ull) + ((block_id) & 3) * 0x1000000ull; 860215976Sjmallett} 861215976Sjmallett#else 862232812Sjmallett#define CVMX_LMCX_RLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A0ull) + ((block_id) & 3) * 0x1000000ull) 863215976Sjmallett#endif 864215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 865215976Sjmallettstatic inline uint64_t CVMX_LMCX_RLEVEL_DBG(unsigned long block_id) 866215976Sjmallett{ 867215976Sjmallett if (!( 868232812Sjmallett 
(OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 869232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 870232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 871232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 872232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 873215976Sjmallett cvmx_warn("CVMX_LMCX_RLEVEL_DBG(%lu) is invalid on this chip\n", block_id); 874232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880002A8ull) + ((block_id) & 3) * 0x1000000ull; 875215976Sjmallett} 876215976Sjmallett#else 877232812Sjmallett#define CVMX_LMCX_RLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A8ull) + ((block_id) & 3) * 0x1000000ull) 878215976Sjmallett#endif 879215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 880215976Sjmallettstatic inline uint64_t CVMX_LMCX_RLEVEL_RANKX(unsigned long offset, unsigned long block_id) 881215976Sjmallett{ 882215976Sjmallett if (!( 883232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset <= 3)) && ((block_id == 0)))) || 884232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))) || 885232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 3)) && ((block_id == 0)))) || 886232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 3)) && ((block_id <= 3)))) || 887232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset <= 3)) && ((block_id == 0)))))) 888215976Sjmallett cvmx_warn("CVMX_LMCX_RLEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id); 889232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000280ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8; 890215976Sjmallett} 891215976Sjmallett#else 892232812Sjmallett#define CVMX_LMCX_RLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000280ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8) 893215976Sjmallett#endif 894215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 895215976Sjmallettstatic 
inline uint64_t CVMX_LMCX_RODT_COMP_CTL(unsigned long block_id) 896215976Sjmallett{ 897215976Sjmallett if (!( 898215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 899215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 900215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 901215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 902215976Sjmallett cvmx_warn("CVMX_LMCX_RODT_COMP_CTL(%lu) is invalid on this chip\n", block_id); 903215976Sjmallett return CVMX_ADD_IO_SEG(0x00011800880000A0ull) + ((block_id) & 1) * 0x60000000ull; 904215976Sjmallett} 905215976Sjmallett#else 906215976Sjmallett#define CVMX_LMCX_RODT_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A0ull) + ((block_id) & 1) * 0x60000000ull) 907215976Sjmallett#endif 908215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 909215976Sjmallettstatic inline uint64_t CVMX_LMCX_RODT_CTL(unsigned long block_id) 910215976Sjmallett{ 911215976Sjmallett if (!( 912215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 913215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 914215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 915215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 916215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 917215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 918215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 919215976Sjmallett cvmx_warn("CVMX_LMCX_RODT_CTL(%lu) is invalid on this chip\n", block_id); 920215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000078ull) + ((block_id) & 1) * 0x60000000ull; 921215976Sjmallett} 922215976Sjmallett#else 923215976Sjmallett#define CVMX_LMCX_RODT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000078ull) + ((block_id) & 1) * 0x60000000ull) 924215976Sjmallett#endif 925215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 
926215976Sjmallettstatic inline uint64_t CVMX_LMCX_RODT_MASK(unsigned long block_id) 927215976Sjmallett{ 928215976Sjmallett if (!( 929232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 930232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 931232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 932232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 933232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 934215976Sjmallett cvmx_warn("CVMX_LMCX_RODT_MASK(%lu) is invalid on this chip\n", block_id); 935232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000268ull) + ((block_id) & 3) * 0x1000000ull; 936215976Sjmallett} 937215976Sjmallett#else 938232812Sjmallett#define CVMX_LMCX_RODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x0001180088000268ull) + ((block_id) & 3) * 0x1000000ull) 939215976Sjmallett#endif 940215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 941232812Sjmallettstatic inline uint64_t CVMX_LMCX_SCRAMBLED_FADR(unsigned long block_id) 942232812Sjmallett{ 943232812Sjmallett if (!( 944232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 945232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 946232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 947232812Sjmallett cvmx_warn("CVMX_LMCX_SCRAMBLED_FADR(%lu) is invalid on this chip\n", block_id); 948232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000330ull); 949232812Sjmallett} 950232812Sjmallett#else 951232812Sjmallett#define CVMX_LMCX_SCRAMBLED_FADR(block_id) (CVMX_ADD_IO_SEG(0x0001180088000330ull)) 952232812Sjmallett#endif 953232812Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 954232812Sjmallettstatic inline uint64_t CVMX_LMCX_SCRAMBLE_CFG0(unsigned long block_id) 955232812Sjmallett{ 956232812Sjmallett if (!( 957232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 958232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) 
|| 959232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 960232812Sjmallett cvmx_warn("CVMX_LMCX_SCRAMBLE_CFG0(%lu) is invalid on this chip\n", block_id); 961232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000320ull); 962232812Sjmallett} 963232812Sjmallett#else 964232812Sjmallett#define CVMX_LMCX_SCRAMBLE_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000320ull)) 965232812Sjmallett#endif 966232812Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 967232812Sjmallettstatic inline uint64_t CVMX_LMCX_SCRAMBLE_CFG1(unsigned long block_id) 968232812Sjmallett{ 969232812Sjmallett if (!( 970232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 971232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 972232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 973232812Sjmallett cvmx_warn("CVMX_LMCX_SCRAMBLE_CFG1(%lu) is invalid on this chip\n", block_id); 974232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000328ull); 975232812Sjmallett} 976232812Sjmallett#else 977232812Sjmallett#define CVMX_LMCX_SCRAMBLE_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000328ull)) 978232812Sjmallett#endif 979232812Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 980215976Sjmallettstatic inline uint64_t CVMX_LMCX_SLOT_CTL0(unsigned long block_id) 981215976Sjmallett{ 982215976Sjmallett if (!( 983232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 984232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 985232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 986232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 987232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 988215976Sjmallett cvmx_warn("CVMX_LMCX_SLOT_CTL0(%lu) is invalid on this chip\n", block_id); 989232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880001F8ull) + ((block_id) & 3) * 0x1000000ull; 990215976Sjmallett} 991215976Sjmallett#else 992232812Sjmallett#define 
CVMX_LMCX_SLOT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F8ull) + ((block_id) & 3) * 0x1000000ull) 993215976Sjmallett#endif 994215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 995215976Sjmallettstatic inline uint64_t CVMX_LMCX_SLOT_CTL1(unsigned long block_id) 996215976Sjmallett{ 997215976Sjmallett if (!( 998232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 999232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 1000232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 1001232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 1002232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 1003215976Sjmallett cvmx_warn("CVMX_LMCX_SLOT_CTL1(%lu) is invalid on this chip\n", block_id); 1004232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000200ull) + ((block_id) & 3) * 0x1000000ull; 1005215976Sjmallett} 1006215976Sjmallett#else 1007232812Sjmallett#define CVMX_LMCX_SLOT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000200ull) + ((block_id) & 3) * 0x1000000ull) 1008215976Sjmallett#endif 1009215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1010215976Sjmallettstatic inline uint64_t CVMX_LMCX_SLOT_CTL2(unsigned long block_id) 1011215976Sjmallett{ 1012215976Sjmallett if (!( 1013232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 1014232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 1015232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 1016232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 1017232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 1018215976Sjmallett cvmx_warn("CVMX_LMCX_SLOT_CTL2(%lu) is invalid on this chip\n", block_id); 1019232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000208ull) + ((block_id) & 3) * 0x1000000ull; 1020215976Sjmallett} 1021215976Sjmallett#else 1022232812Sjmallett#define CVMX_LMCX_SLOT_CTL2(block_id) 
(CVMX_ADD_IO_SEG(0x0001180088000208ull) + ((block_id) & 3) * 0x1000000ull) 1023215976Sjmallett#endif 1024215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1025215976Sjmallettstatic inline uint64_t CVMX_LMCX_TIMING_PARAMS0(unsigned long block_id) 1026215976Sjmallett{ 1027215976Sjmallett if (!( 1028232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 1029232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 1030232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 1031232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 1032232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 1033215976Sjmallett cvmx_warn("CVMX_LMCX_TIMING_PARAMS0(%lu) is invalid on this chip\n", block_id); 1034232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000198ull) + ((block_id) & 3) * 0x1000000ull; 1035215976Sjmallett} 1036215976Sjmallett#else 1037232812Sjmallett#define CVMX_LMCX_TIMING_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000198ull) + ((block_id) & 3) * 0x1000000ull) 1038215976Sjmallett#endif 1039215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1040215976Sjmallettstatic inline uint64_t CVMX_LMCX_TIMING_PARAMS1(unsigned long block_id) 1041215976Sjmallett{ 1042215976Sjmallett if (!( 1043232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 1044232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 1045232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 1046232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 1047232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 1048215976Sjmallett cvmx_warn("CVMX_LMCX_TIMING_PARAMS1(%lu) is invalid on this chip\n", block_id); 1049232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880001A0ull) + ((block_id) & 3) * 0x1000000ull; 1050215976Sjmallett} 1051215976Sjmallett#else 1052232812Sjmallett#define CVMX_LMCX_TIMING_PARAMS1(block_id) 
(CVMX_ADD_IO_SEG(0x00011800880001A0ull) + ((block_id) & 3) * 0x1000000ull) 1053215976Sjmallett#endif 1054215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1055215976Sjmallettstatic inline uint64_t CVMX_LMCX_TRO_CTL(unsigned long block_id) 1056215976Sjmallett{ 1057215976Sjmallett if (!( 1058232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 1059232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 1060232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 1061232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 1062232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 1063215976Sjmallett cvmx_warn("CVMX_LMCX_TRO_CTL(%lu) is invalid on this chip\n", block_id); 1064232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000248ull) + ((block_id) & 3) * 0x1000000ull; 1065215976Sjmallett} 1066215976Sjmallett#else 1067232812Sjmallett#define CVMX_LMCX_TRO_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000248ull) + ((block_id) & 3) * 0x1000000ull) 1068215976Sjmallett#endif 1069215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1070215976Sjmallettstatic inline uint64_t CVMX_LMCX_TRO_STAT(unsigned long block_id) 1071215976Sjmallett{ 1072215976Sjmallett if (!( 1073232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 1074232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 1075232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 1076232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 1077232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 1078215976Sjmallett cvmx_warn("CVMX_LMCX_TRO_STAT(%lu) is invalid on this chip\n", block_id); 1079232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000250ull) + ((block_id) & 3) * 0x1000000ull; 1080215976Sjmallett} 1081215976Sjmallett#else 1082232812Sjmallett#define CVMX_LMCX_TRO_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180088000250ull) + ((block_id) & 3) 
* 0x1000000ull) 1083215976Sjmallett#endif 1084215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1085215976Sjmallettstatic inline uint64_t CVMX_LMCX_WLEVEL_CTL(unsigned long block_id) 1086215976Sjmallett{ 1087215976Sjmallett if (!( 1088232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 1089232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 1090232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 1091232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 1092232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 1093215976Sjmallett cvmx_warn("CVMX_LMCX_WLEVEL_CTL(%lu) is invalid on this chip\n", block_id); 1094232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000300ull) + ((block_id) & 3) * 0x1000000ull; 1095215976Sjmallett} 1096215976Sjmallett#else 1097232812Sjmallett#define CVMX_LMCX_WLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000300ull) + ((block_id) & 3) * 0x1000000ull) 1098215976Sjmallett#endif 1099215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1100215976Sjmallettstatic inline uint64_t CVMX_LMCX_WLEVEL_DBG(unsigned long block_id) 1101215976Sjmallett{ 1102215976Sjmallett if (!( 1103232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 1104232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 1105232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 1106232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 1107232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 1108215976Sjmallett cvmx_warn("CVMX_LMCX_WLEVEL_DBG(%lu) is invalid on this chip\n", block_id); 1109232812Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000308ull) + ((block_id) & 3) * 0x1000000ull; 1110215976Sjmallett} 1111215976Sjmallett#else 1112232812Sjmallett#define CVMX_LMCX_WLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000308ull) + ((block_id) & 3) * 0x1000000ull) 1113215976Sjmallett#endif 
1114215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1115215976Sjmallettstatic inline uint64_t CVMX_LMCX_WLEVEL_RANKX(unsigned long offset, unsigned long block_id) 1116215976Sjmallett{ 1117215976Sjmallett if (!( 1118232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset <= 3)) && ((block_id == 0)))) || 1119232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0)))) || 1120232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 3)) && ((block_id == 0)))) || 1121232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 3)) && ((block_id <= 3)))) || 1122232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset <= 3)) && ((block_id == 0)))))) 1123215976Sjmallett cvmx_warn("CVMX_LMCX_WLEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id); 1124232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880002B0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8; 1125215976Sjmallett} 1126215976Sjmallett#else 1127232812Sjmallett#define CVMX_LMCX_WLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800880002B0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8) 1128215976Sjmallett#endif 1129215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1130215976Sjmallettstatic inline uint64_t CVMX_LMCX_WODT_CTL0(unsigned long block_id) 1131215976Sjmallett{ 1132215976Sjmallett if (!( 1133215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1134215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1135215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) || 1136215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) || 1137215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1138215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) || 1139215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))))) 1140215976Sjmallett cvmx_warn("CVMX_LMCX_WODT_CTL0(%lu) is invalid on this chip\n", block_id); 
1141215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000030ull) + ((block_id) & 1) * 0x60000000ull; 1142215976Sjmallett} 1143215976Sjmallett#else 1144215976Sjmallett#define CVMX_LMCX_WODT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000030ull) + ((block_id) & 1) * 0x60000000ull) 1145215976Sjmallett#endif 1146215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1147215976Sjmallettstatic inline uint64_t CVMX_LMCX_WODT_CTL1(unsigned long block_id) 1148215976Sjmallett{ 1149215976Sjmallett if (!( 1150215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) || 1151215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) || 1152215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) || 1153215976Sjmallett (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))))) 1154215976Sjmallett cvmx_warn("CVMX_LMCX_WODT_CTL1(%lu) is invalid on this chip\n", block_id); 1155215976Sjmallett return CVMX_ADD_IO_SEG(0x0001180088000080ull) + ((block_id) & 1) * 0x60000000ull; 1156215976Sjmallett} 1157215976Sjmallett#else 1158215976Sjmallett#define CVMX_LMCX_WODT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000080ull) + ((block_id) & 1) * 0x60000000ull) 1159215976Sjmallett#endif 1160215976Sjmallett#if CVMX_ENABLE_CSR_ADDRESS_CHECKING 1161215976Sjmallettstatic inline uint64_t CVMX_LMCX_WODT_MASK(unsigned long block_id) 1162215976Sjmallett{ 1163215976Sjmallett if (!( 1164232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) || 1165232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) || 1166232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) || 1167232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) || 1168232812Sjmallett (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0))))) 1169215976Sjmallett cvmx_warn("CVMX_LMCX_WODT_MASK(%lu) is invalid on this chip\n", block_id); 1170232812Sjmallett return CVMX_ADD_IO_SEG(0x00011800880001B0ull) + ((block_id) & 3) * 0x1000000ull; 1171215976Sjmallett} 
1172215976Sjmallett#else 1173232812Sjmallett#define CVMX_LMCX_WODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B0ull) + ((block_id) & 3) * 0x1000000ull) 1174215976Sjmallett#endif 1175215976Sjmallett 1176215976Sjmallett/** 1177215976Sjmallett * cvmx_lmc#_bist_ctl 1178215976Sjmallett * 1179215976Sjmallett * Notes: 1180215976Sjmallett * This controls BiST only for the memories that operate on DCLK. The normal, chip-wide BiST flow 1181215976Sjmallett * controls BiST for the memories that operate on ECLK. 1182215976Sjmallett */ 1183232812Sjmallettunion cvmx_lmcx_bist_ctl { 1184215976Sjmallett uint64_t u64; 1185232812Sjmallett struct cvmx_lmcx_bist_ctl_s { 1186232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1187215976Sjmallett uint64_t reserved_1_63 : 63; 1188215976Sjmallett uint64_t start : 1; /**< A 0->1 transition causes BiST to run. */ 1189215976Sjmallett#else 1190215976Sjmallett uint64_t start : 1; 1191215976Sjmallett uint64_t reserved_1_63 : 63; 1192215976Sjmallett#endif 1193215976Sjmallett } s; 1194215976Sjmallett struct cvmx_lmcx_bist_ctl_s cn50xx; 1195215976Sjmallett struct cvmx_lmcx_bist_ctl_s cn52xx; 1196215976Sjmallett struct cvmx_lmcx_bist_ctl_s cn52xxp1; 1197215976Sjmallett struct cvmx_lmcx_bist_ctl_s cn56xx; 1198215976Sjmallett struct cvmx_lmcx_bist_ctl_s cn56xxp1; 1199215976Sjmallett}; 1200215976Sjmalletttypedef union cvmx_lmcx_bist_ctl cvmx_lmcx_bist_ctl_t; 1201215976Sjmallett 1202215976Sjmallett/** 1203215976Sjmallett * cvmx_lmc#_bist_result 1204215976Sjmallett * 1205215976Sjmallett * Notes: 1206215976Sjmallett * Access to the internal BiST results 1207215976Sjmallett * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail). 
1208215976Sjmallett */ 1209232812Sjmallettunion cvmx_lmcx_bist_result { 1210215976Sjmallett uint64_t u64; 1211232812Sjmallett struct cvmx_lmcx_bist_result_s { 1212232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1213215976Sjmallett uint64_t reserved_11_63 : 53; 1214215976Sjmallett uint64_t csrd2e : 1; /**< BiST result of CSRD2E memory (0=pass, !0=fail) */ 1215215976Sjmallett uint64_t csre2d : 1; /**< BiST result of CSRE2D memory (0=pass, !0=fail) */ 1216215976Sjmallett uint64_t mwf : 1; /**< BiST result of MWF memories (0=pass, !0=fail) */ 1217215976Sjmallett uint64_t mwd : 3; /**< BiST result of MWD memories (0=pass, !0=fail) */ 1218215976Sjmallett uint64_t mwc : 1; /**< BiST result of MWC memories (0=pass, !0=fail) */ 1219215976Sjmallett uint64_t mrf : 1; /**< BiST result of MRF memories (0=pass, !0=fail) */ 1220215976Sjmallett uint64_t mrd : 3; /**< BiST result of MRD memories (0=pass, !0=fail) */ 1221215976Sjmallett#else 1222215976Sjmallett uint64_t mrd : 3; 1223215976Sjmallett uint64_t mrf : 1; 1224215976Sjmallett uint64_t mwc : 1; 1225215976Sjmallett uint64_t mwd : 3; 1226215976Sjmallett uint64_t mwf : 1; 1227215976Sjmallett uint64_t csre2d : 1; 1228215976Sjmallett uint64_t csrd2e : 1; 1229215976Sjmallett uint64_t reserved_11_63 : 53; 1230215976Sjmallett#endif 1231215976Sjmallett } s; 1232232812Sjmallett struct cvmx_lmcx_bist_result_cn50xx { 1233232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1234215976Sjmallett uint64_t reserved_9_63 : 55; 1235215976Sjmallett uint64_t mwf : 1; /**< BiST result of MWF memories (0=pass, !0=fail) */ 1236215976Sjmallett uint64_t mwd : 3; /**< BiST result of MWD memories (0=pass, !0=fail) */ 1237215976Sjmallett uint64_t mwc : 1; /**< BiST result of MWC memories (0=pass, !0=fail) */ 1238215976Sjmallett uint64_t mrf : 1; /**< BiST result of MRF memories (0=pass, !0=fail) */ 1239215976Sjmallett uint64_t mrd : 3; /**< BiST result of MRD memories (0=pass, !0=fail) */ 1240215976Sjmallett#else 1241215976Sjmallett uint64_t mrd : 3; 
1242215976Sjmallett uint64_t mrf : 1; 1243215976Sjmallett uint64_t mwc : 1; 1244215976Sjmallett uint64_t mwd : 3; 1245215976Sjmallett uint64_t mwf : 1; 1246215976Sjmallett uint64_t reserved_9_63 : 55; 1247215976Sjmallett#endif 1248215976Sjmallett } cn50xx; 1249215976Sjmallett struct cvmx_lmcx_bist_result_s cn52xx; 1250215976Sjmallett struct cvmx_lmcx_bist_result_s cn52xxp1; 1251215976Sjmallett struct cvmx_lmcx_bist_result_s cn56xx; 1252215976Sjmallett struct cvmx_lmcx_bist_result_s cn56xxp1; 1253215976Sjmallett}; 1254215976Sjmalletttypedef union cvmx_lmcx_bist_result cvmx_lmcx_bist_result_t; 1255215976Sjmallett 1256215976Sjmallett/** 1257215976Sjmallett * cvmx_lmc#_char_ctl 1258215976Sjmallett * 1259215976Sjmallett * LMC_CHAR_CTL = LMC Characterization Control 1260215976Sjmallett * This register is an assortment of various control fields needed to charecterize the DDR3 interface 1261215976Sjmallett */ 1262232812Sjmallettunion cvmx_lmcx_char_ctl { 1263215976Sjmallett uint64_t u64; 1264232812Sjmallett struct cvmx_lmcx_char_ctl_s { 1265232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1266232812Sjmallett uint64_t reserved_44_63 : 20; 1267232812Sjmallett uint64_t dr : 1; /**< Pattern at Data Rate (not Clock Rate) */ 1268232812Sjmallett uint64_t skew_on : 1; /**< Skew adjacent bits */ 1269232812Sjmallett uint64_t en : 1; /**< Enable characterization */ 1270232812Sjmallett uint64_t sel : 1; /**< Pattern select 1271232812Sjmallett 0 = PRBS 1272232812Sjmallett 1 = Programmable pattern */ 1273232812Sjmallett uint64_t prog : 8; /**< Programmable pattern */ 1274232812Sjmallett uint64_t prbs : 32; /**< PRBS Polynomial */ 1275232812Sjmallett#else 1276232812Sjmallett uint64_t prbs : 32; 1277232812Sjmallett uint64_t prog : 8; 1278232812Sjmallett uint64_t sel : 1; 1279232812Sjmallett uint64_t en : 1; 1280232812Sjmallett uint64_t skew_on : 1; 1281232812Sjmallett uint64_t dr : 1; 1282232812Sjmallett uint64_t reserved_44_63 : 20; 1283232812Sjmallett#endif 1284232812Sjmallett } s; 
1285232812Sjmallett struct cvmx_lmcx_char_ctl_s cn61xx; 1286232812Sjmallett struct cvmx_lmcx_char_ctl_cn63xx { 1287232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1288215976Sjmallett uint64_t reserved_42_63 : 22; 1289215976Sjmallett uint64_t en : 1; /**< Enable characterization */ 1290215976Sjmallett uint64_t sel : 1; /**< Pattern select 1291215976Sjmallett 0 = PRBS 1292215976Sjmallett 1 = Programmable pattern */ 1293215976Sjmallett uint64_t prog : 8; /**< Programmable pattern */ 1294215976Sjmallett uint64_t prbs : 32; /**< PRBS Polynomial */ 1295215976Sjmallett#else 1296215976Sjmallett uint64_t prbs : 32; 1297215976Sjmallett uint64_t prog : 8; 1298215976Sjmallett uint64_t sel : 1; 1299215976Sjmallett uint64_t en : 1; 1300215976Sjmallett uint64_t reserved_42_63 : 22; 1301215976Sjmallett#endif 1302232812Sjmallett } cn63xx; 1303232812Sjmallett struct cvmx_lmcx_char_ctl_cn63xx cn63xxp1; 1304232812Sjmallett struct cvmx_lmcx_char_ctl_s cn66xx; 1305232812Sjmallett struct cvmx_lmcx_char_ctl_s cn68xx; 1306232812Sjmallett struct cvmx_lmcx_char_ctl_cn63xx cn68xxp1; 1307232812Sjmallett struct cvmx_lmcx_char_ctl_s cnf71xx; 1308215976Sjmallett}; 1309215976Sjmalletttypedef union cvmx_lmcx_char_ctl cvmx_lmcx_char_ctl_t; 1310215976Sjmallett 1311215976Sjmallett/** 1312215976Sjmallett * cvmx_lmc#_char_mask0 1313215976Sjmallett * 1314215976Sjmallett * LMC_CHAR_MASK0 = LMC Characterization Mask0 1315215976Sjmallett * This register is an assortment of various control fields needed to charecterize the DDR3 interface 1316215976Sjmallett */ 1317232812Sjmallettunion cvmx_lmcx_char_mask0 { 1318215976Sjmallett uint64_t u64; 1319232812Sjmallett struct cvmx_lmcx_char_mask0_s { 1320232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1321215976Sjmallett uint64_t mask : 64; /**< Mask for DQ0[63:0] */ 1322215976Sjmallett#else 1323215976Sjmallett uint64_t mask : 64; 1324215976Sjmallett#endif 1325215976Sjmallett } s; 1326232812Sjmallett struct cvmx_lmcx_char_mask0_s cn61xx; 1327215976Sjmallett struct 
cvmx_lmcx_char_mask0_s cn63xx; 1328215976Sjmallett struct cvmx_lmcx_char_mask0_s cn63xxp1; 1329232812Sjmallett struct cvmx_lmcx_char_mask0_s cn66xx; 1330232812Sjmallett struct cvmx_lmcx_char_mask0_s cn68xx; 1331232812Sjmallett struct cvmx_lmcx_char_mask0_s cn68xxp1; 1332232812Sjmallett struct cvmx_lmcx_char_mask0_s cnf71xx; 1333215976Sjmallett}; 1334215976Sjmalletttypedef union cvmx_lmcx_char_mask0 cvmx_lmcx_char_mask0_t; 1335215976Sjmallett 1336215976Sjmallett/** 1337215976Sjmallett * cvmx_lmc#_char_mask1 1338215976Sjmallett * 1339215976Sjmallett * LMC_CHAR_MASK1 = LMC Characterization Mask1 1340215976Sjmallett * This register is an assortment of various control fields needed to charecterize the DDR3 interface 1341215976Sjmallett */ 1342232812Sjmallettunion cvmx_lmcx_char_mask1 { 1343215976Sjmallett uint64_t u64; 1344232812Sjmallett struct cvmx_lmcx_char_mask1_s { 1345232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1346215976Sjmallett uint64_t reserved_8_63 : 56; 1347215976Sjmallett uint64_t mask : 8; /**< Mask for DQ0[71:64] */ 1348215976Sjmallett#else 1349215976Sjmallett uint64_t mask : 8; 1350215976Sjmallett uint64_t reserved_8_63 : 56; 1351215976Sjmallett#endif 1352215976Sjmallett } s; 1353232812Sjmallett struct cvmx_lmcx_char_mask1_s cn61xx; 1354215976Sjmallett struct cvmx_lmcx_char_mask1_s cn63xx; 1355215976Sjmallett struct cvmx_lmcx_char_mask1_s cn63xxp1; 1356232812Sjmallett struct cvmx_lmcx_char_mask1_s cn66xx; 1357232812Sjmallett struct cvmx_lmcx_char_mask1_s cn68xx; 1358232812Sjmallett struct cvmx_lmcx_char_mask1_s cn68xxp1; 1359232812Sjmallett struct cvmx_lmcx_char_mask1_s cnf71xx; 1360215976Sjmallett}; 1361215976Sjmalletttypedef union cvmx_lmcx_char_mask1 cvmx_lmcx_char_mask1_t; 1362215976Sjmallett 1363215976Sjmallett/** 1364215976Sjmallett * cvmx_lmc#_char_mask2 1365215976Sjmallett * 1366215976Sjmallett * LMC_CHAR_MASK2 = LMC Characterization Mask2 1367215976Sjmallett * This register is an assortment of various control fields needed to charecterize the 
DDR3 interface 1368215976Sjmallett */ 1369232812Sjmallettunion cvmx_lmcx_char_mask2 { 1370215976Sjmallett uint64_t u64; 1371232812Sjmallett struct cvmx_lmcx_char_mask2_s { 1372232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1373215976Sjmallett uint64_t mask : 64; /**< Mask for DQ1[63:0] */ 1374215976Sjmallett#else 1375215976Sjmallett uint64_t mask : 64; 1376215976Sjmallett#endif 1377215976Sjmallett } s; 1378232812Sjmallett struct cvmx_lmcx_char_mask2_s cn61xx; 1379215976Sjmallett struct cvmx_lmcx_char_mask2_s cn63xx; 1380215976Sjmallett struct cvmx_lmcx_char_mask2_s cn63xxp1; 1381232812Sjmallett struct cvmx_lmcx_char_mask2_s cn66xx; 1382232812Sjmallett struct cvmx_lmcx_char_mask2_s cn68xx; 1383232812Sjmallett struct cvmx_lmcx_char_mask2_s cn68xxp1; 1384232812Sjmallett struct cvmx_lmcx_char_mask2_s cnf71xx; 1385215976Sjmallett}; 1386215976Sjmalletttypedef union cvmx_lmcx_char_mask2 cvmx_lmcx_char_mask2_t; 1387215976Sjmallett 1388215976Sjmallett/** 1389215976Sjmallett * cvmx_lmc#_char_mask3 1390215976Sjmallett * 1391215976Sjmallett * LMC_CHAR_MASK3 = LMC Characterization Mask3 1392215976Sjmallett * This register is an assortment of various control fields needed to charecterize the DDR3 interface 1393215976Sjmallett */ 1394232812Sjmallettunion cvmx_lmcx_char_mask3 { 1395215976Sjmallett uint64_t u64; 1396232812Sjmallett struct cvmx_lmcx_char_mask3_s { 1397232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1398215976Sjmallett uint64_t reserved_8_63 : 56; 1399215976Sjmallett uint64_t mask : 8; /**< Mask for DQ1[71:64] */ 1400215976Sjmallett#else 1401215976Sjmallett uint64_t mask : 8; 1402215976Sjmallett uint64_t reserved_8_63 : 56; 1403215976Sjmallett#endif 1404215976Sjmallett } s; 1405232812Sjmallett struct cvmx_lmcx_char_mask3_s cn61xx; 1406215976Sjmallett struct cvmx_lmcx_char_mask3_s cn63xx; 1407215976Sjmallett struct cvmx_lmcx_char_mask3_s cn63xxp1; 1408232812Sjmallett struct cvmx_lmcx_char_mask3_s cn66xx; 1409232812Sjmallett struct cvmx_lmcx_char_mask3_s cn68xx; 
1410232812Sjmallett struct cvmx_lmcx_char_mask3_s cn68xxp1; 1411232812Sjmallett struct cvmx_lmcx_char_mask3_s cnf71xx; 1412215976Sjmallett}; 1413215976Sjmalletttypedef union cvmx_lmcx_char_mask3 cvmx_lmcx_char_mask3_t; 1414215976Sjmallett 1415215976Sjmallett/** 1416215976Sjmallett * cvmx_lmc#_char_mask4 1417215976Sjmallett * 1418215976Sjmallett * LMC_CHAR_MASK4 = LMC Characterization Mask4 1419215976Sjmallett * This register is an assortment of various control fields needed to charecterize the DDR3 interface 1420215976Sjmallett */ 1421232812Sjmallettunion cvmx_lmcx_char_mask4 { 1422215976Sjmallett uint64_t u64; 1423232812Sjmallett struct cvmx_lmcx_char_mask4_s { 1424232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1425215976Sjmallett uint64_t reserved_33_63 : 31; 1426215976Sjmallett uint64_t reset_n_mask : 1; /**< Mask for RESET_L */ 1427215976Sjmallett uint64_t a_mask : 16; /**< Mask for A[15:0] */ 1428215976Sjmallett uint64_t ba_mask : 3; /**< Mask for BA[2:0] */ 1429215976Sjmallett uint64_t we_n_mask : 1; /**< Mask for WE_N */ 1430215976Sjmallett uint64_t cas_n_mask : 1; /**< Mask for CAS_N */ 1431215976Sjmallett uint64_t ras_n_mask : 1; /**< Mask for RAS_N */ 1432215976Sjmallett uint64_t odt1_mask : 2; /**< Mask for ODT1 */ 1433215976Sjmallett uint64_t odt0_mask : 2; /**< Mask for ODT0 */ 1434215976Sjmallett uint64_t cs1_n_mask : 2; /**< Mask for CS1_N */ 1435215976Sjmallett uint64_t cs0_n_mask : 2; /**< Mask for CS0_N */ 1436215976Sjmallett uint64_t cke_mask : 2; /**< Mask for CKE* */ 1437215976Sjmallett#else 1438215976Sjmallett uint64_t cke_mask : 2; 1439215976Sjmallett uint64_t cs0_n_mask : 2; 1440215976Sjmallett uint64_t cs1_n_mask : 2; 1441215976Sjmallett uint64_t odt0_mask : 2; 1442215976Sjmallett uint64_t odt1_mask : 2; 1443215976Sjmallett uint64_t ras_n_mask : 1; 1444215976Sjmallett uint64_t cas_n_mask : 1; 1445215976Sjmallett uint64_t we_n_mask : 1; 1446215976Sjmallett uint64_t ba_mask : 3; 1447215976Sjmallett uint64_t a_mask : 16; 1448215976Sjmallett 
uint64_t reset_n_mask : 1; 1449215976Sjmallett uint64_t reserved_33_63 : 31; 1450215976Sjmallett#endif 1451215976Sjmallett } s; 1452232812Sjmallett struct cvmx_lmcx_char_mask4_s cn61xx; 1453215976Sjmallett struct cvmx_lmcx_char_mask4_s cn63xx; 1454215976Sjmallett struct cvmx_lmcx_char_mask4_s cn63xxp1; 1455232812Sjmallett struct cvmx_lmcx_char_mask4_s cn66xx; 1456232812Sjmallett struct cvmx_lmcx_char_mask4_s cn68xx; 1457232812Sjmallett struct cvmx_lmcx_char_mask4_s cn68xxp1; 1458232812Sjmallett struct cvmx_lmcx_char_mask4_s cnf71xx; 1459215976Sjmallett}; 1460215976Sjmalletttypedef union cvmx_lmcx_char_mask4 cvmx_lmcx_char_mask4_t; 1461215976Sjmallett 1462215976Sjmallett/** 1463215976Sjmallett * cvmx_lmc#_comp_ctl 1464215976Sjmallett * 1465215976Sjmallett * LMC_COMP_CTL = LMC Compensation control 1466215976Sjmallett * 1467215976Sjmallett */ 1468232812Sjmallettunion cvmx_lmcx_comp_ctl { 1469215976Sjmallett uint64_t u64; 1470232812Sjmallett struct cvmx_lmcx_comp_ctl_s { 1471232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1472215976Sjmallett uint64_t reserved_32_63 : 32; 1473215976Sjmallett uint64_t nctl_csr : 4; /**< Compensation control bits */ 1474215976Sjmallett uint64_t nctl_clk : 4; /**< Compensation control bits */ 1475215976Sjmallett uint64_t nctl_cmd : 4; /**< Compensation control bits */ 1476215976Sjmallett uint64_t nctl_dat : 4; /**< Compensation control bits */ 1477215976Sjmallett uint64_t pctl_csr : 4; /**< Compensation control bits */ 1478215976Sjmallett uint64_t pctl_clk : 4; /**< Compensation control bits */ 1479215976Sjmallett uint64_t reserved_0_7 : 8; 1480215976Sjmallett#else 1481215976Sjmallett uint64_t reserved_0_7 : 8; 1482215976Sjmallett uint64_t pctl_clk : 4; 1483215976Sjmallett uint64_t pctl_csr : 4; 1484215976Sjmallett uint64_t nctl_dat : 4; 1485215976Sjmallett uint64_t nctl_cmd : 4; 1486215976Sjmallett uint64_t nctl_clk : 4; 1487215976Sjmallett uint64_t nctl_csr : 4; 1488215976Sjmallett uint64_t reserved_32_63 : 32; 1489215976Sjmallett#endif 
1490215976Sjmallett } s; 1491232812Sjmallett struct cvmx_lmcx_comp_ctl_cn30xx { 1492232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1493215976Sjmallett uint64_t reserved_32_63 : 32; 1494215976Sjmallett uint64_t nctl_csr : 4; /**< Compensation control bits */ 1495215976Sjmallett uint64_t nctl_clk : 4; /**< Compensation control bits */ 1496215976Sjmallett uint64_t nctl_cmd : 4; /**< Compensation control bits */ 1497215976Sjmallett uint64_t nctl_dat : 4; /**< Compensation control bits */ 1498215976Sjmallett uint64_t pctl_csr : 4; /**< Compensation control bits */ 1499215976Sjmallett uint64_t pctl_clk : 4; /**< Compensation control bits */ 1500215976Sjmallett uint64_t pctl_cmd : 4; /**< Compensation control bits */ 1501215976Sjmallett uint64_t pctl_dat : 4; /**< Compensation control bits */ 1502215976Sjmallett#else 1503215976Sjmallett uint64_t pctl_dat : 4; 1504215976Sjmallett uint64_t pctl_cmd : 4; 1505215976Sjmallett uint64_t pctl_clk : 4; 1506215976Sjmallett uint64_t pctl_csr : 4; 1507215976Sjmallett uint64_t nctl_dat : 4; 1508215976Sjmallett uint64_t nctl_cmd : 4; 1509215976Sjmallett uint64_t nctl_clk : 4; 1510215976Sjmallett uint64_t nctl_csr : 4; 1511215976Sjmallett uint64_t reserved_32_63 : 32; 1512215976Sjmallett#endif 1513215976Sjmallett } cn30xx; 1514215976Sjmallett struct cvmx_lmcx_comp_ctl_cn30xx cn31xx; 1515215976Sjmallett struct cvmx_lmcx_comp_ctl_cn30xx cn38xx; 1516215976Sjmallett struct cvmx_lmcx_comp_ctl_cn30xx cn38xxp2; 1517232812Sjmallett struct cvmx_lmcx_comp_ctl_cn50xx { 1518232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1519215976Sjmallett uint64_t reserved_32_63 : 32; 1520215976Sjmallett uint64_t nctl_csr : 4; /**< Compensation control bits */ 1521215976Sjmallett uint64_t reserved_20_27 : 8; 1522215976Sjmallett uint64_t nctl_dat : 4; /**< Compensation control bits */ 1523215976Sjmallett uint64_t pctl_csr : 4; /**< Compensation control bits */ 1524215976Sjmallett uint64_t reserved_5_11 : 7; 1525215976Sjmallett uint64_t pctl_dat : 5; /**< Compensation 
control bits */ 1526215976Sjmallett#else 1527215976Sjmallett uint64_t pctl_dat : 5; 1528215976Sjmallett uint64_t reserved_5_11 : 7; 1529215976Sjmallett uint64_t pctl_csr : 4; 1530215976Sjmallett uint64_t nctl_dat : 4; 1531215976Sjmallett uint64_t reserved_20_27 : 8; 1532215976Sjmallett uint64_t nctl_csr : 4; 1533215976Sjmallett uint64_t reserved_32_63 : 32; 1534215976Sjmallett#endif 1535215976Sjmallett } cn50xx; 1536215976Sjmallett struct cvmx_lmcx_comp_ctl_cn50xx cn52xx; 1537215976Sjmallett struct cvmx_lmcx_comp_ctl_cn50xx cn52xxp1; 1538215976Sjmallett struct cvmx_lmcx_comp_ctl_cn50xx cn56xx; 1539215976Sjmallett struct cvmx_lmcx_comp_ctl_cn50xx cn56xxp1; 1540215976Sjmallett struct cvmx_lmcx_comp_ctl_cn50xx cn58xx; 1541232812Sjmallett struct cvmx_lmcx_comp_ctl_cn58xxp1 { 1542232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1543215976Sjmallett uint64_t reserved_32_63 : 32; 1544215976Sjmallett uint64_t nctl_csr : 4; /**< Compensation control bits */ 1545215976Sjmallett uint64_t reserved_20_27 : 8; 1546215976Sjmallett uint64_t nctl_dat : 4; /**< Compensation control bits */ 1547215976Sjmallett uint64_t pctl_csr : 4; /**< Compensation control bits */ 1548215976Sjmallett uint64_t reserved_4_11 : 8; 1549215976Sjmallett uint64_t pctl_dat : 4; /**< Compensation control bits */ 1550215976Sjmallett#else 1551215976Sjmallett uint64_t pctl_dat : 4; 1552215976Sjmallett uint64_t reserved_4_11 : 8; 1553215976Sjmallett uint64_t pctl_csr : 4; 1554215976Sjmallett uint64_t nctl_dat : 4; 1555215976Sjmallett uint64_t reserved_20_27 : 8; 1556215976Sjmallett uint64_t nctl_csr : 4; 1557215976Sjmallett uint64_t reserved_32_63 : 32; 1558215976Sjmallett#endif 1559215976Sjmallett } cn58xxp1; 1560215976Sjmallett}; 1561215976Sjmalletttypedef union cvmx_lmcx_comp_ctl cvmx_lmcx_comp_ctl_t; 1562215976Sjmallett 1563215976Sjmallett/** 1564215976Sjmallett * cvmx_lmc#_comp_ctl2 1565215976Sjmallett * 1566215976Sjmallett * LMC_COMP_CTL2 = LMC Compensation control 1567215976Sjmallett * 1568215976Sjmallett */ 
1569232812Sjmallettunion cvmx_lmcx_comp_ctl2 { 1570215976Sjmallett uint64_t u64; 1571232812Sjmallett struct cvmx_lmcx_comp_ctl2_s { 1572232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1573215976Sjmallett uint64_t reserved_34_63 : 30; 1574215976Sjmallett uint64_t ddr__ptune : 4; /**< DDR PCTL from compensation circuit 1575215976Sjmallett The encoded value provides debug information for the 1576215976Sjmallett compensation impedance on P-pullup */ 1577215976Sjmallett uint64_t ddr__ntune : 4; /**< DDR NCTL from compensation circuit 1578215976Sjmallett The encoded value provides debug information for the 1579215976Sjmallett compensation impedance on N-pulldown */ 1580215976Sjmallett uint64_t m180 : 1; /**< Cap impedance at 180 Ohm (instead of 240 Ohm) */ 1581215976Sjmallett uint64_t byp : 1; /**< Bypass mode 1582215976Sjmallett When set, PTUNE,NTUNE are the compensation setting. 1583215976Sjmallett When clear, DDR_PTUNE,DDR_NTUNE are the compensation setting. */ 1584215976Sjmallett uint64_t ptune : 4; /**< PCTL impedance control in bypass mode */ 1585215976Sjmallett uint64_t ntune : 4; /**< NCTL impedance control in bypass mode */ 1586215976Sjmallett uint64_t rodt_ctl : 4; /**< NCTL RODT impedance control bits 1587215976Sjmallett This field controls ODT values during a memory read 1588215976Sjmallett on the Octeon side 1589215976Sjmallett 0000 = No ODT 1590215976Sjmallett 0001 = 20 ohm 1591215976Sjmallett 0010 = 30 ohm 1592215976Sjmallett 0011 = 40 ohm 1593215976Sjmallett 0100 = 60 ohm 1594215976Sjmallett 0101 = 120 ohm 1595215976Sjmallett 0110-1111 = Reserved */ 1596232812Sjmallett uint64_t cmd_ctl : 4; /**< Drive strength control for CMD/A/RESET_L drivers 1597215976Sjmallett 0001 = 24 ohm 1598215976Sjmallett 0010 = 26.67 ohm 1599215976Sjmallett 0011 = 30 ohm 1600215976Sjmallett 0100 = 34.3 ohm 1601215976Sjmallett 0101 = 40 ohm 1602215976Sjmallett 0110 = 48 ohm 1603215976Sjmallett 0111 = 60 ohm 1604215976Sjmallett 0000,1000-1111 = Reserved */ 1605232812Sjmallett uint64_t 
ck_ctl : 4; /**< Drive strength control for CK/CS*_L/ODT/CKE* drivers 1606215976Sjmallett 0001 = 24 ohm 1607215976Sjmallett 0010 = 26.67 ohm 1608215976Sjmallett 0011 = 30 ohm 1609215976Sjmallett 0100 = 34.3 ohm 1610215976Sjmallett 0101 = 40 ohm 1611215976Sjmallett 0110 = 48 ohm 1612215976Sjmallett 0111 = 60 ohm 1613215976Sjmallett 0000,1000-1111 = Reserved */ 1614215976Sjmallett uint64_t dqx_ctl : 4; /**< Drive strength control for DQ/DQS drivers 1615215976Sjmallett 0001 = 24 ohm 1616215976Sjmallett 0010 = 26.67 ohm 1617215976Sjmallett 0011 = 30 ohm 1618215976Sjmallett 0100 = 34.3 ohm 1619215976Sjmallett 0101 = 40 ohm 1620215976Sjmallett 0110 = 48 ohm 1621215976Sjmallett 0111 = 60 ohm 1622215976Sjmallett 0000,1000-1111 = Reserved */ 1623215976Sjmallett#else 1624215976Sjmallett uint64_t dqx_ctl : 4; 1625215976Sjmallett uint64_t ck_ctl : 4; 1626215976Sjmallett uint64_t cmd_ctl : 4; 1627215976Sjmallett uint64_t rodt_ctl : 4; 1628215976Sjmallett uint64_t ntune : 4; 1629215976Sjmallett uint64_t ptune : 4; 1630215976Sjmallett uint64_t byp : 1; 1631215976Sjmallett uint64_t m180 : 1; 1632215976Sjmallett uint64_t ddr__ntune : 4; 1633215976Sjmallett uint64_t ddr__ptune : 4; 1634215976Sjmallett uint64_t reserved_34_63 : 30; 1635215976Sjmallett#endif 1636215976Sjmallett } s; 1637232812Sjmallett struct cvmx_lmcx_comp_ctl2_s cn61xx; 1638215976Sjmallett struct cvmx_lmcx_comp_ctl2_s cn63xx; 1639215976Sjmallett struct cvmx_lmcx_comp_ctl2_s cn63xxp1; 1640232812Sjmallett struct cvmx_lmcx_comp_ctl2_s cn66xx; 1641232812Sjmallett struct cvmx_lmcx_comp_ctl2_s cn68xx; 1642232812Sjmallett struct cvmx_lmcx_comp_ctl2_s cn68xxp1; 1643232812Sjmallett struct cvmx_lmcx_comp_ctl2_s cnf71xx; 1644215976Sjmallett}; 1645215976Sjmalletttypedef union cvmx_lmcx_comp_ctl2 cvmx_lmcx_comp_ctl2_t; 1646215976Sjmallett 1647215976Sjmallett/** 1648215976Sjmallett * cvmx_lmc#_config 1649215976Sjmallett * 1650215976Sjmallett * LMC_CONFIG = LMC Configuration Register 1651215976Sjmallett * 1652215976Sjmallett * 
This register controls certain parameters of Memory Configuration 1653215976Sjmallett * 1654215976Sjmallett * Notes: 1655232812Sjmallett * a. Priority order for hardware writes to LMC*_CONFIG/LMC*_FADR/LMC*_SCRAMBLED_FADR/LMC*_ECC_SYND: DED error >= NXM error > SEC error 1656215976Sjmallett * b. The self refresh entry sequence(s) power the DLL up/down (depending on LMC*_MODEREG_PARAMS0[DLL]) 1657215976Sjmallett * when LMC*_CONFIG[SREF_WITH_DLL] is set 1658215976Sjmallett * c. Prior to the self-refresh exit sequence, LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 should be re-programmed (if needed) to the 1659215976Sjmallett * appropriate values 1660215976Sjmallett * 1661215976Sjmallett * LMC Bringup Sequence: 1662215976Sjmallett * 1. SW must ensure there are no pending DRAM transactions and that the DDR PLL and the DLL have been initialized. 1663215976Sjmallett * 2. Write LMC*_COMP_CTL2, LMC*_CONTROL, LMC*_WODT_MASK, LMC*_DUAL_MEMCFG, LMC*_TIMING_PARAMS0, LMC*_TIMING_PARAMS1, 1664215976Sjmallett * LMC*_MODEREG_PARAMS0, LMC*_MODEREG_PARAMS1, LMC*_RESET_CTL (with DDR3RST=0), LMC*_CONFIG (with INIT_START=0) 1665215976Sjmallett * with appropriate values, if necessary. 1666215976Sjmallett * 3. Wait 200us, then write LMC*_RESET_CTL[DDR3RST] = 1. 1667215976Sjmallett * 4. Initialize all ranks at once by writing LMC*_CONFIG[RANKMASK][n] = 1, LMC*_CONFIG[INIT_STATUS][n] = 1, and LMC*_CONFIG[INIT_START] = 1 1668215976Sjmallett * where n is a valid rank index for the specific board configuration. 1669215976Sjmallett * 5. for each rank n to be write-leveled [ 1670215976Sjmallett * if auto write-leveling is desired [ 1671215976Sjmallett * write LMC*_CONFIG[RANKMASK][n] = 1, LMC*_WLEVEL_CTL appropriately and LMC*_CONFIG[INIT_START] = 1 1672215976Sjmallett * wait until LMC*_WLEVEL_RANKn[STATUS] = 3 1673215976Sjmallett * ] else [ 1674215976Sjmallett * write LMC*_WLEVEL_RANKn with appropriate values 1675215976Sjmallett * ] 1676215976Sjmallett * ] 1677215976Sjmallett * 6. 
for each rank n to be read-leveled [ 1678215976Sjmallett * if auto read-leveling is desired [ 1679215976Sjmallett * write LMC*_CONFIG[RANKMASK][n] = 1, LMC*_RLEVEL_CTL appropriately and LMC*_CONFIG[INIT_START] = 1 1680215976Sjmallett * wait until LMC*_RLEVEL_RANKn[STATUS] = 3 1681215976Sjmallett * ] else [ 1682215976Sjmallett * write LMC*_RLEVEL_RANKn with appropriate values 1683215976Sjmallett * ] 1684215976Sjmallett * ] 1685215976Sjmallett */ 1686232812Sjmallettunion cvmx_lmcx_config { 1687215976Sjmallett uint64_t u64; 1688232812Sjmallett struct cvmx_lmcx_config_s { 1689232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1690232812Sjmallett uint64_t reserved_61_63 : 3; 1691232812Sjmallett uint64_t mode32b : 1; /**< 32b Datapath Mode NS 1692232812Sjmallett Set to 1 if we use only 32 DQ pins 1693232812Sjmallett 0 for 64b DQ mode. */ 1694232812Sjmallett uint64_t scrz : 1; /**< Hide LMC*_SCRAMBLE_CFG0 and LMC*_SCRAMBLE_CFG1 when set */ 1695232812Sjmallett uint64_t early_unload_d1_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 3 1696232812Sjmallett reads 1697232812Sjmallett The recommended EARLY_UNLOAD_D1_R1 value can be calculated 1698232812Sjmallett after the final LMC*_RLEVEL_RANK3[BYTE*] values are 1699232812Sjmallett selected (as part of read-leveling initialization). 1700232812Sjmallett Then, determine the largest read-leveling setting 1701232812Sjmallett for rank 3 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK3[BYTEi]) 1702232812Sjmallett across all i), then set EARLY_UNLOAD_D1_R1 1703232812Sjmallett when the low two bits of this largest setting is not 1704232812Sjmallett 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset<1:0>!=3)). 
*/ 1705232812Sjmallett uint64_t early_unload_d1_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 2 1706232812Sjmallett reads 1707232812Sjmallett The recommended EARLY_UNLOAD_D1_RO value can be calculated 1708232812Sjmallett after the final LMC*_RLEVEL_RANK2[BYTE*] values are 1709232812Sjmallett selected (as part of read-leveling initialization). 1710232812Sjmallett Then, determine the largest read-leveling setting 1711232812Sjmallett for rank 2 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK2[BYTEi]) 1712232812Sjmallett across all i), then set EARLY_UNLOAD_D1_RO 1713232812Sjmallett when the low two bits of this largest setting is not 1714232812Sjmallett 3 (i.e. EARLY_UNLOAD_D1_RO = (maxset<1:0>!=3)). */ 1715232812Sjmallett uint64_t early_unload_d0_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 1 1716232812Sjmallett reads 1717232812Sjmallett The recommended EARLY_UNLOAD_D0_R1 value can be calculated 1718232812Sjmallett after the final LMC*_RLEVEL_RANK1[BYTE*] values are 1719232812Sjmallett selected (as part of read-leveling initialization). 1720232812Sjmallett Then, determine the largest read-leveling setting 1721232812Sjmallett for rank 1 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK1[BYTEi]) 1722232812Sjmallett across all i), then set EARLY_UNLOAD_D0_R1 1723232812Sjmallett when the low two bits of this largest setting is not 1724232812Sjmallett 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */ 1725232812Sjmallett uint64_t early_unload_d0_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 0 1726232812Sjmallett reads. 1727232812Sjmallett The recommended EARLY_UNLOAD_D0_R0 value can be calculated 1728232812Sjmallett after the final LMC*_RLEVEL_RANK0[BYTE*] values are 1729232812Sjmallett selected (as part of read-leveling initialization). 1730232812Sjmallett Then, determine the largest read-leveling setting 1731232812Sjmallett for rank 0 (i.e. 
calculate maxset=MAX(LMC*_RLEVEL_RANK0[BYTEi]) 1732232812Sjmallett across all i), then set EARLY_UNLOAD_D0_R0 1733232812Sjmallett when the low two bits of this largest setting is not 1734232812Sjmallett 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */ 1735232812Sjmallett uint64_t init_status : 4; /**< Indicates status of initialization 1736232812Sjmallett INIT_STATUS[n] = 1 implies rank n has been initialized 1737232812Sjmallett SW must set necessary INIT_STATUS bits with the 1738232812Sjmallett same LMC*_CONFIG write that initiates 1739232812Sjmallett power-up/init and self-refresh exit sequences 1740232812Sjmallett (if the required INIT_STATUS bits are not already 1741232812Sjmallett set before LMC initiates the sequence). 1742232812Sjmallett INIT_STATUS determines the chip-selects that assert 1743232812Sjmallett during refresh, ZQCS, and precharge power-down and 1744232812Sjmallett self-refresh entry/exit SEQUENCE's. */ 1745232812Sjmallett uint64_t mirrmask : 4; /**< Mask determining which ranks are address-mirrored. 1746232812Sjmallett MIRRMASK<n> = 1 means Rank n addresses are mirrored 1747232812Sjmallett for 0 <= n <= 3 1748232812Sjmallett A mirrored read/write has these differences: 1749232812Sjmallett - DDR_BA<1> is swapped with DDR_BA<0> 1750232812Sjmallett - DDR_A<8> is swapped with DDR_A<7> 1751232812Sjmallett - DDR_A<6> is swapped with DDR_A<5> 1752232812Sjmallett - DDR_A<4> is swapped with DDR_A<3> 1753232812Sjmallett When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */ 1754232812Sjmallett uint64_t rankmask : 4; /**< Mask to select rank to be leveled/initialized. 
1755232812Sjmallett To write-level/read-level/initialize rank i, set RANKMASK<i> 1756232812Sjmallett RANK_ENA=1 RANK_ENA=0 1757232812Sjmallett RANKMASK<0> = DIMM0_CS0 DIMM0_CS0 1758232812Sjmallett RANKMASK<1> = DIMM0_CS1 MBZ 1759232812Sjmallett RANKMASK<2> = DIMM1_CS0 DIMM1_CS0 1760232812Sjmallett RANKMASK<3> = DIMM1_CS1 MBZ 1761232812Sjmallett For read/write leveling, each rank has to be leveled separately, 1762232812Sjmallett so RANKMASK should only have one bit set. 1763232812Sjmallett RANKMASK is not used during self-refresh entry/exit and 1764232812Sjmallett precharge power-down entry/exit instruction sequences. 1765232812Sjmallett When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */ 1766232812Sjmallett uint64_t rank_ena : 1; /**< RANK ena (for use with dual-rank DIMMs) 1767232812Sjmallett For dual-rank DIMMs, the rank_ena bit will enable 1768232812Sjmallett the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the 1769232812Sjmallett (pbank_lsb-1) address bit. 1770232812Sjmallett Write 0 for SINGLE ranked DIMM's. */ 1771232812Sjmallett uint64_t sref_with_dll : 1; /**< Self-refresh entry/exit write MR1 and MR2 1772232812Sjmallett When set, self-refresh entry and exit instruction sequences 1773232812Sjmallett write MR1 and MR2 (in all ranks). (The writes occur before 1774232812Sjmallett self-refresh entry, and after self-refresh exit.) 1775232812Sjmallett When clear, self-refresh entry and exit instruction sequences 1776232812Sjmallett do not write any registers in the DDR3 parts. */ 1777232812Sjmallett uint64_t early_dqx : 1; /**< Send DQx signals one CK cycle earlier for the case when 1778232812Sjmallett the shortest DQx lines have a larger delay than the CK line */ 1779232812Sjmallett uint64_t sequence : 3; /**< Selects the sequence that LMC runs after a 0->1 1780232812Sjmallett transition on LMC*_CONFIG[INIT_START]. 
1781232812Sjmallett SEQUENCE=0=power-up/init: 1782232812Sjmallett - RANKMASK selects participating ranks (should be all ranks with attached DRAM) 1783232812Sjmallett - INIT_STATUS must equal RANKMASK 1784232812Sjmallett - DDR_DIMM*_CKE signals activated (if they weren't already active) 1785232812Sjmallett - RDIMM register control words 0-15 will be written to RANKMASK-selected 1786232812Sjmallett RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding 1787232812Sjmallett LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and 1788232812Sjmallett LMC*_DIMM_CTL descriptions below for more details.) 1789232812Sjmallett - MR0, MR1, MR2, and MR3 will be written to selected ranks 1790232812Sjmallett SEQUENCE=1=read-leveling: 1791232812Sjmallett - RANKMASK selects the rank to be read-leveled 1792232812Sjmallett - MR3 written to selected rank 1793232812Sjmallett SEQUENCE=2=self-refresh entry: 1794232812Sjmallett - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM) 1795232812Sjmallett - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1 1796232812Sjmallett - DDR_DIMM*_CKE signals de-activated 1797232812Sjmallett SEQUENCE=3=self-refresh exit: 1798232812Sjmallett - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM) 1799232812Sjmallett - DDR_DIMM*_CKE signals activated 1800232812Sjmallett - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1 1801232812Sjmallett SEQUENCE=4=precharge power-down entry: 1802232812Sjmallett - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM) 1803232812Sjmallett - DDR_DIMM*_CKE signals de-activated 1804232812Sjmallett SEQUENCE=5=precharge power-down exit: 1805232812Sjmallett - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM) 1806232812Sjmallett - DDR_DIMM*_CKE signals activated 1807232812Sjmallett SEQUENCE=6=write-leveling: 1808232812Sjmallett - 
RANKMASK selects the rank to be write-leveled 1809232812Sjmallett - INIT_STATUS must indicate all ranks with attached DRAM 1810232812Sjmallett - MR1 and MR2 written to INIT_STATUS-selected ranks 1811232812Sjmallett SEQUENCE=7=illegal 1812232812Sjmallett Precharge power-down entry and exit SEQUENCE's may also 1813232812Sjmallett be automatically generated by the HW when IDLEPOWER!=0. 1814232812Sjmallett Self-refresh entry SEQUENCE's may also be automatically 1815232812Sjmallett generated by hardware upon a chip warm or soft reset 1816232812Sjmallett sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set. 1817232812Sjmallett LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values 1818232812Sjmallett to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences. 1819232812Sjmallett Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details. 1820232812Sjmallett If there are two consecutive power-up/init's without 1821232812Sjmallett a DRESET assertion between them, LMC asserts DDR_DIMM*_CKE as part of 1822232812Sjmallett the first power-up/init, and continues to assert DDR_DIMM*_CKE 1823232812Sjmallett through the remainder of the first and the second power-up/init. 1824232812Sjmallett If DDR_DIMM*_CKE deactivation and reactivation is needed for 1825232812Sjmallett a second power-up/init, a DRESET assertion is required 1826232812Sjmallett between the first and the second. */ 1827232812Sjmallett uint64_t ref_zqcs_int : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle 1828232812Sjmallett increments. A Refresh sequence is triggered when bits 1829232812Sjmallett [24:18] are equal to 0, and a ZQCS sequence is triggered 1830232812Sjmallett when [36:18] are equal to 0. 1831232812Sjmallett Program [24:18] to RND-DN(tREFI/clkPeriod/512) 1832232812Sjmallett Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). 
Note 1833232812Sjmallett that this value should always be greater than 32, to account for 1834232812Sjmallett resistor calibration delays. 1835232812Sjmallett 000_00000000_00000000: RESERVED 1836232812Sjmallett Max Refresh interval = 127 * 512 = 65024 CKs 1837232812Sjmallett Max ZQCS interval = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for a 800 MHz CK 1838232812Sjmallett LMC*_CONFIG[INIT_STATUS] determines which ranks receive 1839232812Sjmallett the REF / ZQCS. LMC does not send any refreshes / ZQCS's 1840232812Sjmallett when LMC*_CONFIG[INIT_STATUS]=0. */ 1841232812Sjmallett uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter, 1842232812Sjmallett and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT 1843232812Sjmallett CSR's. SW should write this to a one, then re-write 1844232812Sjmallett it to a zero to cause the reset. */ 1845232812Sjmallett uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation 1846232812Sjmallett 0=disabled, 1=enabled */ 1847232812Sjmallett uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after 1848232812Sjmallett having waited for 2^FORCEWRITE CK cycles. 0=disabled. */ 1849232812Sjmallett uint64_t idlepower : 3; /**< Enter precharge power-down mode after the memory 1850232812Sjmallett controller has been idle for 2^(2+IDLEPOWER) CK cycles. 1851232812Sjmallett 0=disabled. 1852232812Sjmallett This field should only be programmed after initialization. 1853232812Sjmallett LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL 1854232812Sjmallett is disabled during the precharge power-down. 
*/ 1855232812Sjmallett uint64_t pbank_lsb : 4; /**< DIMM address bit select 1856232812Sjmallett Reverting to the explanation for ROW_LSB, 1857232812Sjmallett PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits 1858232812Sjmallett In the 512MB DIMM Example, assuming no rank bits: 1859232812Sjmallett pbank_lsb=mem_addr[15+13] for 64b mode 1860232812Sjmallett =mem_addr[14+13] for 32b mode 1861232812Sjmallett Decoding for pbank_lsb 1862232812Sjmallett - 0000:DIMM = mem_adr[28] / rank = mem_adr[27] (if RANK_ENA) 1863232812Sjmallett - 0001:DIMM = mem_adr[29] / rank = mem_adr[28] " 1864232812Sjmallett - 0010:DIMM = mem_adr[30] / rank = mem_adr[29] " 1865232812Sjmallett - 0011:DIMM = mem_adr[31] / rank = mem_adr[30] " 1866232812Sjmallett - 0100:DIMM = mem_adr[32] / rank = mem_adr[31] " 1867232812Sjmallett - 0101:DIMM = mem_adr[33] / rank = mem_adr[32] " 1868232812Sjmallett - 0110:DIMM = mem_adr[34] / rank = mem_adr[33] " 1869232812Sjmallett - 0111:DIMM = 0 / rank = mem_adr[34] " 1870232812Sjmallett - 1000-1111: RESERVED 1871232812Sjmallett For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank) 1872232812Sjmallett DDR3 parts, the column address width = 10, so with 1873232812Sjmallett 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] 1874232812Sjmallett With rank_ena = 0, pbank_lsb = 2 1875232812Sjmallett With rank_ena = 1, pbank_lsb = 3 */ 1876232812Sjmallett uint64_t row_lsb : 3; /**< Row Address bit select 1877232812Sjmallett Encoding used to determine which memory address 1878232812Sjmallett bit position represents the low order DDR ROW address. 1879232812Sjmallett The processor's memory address[34:7] needs to be 1880232812Sjmallett translated to DRAM addresses (bnk,row,col,rank and DIMM) 1881232812Sjmallett and that is a function of the following: 1882232812Sjmallett 1. Datapath Width (64 or 32) 1883232812Sjmallett 2. \# Banks (8) 1884232812Sjmallett 3. 
\# Column Bits of the memory part - spec'd indirectly 1885232812Sjmallett by this register. 1886232812Sjmallett 4. \# Row Bits of the memory part - spec'd indirectly 1887232812Sjmallett 5. \# Ranks in a DIMM - spec'd by RANK_ENA 1888232812Sjmallett 6. \# DIMM's in the system by the register below (PBANK_LSB). 1889232812Sjmallett Col Address starts from mem_addr[2] for 32b (4Bytes) 1890232812Sjmallett dq width or from mem_addr[3] for 64b (8Bytes) dq width 1891232812Sjmallett \# col + \# bank = 12. Hence row_lsb is mem_adr[15] for 1892232812Sjmallett 64bmode or mem_adr[14] for 32b mode. Hence row_lsb 1893232812Sjmallett parameter should be set to 001 (64b) or 000 (32b). 1894232812Sjmallett Decoding for row_lsb 1895232812Sjmallett - 000: row_lsb = mem_adr[14] 1896232812Sjmallett - 001: row_lsb = mem_adr[15] 1897232812Sjmallett - 010: row_lsb = mem_adr[16] 1898232812Sjmallett - 011: row_lsb = mem_adr[17] 1899232812Sjmallett - 100: row_lsb = mem_adr[18] 1900232812Sjmallett - 101: row_lsb = mem_adr[19] 1901232812Sjmallett - 110: row_lsb = mem_adr[20] 1902232812Sjmallett - 111: RESERVED 1903232812Sjmallett For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank) 1904232812Sjmallett DDR3 parts, the column address width = 10, so with 1905232812Sjmallett 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */ 1906232812Sjmallett uint64_t ecc_ena : 1; /**< ECC Enable: When set will enable the 8b ECC 1907232812Sjmallett check/correct logic. Should be 1 when used with DIMMs 1908232812Sjmallett with ECC. 0, otherwise. 1909232812Sjmallett When this mode is turned on, DQ[71:64] 1910232812Sjmallett on writes, will contain the ECC code generated for 1911232812Sjmallett the 64 bits of data which will 1912232812Sjmallett written in the memory and then later on reads, used 1913232812Sjmallett to check for Single bit error (which will be auto- 1914232812Sjmallett corrected) and Double Bit error (which will be 1915232812Sjmallett reported). 
When not turned on, DQ[71:64] 1916232812Sjmallett are driven to 0. Please refer to SEC_ERR, DED_ERR, 1917232812Sjmallett LMC*_FADR, LMC*_SCRAMBLED_FADR and LMC*_ECC_SYND registers 1918232812Sjmallett for diagnostics information when there is an error. */ 1919232812Sjmallett uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory sequence that is 1920232812Sjmallett selected by LMC*_CONFIG[SEQUENCE]. This register is a 1921232812Sjmallett oneshot and clears itself each time it is set. */ 1922232812Sjmallett#else 1923232812Sjmallett uint64_t init_start : 1; 1924232812Sjmallett uint64_t ecc_ena : 1; 1925232812Sjmallett uint64_t row_lsb : 3; 1926232812Sjmallett uint64_t pbank_lsb : 4; 1927232812Sjmallett uint64_t idlepower : 3; 1928232812Sjmallett uint64_t forcewrite : 4; 1929232812Sjmallett uint64_t ecc_adr : 1; 1930232812Sjmallett uint64_t reset : 1; 1931232812Sjmallett uint64_t ref_zqcs_int : 19; 1932232812Sjmallett uint64_t sequence : 3; 1933232812Sjmallett uint64_t early_dqx : 1; 1934232812Sjmallett uint64_t sref_with_dll : 1; 1935232812Sjmallett uint64_t rank_ena : 1; 1936232812Sjmallett uint64_t rankmask : 4; 1937232812Sjmallett uint64_t mirrmask : 4; 1938232812Sjmallett uint64_t init_status : 4; 1939232812Sjmallett uint64_t early_unload_d0_r0 : 1; 1940232812Sjmallett uint64_t early_unload_d0_r1 : 1; 1941232812Sjmallett uint64_t early_unload_d1_r0 : 1; 1942232812Sjmallett uint64_t early_unload_d1_r1 : 1; 1943232812Sjmallett uint64_t scrz : 1; 1944232812Sjmallett uint64_t mode32b : 1; 1945232812Sjmallett uint64_t reserved_61_63 : 3; 1946232812Sjmallett#endif 1947232812Sjmallett } s; 1948232812Sjmallett struct cvmx_lmcx_config_s cn61xx; 1949232812Sjmallett struct cvmx_lmcx_config_cn63xx { 1950232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 1951215976Sjmallett uint64_t reserved_59_63 : 5; 1952215976Sjmallett uint64_t early_unload_d1_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 3 1953215976Sjmallett reads 1954215976Sjmallett The 
recommended EARLY_UNLOAD_D1_R1 value can be calculated 1955215976Sjmallett after the final LMC*_RLEVEL_RANK3[BYTE*] values are 1956215976Sjmallett selected (as part of read-leveling initialization). 1957215976Sjmallett Then, determine the largest read-leveling setting 1958215976Sjmallett for rank 3 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK3[BYTEi]) 1959215976Sjmallett across all i), then set EARLY_UNLOAD_D1_R1 1960215976Sjmallett when the low two bits of this largest setting is not 1961215976Sjmallett 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset<1:0>!=3)). */ 1962215976Sjmallett uint64_t early_unload_d1_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 2 1963215976Sjmallett reads 1964215976Sjmallett The recommended EARLY_UNLOAD_D1_RO value can be calculated 1965215976Sjmallett after the final LMC*_RLEVEL_RANK2[BYTE*] values are 1966215976Sjmallett selected (as part of read-leveling initialization). 1967215976Sjmallett Then, determine the largest read-leveling setting 1968215976Sjmallett for rank 2 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK2[BYTEi]) 1969215976Sjmallett across all i), then set EARLY_UNLOAD_D1_RO 1970215976Sjmallett when the low two bits of this largest setting is not 1971215976Sjmallett 3 (i.e. EARLY_UNLOAD_D1_RO = (maxset<1:0>!=3)). */ 1972215976Sjmallett uint64_t early_unload_d0_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 1 1973215976Sjmallett reads 1974215976Sjmallett The recommended EARLY_UNLOAD_D0_R1 value can be calculated 1975215976Sjmallett after the final LMC*_RLEVEL_RANK1[BYTE*] values are 1976215976Sjmallett selected (as part of read-leveling initialization). 1977215976Sjmallett Then, determine the largest read-leveling setting 1978215976Sjmallett for rank 1 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK1[BYTEi]) 1979215976Sjmallett across all i), then set EARLY_UNLOAD_D0_R1 1980215976Sjmallett when the low two bits of this largest setting is not 1981215976Sjmallett 3 (i.e. 
EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */ 1982215976Sjmallett uint64_t early_unload_d0_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 0 1983215976Sjmallett reads. 1984215976Sjmallett The recommended EARLY_UNLOAD_D0_R0 value can be calculated 1985215976Sjmallett after the final LMC*_RLEVEL_RANK0[BYTE*] values are 1986215976Sjmallett selected (as part of read-leveling initialization). 1987215976Sjmallett Then, determine the largest read-leveling setting 1988215976Sjmallett for rank 0 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK0[BYTEi]) 1989215976Sjmallett across all i), then set EARLY_UNLOAD_D0_R0 1990215976Sjmallett when the low two bits of this largest setting is not 1991215976Sjmallett 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */ 1992215976Sjmallett uint64_t init_status : 4; /**< Indicates status of initialization 1993215976Sjmallett INIT_STATUS[n] = 1 implies rank n has been initialized 1994215976Sjmallett SW must set necessary INIT_STATUS bits with the 1995215976Sjmallett same LMC*_CONFIG write that initiates 1996215976Sjmallett power-up/init and self-refresh exit sequences 1997215976Sjmallett (if the required INIT_STATUS bits are not already 1998215976Sjmallett set before LMC initiates the sequence). 1999215976Sjmallett INIT_STATUS determines the chip-selects that assert 2000215976Sjmallett during refresh, ZQCS, and precharge power-down and 2001215976Sjmallett self-refresh entry/exit SEQUENCE's. */ 2002215976Sjmallett uint64_t mirrmask : 4; /**< Mask determining which ranks are address-mirrored. 
2003215976Sjmallett MIRRMASK<n> = 1 means Rank n addresses are mirrored 2004215976Sjmallett for 0 <= n <= 3 2005215976Sjmallett A mirrored read/write has these differences: 2006215976Sjmallett - DDR_BA<1> is swapped with DDR_BA<0> 2007215976Sjmallett - DDR_A<8> is swapped with DDR_A<7> 2008215976Sjmallett - DDR_A<6> is swapped with DDR_A<5> 2009215976Sjmallett - DDR_A<4> is swapped with DDR_A<3> 2010215976Sjmallett When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */ 2011215976Sjmallett uint64_t rankmask : 4; /**< Mask to select rank to be leveled/initialized. 2012215976Sjmallett To write-level/read-level/initialize rank i, set RANKMASK<i> 2013215976Sjmallett RANK_ENA=1 RANK_ENA=0 2014215976Sjmallett RANKMASK<0> = DIMM0_CS0 DIMM0_CS0 2015215976Sjmallett RANKMASK<1> = DIMM0_CS1 MBZ 2016215976Sjmallett RANKMASK<2> = DIMM1_CS0 DIMM1_CS0 2017215976Sjmallett RANKMASK<3> = DIMM1_CS1 MBZ 2018215976Sjmallett For read/write leveling, each rank has to be leveled separately, 2019215976Sjmallett so RANKMASK should only have one bit set. 2020215976Sjmallett RANKMASK is not used during self-refresh entry/exit and 2021215976Sjmallett precharge power-down entry/exit instruction sequences. 2022215976Sjmallett When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */ 2023215976Sjmallett uint64_t rank_ena : 1; /**< RANK ena (for use with dual-rank DIMMs) 2024215976Sjmallett For dual-rank DIMMs, the rank_ena bit will enable 2025215976Sjmallett the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the 2026215976Sjmallett (pbank_lsb-1) address bit. 2027215976Sjmallett Write 0 for SINGLE ranked DIMM's. */ 2028215976Sjmallett uint64_t sref_with_dll : 1; /**< Self-refresh entry/exit write MR1 and MR2 2029215976Sjmallett When set, self-refresh entry and exit instruction sequences 2030215976Sjmallett write MR1 and MR2 (in all ranks). (The writes occur before 2031215976Sjmallett self-refresh entry, and after self-refresh exit.) 
2032215976Sjmallett When clear, self-refresh entry and exit instruction sequences 2033215976Sjmallett do not write any registers in the DDR3 parts. */ 2034215976Sjmallett uint64_t early_dqx : 1; /**< Send DQx signals one CK cycle earlier for the case when 2035215976Sjmallett the shortest DQx lines have a larger delay than the CK line */ 2036215976Sjmallett uint64_t sequence : 3; /**< Selects the sequence that LMC runs after a 0->1 2037215976Sjmallett transition on LMC*_CONFIG[INIT_START]. 2038215976Sjmallett SEQUENCE=0=power-up/init: 2039215976Sjmallett - RANKMASK selects participating ranks (should be all ranks with attached DRAM) 2040215976Sjmallett - INIT_STATUS must equal RANKMASK 2041215976Sjmallett - DDR_CKE* signals activated (if they weren't already active) 2042215976Sjmallett - RDIMM register control words 0-15 will be written to RANKMASK-selected 2043215976Sjmallett RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding 2044215976Sjmallett LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and 2045215976Sjmallett LMC*_DIMM_CTL descriptions below for more details.) 
2046215976Sjmallett - MR0, MR1, MR2, and MR3 will be written to selected ranks 2047215976Sjmallett SEQUENCE=1=read-leveling: 2048215976Sjmallett - RANKMASK selects the rank to be read-leveled 2049215976Sjmallett - MR3 written to selected rank 2050215976Sjmallett SEQUENCE=2=self-refresh entry: 2051215976Sjmallett - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM) 2052215976Sjmallett - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1 2053215976Sjmallett - DDR_CKE* signals de-activated 2054215976Sjmallett SEQUENCE=3=self-refresh exit: 2055215976Sjmallett - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM) 2056215976Sjmallett - DDR_CKE* signals activated 2057215976Sjmallett - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1 2058215976Sjmallett SEQUENCE=4=precharge power-down entry: 2059215976Sjmallett - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM) 2060215976Sjmallett - DDR_CKE* signals de-activated 2061215976Sjmallett SEQUENCE=5=precharge power-down exit: 2062215976Sjmallett - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM) 2063215976Sjmallett - DDR_CKE* signals activated 2064215976Sjmallett SEQUENCE=6=write-leveling: 2065215976Sjmallett - RANKMASK selects the rank to be write-leveled 2066215976Sjmallett - INIT_STATUS must indicate all ranks with attached DRAM 2067215976Sjmallett - MR1 and MR2 written to INIT_STATUS-selected ranks 2068215976Sjmallett SEQUENCE=7=illegal 2069215976Sjmallett Precharge power-down entry and exit SEQUENCE's may also 2070215976Sjmallett be automatically generated by the HW when IDLEPOWER!=0. 2071215976Sjmallett Self-refresh entry SEQUENCE's may also be automatically 2072215976Sjmallett generated by hardware upon a chip warm or soft reset 2073215976Sjmallett sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set. 
2074215976Sjmallett LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values 2075215976Sjmallett to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences. 2076215976Sjmallett Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details. 2077215976Sjmallett If there are two consecutive power-up/init's without 2078215976Sjmallett a DRESET assertion between them, LMC asserts DDR_CKE* as part of 2079215976Sjmallett the first power-up/init, and continues to assert DDR_CKE* 2080215976Sjmallett through the remainder of the first and the second power-up/init. 2081215976Sjmallett If DDR_CKE* deactivation and reactivation is needed for 2082215976Sjmallett a second power-up/init, a DRESET assertion is required 2083215976Sjmallett between the first and the second. */ 2084215976Sjmallett uint64_t ref_zqcs_int : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle 2085215976Sjmallett increments. A Refresh sequence is triggered when bits 2086215976Sjmallett [24:18] are equal to 0, and a ZQCS sequence is triggered 2087215976Sjmallett when [36:18] are equal to 0. 2088215976Sjmallett Program [24:18] to RND-DN(tREFI/clkPeriod/512) 2089215976Sjmallett Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note 2090215976Sjmallett that this value should always be greater than 32, to account for 2091215976Sjmallett resistor calibration delays. 2092215976Sjmallett 000_00000000_00000000: RESERVED 2093215976Sjmallett Max Refresh interval = 127 * 512 = 65024 CKs 2094215976Sjmallett Max ZQCS interval = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for a 800 MHz CK 2095215976Sjmallett LMC*_CONFIG[INIT_STATUS] determines which ranks receive 2096215976Sjmallett the REF / ZQCS. LMC does not send any refreshes / ZQCS's 2097215976Sjmallett when LMC*_CONFIG[INIT_STATUS]=0. 
*/ 2098215976Sjmallett uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter, 2099215976Sjmallett and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT 2100215976Sjmallett CSR's. SW should write this to a one, then re-write 2101215976Sjmallett it to a zero to cause the reset. */ 2102215976Sjmallett uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation 2103215976Sjmallett 0=disabled, 1=enabled */ 2104215976Sjmallett uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after 2105215976Sjmallett having waited for 2^FORCEWRITE CK cycles. 0=disabled. */ 2106215976Sjmallett uint64_t idlepower : 3; /**< Enter precharge power-down mode after the memory 2107215976Sjmallett controller has been idle for 2^(2+IDLEPOWER) CK cycles. 2108215976Sjmallett 0=disabled. 2109215976Sjmallett This field should only be programmed after initialization. 2110215976Sjmallett LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL 2111215976Sjmallett is disabled during the precharge power-down. 
*/ 2112215976Sjmallett uint64_t pbank_lsb : 4; /**< DIMM address bit select 2113215976Sjmallett Reverting to the explanation for ROW_LSB, 2114215976Sjmallett PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits 2115215976Sjmallett Decoding for pbank_lsb 2116215976Sjmallett - 0000:DIMM = mem_adr[28] / rank = mem_adr[27] (if RANK_ENA) 2117215976Sjmallett - 0001:DIMM = mem_adr[29] / rank = mem_adr[28] " 2118215976Sjmallett - 0010:DIMM = mem_adr[30] / rank = mem_adr[29] " 2119215976Sjmallett - 0011:DIMM = mem_adr[31] / rank = mem_adr[30] " 2120215976Sjmallett - 0100:DIMM = mem_adr[32] / rank = mem_adr[31] " 2121215976Sjmallett - 0101:DIMM = mem_adr[33] / rank = mem_adr[32] " 2122215976Sjmallett - 0110:DIMM = mem_adr[34] / rank = mem_adr[33] " 2123215976Sjmallett - 0111:DIMM = 0 / rank = mem_adr[34] " 2124215976Sjmallett - 1000-1111: RESERVED 2125215976Sjmallett For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank) 2126215976Sjmallett DDR3 parts, the column address width = 10, so with 2127215976Sjmallett 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] 2128215976Sjmallett With rank_ena = 0, pbank_lsb = 2 2129215976Sjmallett With rank_ena = 1, pbank_lsb = 3 */ 2130215976Sjmallett uint64_t row_lsb : 3; /**< Row Address bit select 2131215976Sjmallett Encoding used to determine which memory address 2132215976Sjmallett bit position represents the low order DDR ROW address. 2133215976Sjmallett The processor's memory address[34:7] needs to be 2134215976Sjmallett translated to DRAM addresses (bnk,row,col,rank and DIMM) 2135215976Sjmallett and that is a function of the following: 2136215976Sjmallett 1. Datapath Width (64) 2137215976Sjmallett 2. \# Banks (8) 2138215976Sjmallett 3. \# Column Bits of the memory part - spec'd indirectly 2139215976Sjmallett by this register. 2140215976Sjmallett 4. \# Row Bits of the memory part - spec'd indirectly 2141215976Sjmallett 5. 
\# Ranks in a DIMM - spec'd by RANK_ENA 2142215976Sjmallett 6. \# DIMM's in the system by the register below (PBANK_LSB). 2143215976Sjmallett Decoding for row_lsb 2144215976Sjmallett - 000: row_lsb = mem_adr[14] 2145215976Sjmallett - 001: row_lsb = mem_adr[15] 2146215976Sjmallett - 010: row_lsb = mem_adr[16] 2147215976Sjmallett - 011: row_lsb = mem_adr[17] 2148215976Sjmallett - 100: row_lsb = mem_adr[18] 2149215976Sjmallett - 101: row_lsb = mem_adr[19] 2150215976Sjmallett - 110: row_lsb = mem_adr[20] 2151215976Sjmallett - 111: RESERVED 2152215976Sjmallett For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank) 2153215976Sjmallett DDR3 parts, the column address width = 10, so with 2154215976Sjmallett 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */ 2155215976Sjmallett uint64_t ecc_ena : 1; /**< ECC Enable: When set will enable the 8b ECC 2156215976Sjmallett check/correct logic. Should be 1 when used with DIMMs 2157215976Sjmallett with ECC. 0, otherwise. 2158215976Sjmallett When this mode is turned on, DQ[71:64] 2159215976Sjmallett on writes, will contain the ECC code generated for 2160215976Sjmallett the 64 bits of data which will 2161215976Sjmallett written in the memory and then later on reads, used 2162215976Sjmallett to check for Single bit error (which will be auto- 2163215976Sjmallett corrected) and Double Bit error (which will be 2164215976Sjmallett reported). When not turned on, DQ[71:64] 2165215976Sjmallett are driven to 0. Please refer to SEC_ERR, DED_ERR, 2166215976Sjmallett LMC*_FADR, and LMC*_ECC_SYND registers 2167215976Sjmallett for diagnostics information when there is an error. */ 2168215976Sjmallett uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory sequence that is 2169215976Sjmallett selected by LMC*_CONFIG[SEQUENCE]. This register is a 2170215976Sjmallett oneshot and clears itself each time it is set. 
*/
#else	/* __LITTLE_ENDIAN_BITFIELD: identical fields, reverse order */
	uint64_t init_start                   : 1;
	uint64_t ecc_ena                      : 1;
	uint64_t row_lsb                      : 3;
	uint64_t pbank_lsb                    : 4;
	uint64_t idlepower                    : 3;
	uint64_t forcewrite                   : 4;
	uint64_t ecc_adr                      : 1;
	uint64_t reset                        : 1;
	uint64_t ref_zqcs_int                 : 19;
	uint64_t sequence                     : 3;
	uint64_t early_dqx                    : 1;
	uint64_t sref_with_dll                : 1;
	uint64_t rank_ena                     : 1;
	uint64_t rankmask                     : 4;
	uint64_t mirrmask                     : 4;
	uint64_t init_status                  : 4;
	uint64_t early_unload_d0_r0           : 1;
	uint64_t early_unload_d0_r1           : 1;
	uint64_t early_unload_d1_r0           : 1;
	uint64_t early_unload_d1_r1           : 1;
	uint64_t reserved_59_63               : 5;
#endif
	} cn63xx;
	/* CN63XX pass 1 layout: same as cn63xx but without the
	   EARLY_UNLOAD_* controls; bits <63:55> are reserved. */
	struct cvmx_lmcx_config_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_55_63               : 9;
	uint64_t init_status                  : 4;  /**< Indicates status of initialization
	                                                 INIT_STATUS[n] = 1 implies rank n has been initialized
	                                                 SW must set necessary INIT_STATUS bits with the
	                                                 same LMC*_CONFIG write that initiates
	                                                 power-up/init and self-refresh exit sequences
	                                                 (if the required INIT_STATUS bits are not already
	                                                 set before LMC initiates the sequence).
	                                                 INIT_STATUS determines the chip-selects that assert
	                                                 during refresh, ZQCS, and precharge power-down and
	                                                 self-refresh entry/exit SEQUENCE's. */
	uint64_t mirrmask                     : 4;  /**< Mask determining which ranks are address-mirrored.
	                                                 MIRRMASK<n> = 1 means Rank n addresses are mirrored
	                                                 for 0 <= n <= 3
	                                                 A mirrored read/write has these differences:
	                                                  - DDR_BA<1> is swapped with DDR_BA<0>
	                                                  - DDR_A<8> is swapped with DDR_A<7>
	                                                  - DDR_A<6> is swapped with DDR_A<5>
	                                                  - DDR_A<4> is swapped with DDR_A<3>
	                                                 When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
	uint64_t rankmask                     : 4;  /**< Mask to select rank to be leveled/initialized.
	                                                 To write-level/read-level/initialize rank i, set RANKMASK<i>
	                                                                 RANK_ENA=1       RANK_ENA=0
	                                                   RANKMASK<0> = DIMM0_CS0        DIMM0_CS0
	                                                   RANKMASK<1> = DIMM0_CS1        MBZ
	                                                   RANKMASK<2> = DIMM1_CS0        DIMM1_CS0
	                                                   RANKMASK<3> = DIMM1_CS1        MBZ
	                                                 For read/write leveling, each rank has to be leveled separately,
	                                                 so RANKMASK should only have one bit set.
	                                                 RANKMASK is not used during self-refresh entry/exit and
	                                                 precharge power-down entry/exit instruction sequences.
	                                                 When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
	uint64_t rank_ena                     : 1;  /**< RANK ena (for use with dual-rank DIMMs)
	                                                 For dual-rank DIMMs, the rank_ena bit will enable
	                                                 the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
	                                                 (pbank_lsb-1) address bit.
	                                                 Write 0 for SINGLE ranked DIMM's. */
	uint64_t sref_with_dll                : 1;  /**< Self-refresh entry/exit write MR1 and MR2
	                                                 When set, self-refresh entry and exit instruction sequences
	                                                 write MR1 and MR2 (in all ranks). (The writes occur before
	                                                 self-refresh entry, and after self-refresh exit.)
	                                                 When clear, self-refresh entry and exit instruction sequences
	                                                 do not write any registers in the DDR3 parts. */
	uint64_t early_dqx                    : 1;  /**< Send DQx signals one CK cycle earlier for the case when
	                                                 the shortest DQx lines have a larger delay than the CK line */
	uint64_t sequence                     : 3;  /**< Selects the sequence that LMC runs after a 0->1
	                                                 transition on LMC*_CONFIG[INIT_START].
	                                                 SEQUENCE=0=power-up/init:
	                                                  - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
	                                                  - INIT_STATUS must equal RANKMASK
	                                                  - DDR_CKE* signals activated (if they weren't already active)
	                                                  - RDIMM register control words 0-15 will be written to RANKMASK-selected
	                                                    RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
	                                                    LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
	                                                    LMC*_DIMM_CTL descriptions below for more details.)
	                                                  - MR0, MR1, MR2, and MR3 will be written to selected ranks
	                                                 SEQUENCE=1=read-leveling:
	                                                  - RANKMASK selects the rank to be read-leveled
	                                                  - MR3 written to selected rank
	                                                 SEQUENCE=2=self-refresh entry:
	                                                  - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
	                                                  - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
	                                                  - DDR_CKE* signals de-activated
	                                                 SEQUENCE=3=self-refresh exit:
	                                                  - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
	                                                  - DDR_CKE* signals activated
	                                                  - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
	                                                 SEQUENCE=4=precharge power-down entry:
	                                                  - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
	                                                  - DDR_CKE* signals de-activated
	                                                 SEQUENCE=5=precharge power-down exit:
	                                                  - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
	                                                  - DDR_CKE* signals activated
	                                                 SEQUENCE=6=write-leveling:
	                                                  - RANKMASK selects the rank to be write-leveled
	                                                  - INIT_STATUS must indicate all ranks with attached DRAM
	                                                  - MR1 and MR2 written to INIT_STATUS-selected ranks
	                                                 SEQUENCE=7=illegal
	                                                 Precharge power-down entry and exit SEQUENCE's may also
	                                                 be automatically generated by the HW when IDLEPOWER!=0.
	                                                 Self-refresh entry SEQUENCE's may also be automatically
	                                                 generated by hardware upon a chip warm or soft reset
	                                                 sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
	                                                 LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
	                                                 to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
	                                                 Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
	                                                 If there are two consecutive power-up/init's without
	                                                 a DRESET assertion between them, LMC asserts DDR_CKE* as part of
	                                                 the first power-up/init, and continues to assert DDR_CKE*
	                                                 through the remainder of the first and the second power-up/init.
	                                                 If DDR_CKE* deactivation and reactivation is needed for
	                                                 a second power-up/init, a DRESET assertion is required
	                                                 between the first and the second. */
	uint64_t ref_zqcs_int                 : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
	                                                 increments. A Refresh sequence is triggered when bits
	                                                 [24:18] are equal to 0, and a ZQCS sequence is triggered
	                                                 when [36:18] are equal to 0.
	                                                 Program [24:18] to RND-DN(tREFI/clkPeriod/512)
	                                                 Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
	                                                 that this value should always be greater than 32, to account for
	                                                 resistor calibration delays.
	                                                 000_00000000_00000000: RESERVED
	                                                 Max Refresh interval = 127 * 512 = 65024 CKs
	                                                 Max ZQCS interval = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for a 800 MHz CK
	                                                 LMC*_CONFIG[INIT_STATUS] determines which ranks receive
	                                                 the REF / ZQCS. LMC does not send any refreshes / ZQCS's
	                                                 when LMC*_CONFIG[INIT_STATUS]=0. */
	uint64_t reset                        : 1;  /**< Reset one-shot pulse for refresh counter,
	                                                 and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
	                                                 CSR's. SW should write this to a one, then re-write
	                                                 it to a zero to cause the reset. */
	uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
	                                                 0=disabled, 1=enabled */
	uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
	                                                 having waited for 2^FORCEWRITE CK cycles. 0=disabled. */
	uint64_t idlepower                    : 3;  /**< Enter precharge power-down mode after the memory
	                                                 controller has been idle for 2^(2+IDLEPOWER) CK cycles.
	                                                 0=disabled.
	                                                 This field should only be programmed after initialization.
	                                                 LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
	                                                 is disabled during the precharge power-down. */
	uint64_t pbank_lsb                    : 4;  /**< DIMM address bit select
	                                                 Reverting to the explanation for ROW_LSB,
	                                                 PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
	                                                 Decoding for pbank_lsb
	                                                  - 0000:DIMM = mem_adr[28]    / rank = mem_adr[27] (if RANK_ENA)
	                                                  - 0001:DIMM = mem_adr[29]    / rank = mem_adr[28]      "
	                                                  - 0010:DIMM = mem_adr[30]    / rank = mem_adr[29]      "
	                                                  - 0011:DIMM = mem_adr[31]    / rank = mem_adr[30]      "
	                                                  - 0100:DIMM = mem_adr[32]    / rank = mem_adr[31]      "
	                                                  - 0101:DIMM = mem_adr[33]    / rank = mem_adr[32]      "
	                                                  - 0110:DIMM = mem_adr[34]    / rank = mem_adr[33]      "
	                                                  - 0111:DIMM = 0              / rank = mem_adr[34]      "
	                                                  - 1000-1111: RESERVED
	                                                 For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
	                                                 DDR3 parts, the column address width = 10, so with
	                                                 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
	                                                 With rank_ena = 0, pbank_lsb = 2
	                                                 With rank_ena = 1, pbank_lsb = 3 */
	uint64_t row_lsb                      : 3;  /**< Row Address bit select
	                                                 Encoding used to determine which memory address
	                                                 bit position represents the low order DDR ROW address.
	                                                 The processor's memory address[34:7] needs to be
	                                                 translated to DRAM addresses (bnk,row,col,rank and DIMM)
	                                                 and that is a function of the following:
	                                                 1. Datapath Width (64)
	                                                 2. \# Banks (8)
	                                                 3. \# Column Bits of the memory part - spec'd indirectly
	                                                    by this register.
	                                                 4. \# Row Bits of the memory part - spec'd indirectly
	                                                 5. \# Ranks in a DIMM - spec'd by RANK_ENA
	                                                 6. \# DIMM's in the system by the register below (PBANK_LSB).
	                                                 Decoding for row_lsb
	                                                  - 000: row_lsb = mem_adr[14]
	                                                  - 001: row_lsb = mem_adr[15]
	                                                  - 010: row_lsb = mem_adr[16]
	                                                  - 011: row_lsb = mem_adr[17]
	                                                  - 100: row_lsb = mem_adr[18]
	                                                  - 101: row_lsb = mem_adr[19]
	                                                  - 110: row_lsb = mem_adr[20]
	                                                  - 111: RESERVED
	                                                 For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
	                                                 DDR3 parts, the column address width = 10, so with
	                                                 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
	uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b ECC
	                                                 check/correct logic. Should be 1 when used with DIMMs
	                                                 with ECC. 0, otherwise.
	                                                 When this mode is turned on, DQ[71:64]
	                                                 on writes, will contain the ECC code generated for
	                                                 the 64 bits of data which will be
	                                                 written in the memory and then later on reads, used
	                                                 to check for Single bit error (which will be auto-
	                                                 corrected) and Double Bit error (which will be
	                                                 reported). When not turned on, DQ[71:64]
	                                                 are driven to 0. Please refer to SEC_ERR, DED_ERR,
	                                                 LMC*_FADR, and LMC*_ECC_SYND registers
	                                                 for diagnostics information when there is an error. */
	uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory sequence that is
	                                                 selected by LMC*_CONFIG[SEQUENCE]. This register is a
	                                                 one-shot and clears itself each time it is set. */
#else	/* __LITTLE_ENDIAN_BITFIELD: identical fields, reverse order */
	uint64_t init_start                   : 1;
	uint64_t ecc_ena                      : 1;
	uint64_t row_lsb                      : 3;
	uint64_t pbank_lsb                    : 4;
	uint64_t idlepower                    : 3;
	uint64_t forcewrite                   : 4;
	uint64_t ecc_adr                      : 1;
	uint64_t reset                        : 1;
	uint64_t ref_zqcs_int                 : 19;
	uint64_t sequence                     : 3;
	uint64_t early_dqx                    : 1;
	uint64_t sref_with_dll                : 1;
	uint64_t rank_ena                     : 1;
	uint64_t rankmask                     : 4;
	uint64_t mirrmask                     : 4;
	uint64_t init_status                  : 4;
	uint64_t reserved_55_63               : 9;
#endif
	} cn63xxp1;
	/* CN66XX layout: cn63xx plus the SCRZ (scramble-register hide) bit;
	   bits <63:60> are reserved. */
	struct cvmx_lmcx_config_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_60_63               : 4;
	uint64_t scrz                         : 1;  /**< Hide LMC*_SCRAMBLE_CFG0 and LMC*_SCRAMBLE_CFG1 when set */
	uint64_t early_unload_d1_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 3
	                                                 reads
	                                                 The recommended EARLY_UNLOAD_D1_R1 value can be calculated
	                                                 after the final LMC*_RLEVEL_RANK3[BYTE*] values are
	                                                 selected (as part of read-leveling initialization).
	                                                 Then, determine the largest read-leveling setting
	                                                 for rank 3 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK3[BYTEi])
	                                                 across all i), then set EARLY_UNLOAD_D1_R1
	                                                 when the low two bits of this largest setting is not
	                                                 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset<1:0>!=3)).
 */
	uint64_t early_unload_d1_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 2
	                                                 reads
	                                                 The recommended EARLY_UNLOAD_D1_R0 value can be calculated
	                                                 after the final LMC*_RLEVEL_RANK2[BYTE*] values are
	                                                 selected (as part of read-leveling initialization).
	                                                 Then, determine the largest read-leveling setting
	                                                 for rank 2 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK2[BYTEi])
	                                                 across all i), then set EARLY_UNLOAD_D1_R0
	                                                 when the low two bits of this largest setting is not
	                                                 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset<1:0>!=3)). */
	uint64_t early_unload_d0_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 1
	                                                 reads
	                                                 The recommended EARLY_UNLOAD_D0_R1 value can be calculated
	                                                 after the final LMC*_RLEVEL_RANK1[BYTE*] values are
	                                                 selected (as part of read-leveling initialization).
	                                                 Then, determine the largest read-leveling setting
	                                                 for rank 1 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK1[BYTEi])
	                                                 across all i), then set EARLY_UNLOAD_D0_R1
	                                                 when the low two bits of this largest setting is not
	                                                 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
	uint64_t early_unload_d0_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 0
	                                                 reads.
	                                                 The recommended EARLY_UNLOAD_D0_R0 value can be calculated
	                                                 after the final LMC*_RLEVEL_RANK0[BYTE*] values are
	                                                 selected (as part of read-leveling initialization).
	                                                 Then, determine the largest read-leveling setting
	                                                 for rank 0 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK0[BYTEi])
	                                                 across all i), then set EARLY_UNLOAD_D0_R0
	                                                 when the low two bits of this largest setting is not
	                                                 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
	uint64_t init_status                  : 4;  /**< Indicates status of initialization
	                                                 INIT_STATUS[n] = 1 implies rank n has been initialized
	                                                 SW must set necessary INIT_STATUS bits with the
	                                                 same LMC*_CONFIG write that initiates
	                                                 power-up/init and self-refresh exit sequences
	                                                 (if the required INIT_STATUS bits are not already
	                                                 set before LMC initiates the sequence).
	                                                 INIT_STATUS determines the chip-selects that assert
	                                                 during refresh, ZQCS, and precharge power-down and
	                                                 self-refresh entry/exit SEQUENCE's. */
	uint64_t mirrmask                     : 4;  /**< Mask determining which ranks are address-mirrored.
	                                                 MIRRMASK<n> = 1 means Rank n addresses are mirrored
	                                                 for 0 <= n <= 3
	                                                 A mirrored read/write has these differences:
	                                                  - DDR_BA<1> is swapped with DDR_BA<0>
	                                                  - DDR_A<8> is swapped with DDR_A<7>
	                                                  - DDR_A<6> is swapped with DDR_A<5>
	                                                  - DDR_A<4> is swapped with DDR_A<3>
	                                                 When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
	uint64_t rankmask                     : 4;  /**< Mask to select rank to be leveled/initialized.
	                                                 To write-level/read-level/initialize rank i, set RANKMASK<i>
	                                                                 RANK_ENA=1       RANK_ENA=0
	                                                   RANKMASK<0> = DIMM0_CS0        DIMM0_CS0
	                                                   RANKMASK<1> = DIMM0_CS1        MBZ
	                                                   RANKMASK<2> = DIMM1_CS0        DIMM1_CS0
	                                                   RANKMASK<3> = DIMM1_CS1        MBZ
	                                                 For read/write leveling, each rank has to be leveled separately,
	                                                 so RANKMASK should only have one bit set.
	                                                 RANKMASK is not used during self-refresh entry/exit and
	                                                 precharge power-down entry/exit instruction sequences.
	                                                 When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
	uint64_t rank_ena                     : 1;  /**< RANK ena (for use with dual-rank DIMMs)
	                                                 For dual-rank DIMMs, the rank_ena bit will enable
	                                                 the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
	                                                 (pbank_lsb-1) address bit.
	                                                 Write 0 for SINGLE ranked DIMM's. */
	uint64_t sref_with_dll                : 1;  /**< Self-refresh entry/exit write MR1 and MR2
	                                                 When set, self-refresh entry and exit instruction sequences
	                                                 write MR1 and MR2 (in all ranks). (The writes occur before
	                                                 self-refresh entry, and after self-refresh exit.)
	                                                 When clear, self-refresh entry and exit instruction sequences
	                                                 do not write any registers in the DDR3 parts. */
	uint64_t early_dqx                    : 1;  /**< Send DQx signals one CK cycle earlier for the case when
	                                                 the shortest DQx lines have a larger delay than the CK line */
	uint64_t sequence                     : 3;  /**< Selects the sequence that LMC runs after a 0->1
	                                                 transition on LMC*_CONFIG[INIT_START].
	                                                 SEQUENCE=0=power-up/init:
	                                                  - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
	                                                  - INIT_STATUS must equal RANKMASK
	                                                  - DDR_CKE* signals activated (if they weren't already active)
	                                                  - RDIMM register control words 0-15 will be written to RANKMASK-selected
	                                                    RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
	                                                    LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
	                                                    LMC*_DIMM_CTL descriptions below for more details.)
	                                                  - MR0, MR1, MR2, and MR3 will be written to selected ranks
	                                                 SEQUENCE=1=read-leveling:
	                                                  - RANKMASK selects the rank to be read-leveled
	                                                  - MR3 written to selected rank
	                                                 SEQUENCE=2=self-refresh entry:
	                                                  - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
	                                                  - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
	                                                  - DDR_CKE* signals de-activated
	                                                 SEQUENCE=3=self-refresh exit:
	                                                  - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
	                                                  - DDR_CKE* signals activated
	                                                  - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
	                                                 SEQUENCE=4=precharge power-down entry:
	                                                  - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
	                                                  - DDR_CKE* signals de-activated
	                                                 SEQUENCE=5=precharge power-down exit:
	                                                  - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
	                                                  - DDR_CKE* signals activated
	                                                 SEQUENCE=6=write-leveling:
	                                                  - RANKMASK selects the rank to be write-leveled
	                                                  - INIT_STATUS must indicate all ranks with attached DRAM
	                                                  - MR1 and MR2 written to INIT_STATUS-selected ranks
	                                                 SEQUENCE=7=illegal
	                                                 Precharge power-down entry and exit SEQUENCE's may also
	                                                 be automatically generated by the HW when IDLEPOWER!=0.
	                                                 Self-refresh entry SEQUENCE's may also be automatically
	                                                 generated by hardware upon a chip warm or soft reset
	                                                 sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
	                                                 LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
	                                                 to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
	                                                 Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
	                                                 If there are two consecutive power-up/init's without
	                                                 a DRESET assertion between them, LMC asserts DDR_CKE* as part of
	                                                 the first power-up/init, and continues to assert DDR_CKE*
	                                                 through the remainder of the first and the second power-up/init.
	                                                 If DDR_CKE* deactivation and reactivation is needed for
	                                                 a second power-up/init, a DRESET assertion is required
	                                                 between the first and the second. */
	uint64_t ref_zqcs_int                 : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
	                                                 increments. A Refresh sequence is triggered when bits
	                                                 [24:18] are equal to 0, and a ZQCS sequence is triggered
	                                                 when [36:18] are equal to 0.
	                                                 Program [24:18] to RND-DN(tREFI/clkPeriod/512)
	                                                 Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
	                                                 that this value should always be greater than 32, to account for
	                                                 resistor calibration delays.
	                                                 000_00000000_00000000: RESERVED
	                                                 Max Refresh interval = 127 * 512 = 65024 CKs
	                                                 Max ZQCS interval = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for a 800 MHz CK
	                                                 LMC*_CONFIG[INIT_STATUS] determines which ranks receive
	                                                 the REF / ZQCS. LMC does not send any refreshes / ZQCS's
	                                                 when LMC*_CONFIG[INIT_STATUS]=0. */
	uint64_t reset                        : 1;  /**< Reset one-shot pulse for refresh counter,
	                                                 and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
	                                                 CSR's. SW should write this to a one, then re-write
	                                                 it to a zero to cause the reset. */
	uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
	                                                 0=disabled, 1=enabled */
	uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
	                                                 having waited for 2^FORCEWRITE CK cycles. 0=disabled. */
	uint64_t idlepower                    : 3;  /**< Enter precharge power-down mode after the memory
	                                                 controller has been idle for 2^(2+IDLEPOWER) CK cycles.
	                                                 0=disabled.
	                                                 This field should only be programmed after initialization.
	                                                 LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
	                                                 is disabled during the precharge power-down. */
	uint64_t pbank_lsb                    : 4;  /**< DIMM address bit select
	                                                 Reverting to the explanation for ROW_LSB,
	                                                 PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
	                                                 Decoding for pbank_lsb
	                                                  - 0000:DIMM = mem_adr[28]    / rank = mem_adr[27] (if RANK_ENA)
	                                                  - 0001:DIMM = mem_adr[29]    / rank = mem_adr[28]      "
	                                                  - 0010:DIMM = mem_adr[30]    / rank = mem_adr[29]      "
	                                                  - 0011:DIMM = mem_adr[31]    / rank = mem_adr[30]      "
	                                                  - 0100:DIMM = mem_adr[32]    / rank = mem_adr[31]      "
	                                                  - 0101:DIMM = mem_adr[33]    / rank = mem_adr[32]      "
	                                                  - 0110:DIMM = mem_adr[34]    / rank = mem_adr[33]      "
	                                                  - 0111:DIMM = 0              / rank = mem_adr[34]      "
	                                                  - 1000-1111: RESERVED
	                                                 For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
	                                                 DDR3 parts, the column address width = 10, so with
	                                                 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
	                                                 With rank_ena = 0, pbank_lsb = 2
	                                                 With rank_ena = 1, pbank_lsb = 3 */
	uint64_t row_lsb                      : 3;  /**< Row Address bit select
	                                                 Encoding used to determine which memory address
	                                                 bit position represents the low order DDR ROW address.
	                                                 The processor's memory address[34:7] needs to be
	                                                 translated to DRAM addresses (bnk,row,col,rank and DIMM)
	                                                 and that is a function of the following:
	                                                 1. Datapath Width (64)
	                                                 2. \# Banks (8)
	                                                 3. \# Column Bits of the memory part - spec'd indirectly
	                                                    by this register.
	                                                 4. \# Row Bits of the memory part - spec'd indirectly
	                                                 5. \# Ranks in a DIMM - spec'd by RANK_ENA
	                                                 6. \# DIMM's in the system by the register below (PBANK_LSB).
	                                                 Decoding for row_lsb
	                                                  - 000: row_lsb = mem_adr[14]
	                                                  - 001: row_lsb = mem_adr[15]
	                                                  - 010: row_lsb = mem_adr[16]
	                                                  - 011: row_lsb = mem_adr[17]
	                                                  - 100: row_lsb = mem_adr[18]
	                                                  - 101: row_lsb = mem_adr[19]
	                                                  - 110: row_lsb = mem_adr[20]
	                                                  - 111: RESERVED
	                                                 For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
	                                                 DDR3 parts, the column address width = 10, so with
	                                                 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
	uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b ECC
	                                                 check/correct logic. Should be 1 when used with DIMMs
	                                                 with ECC. 0, otherwise.
	                                                 When this mode is turned on, DQ[71:64]
	                                                 on writes, will contain the ECC code generated for
	                                                 the 64 bits of data which will be
	                                                 written in the memory and then later on reads, used
	                                                 to check for Single bit error (which will be auto-
	                                                 corrected) and Double Bit error (which will be
	                                                 reported). When not turned on, DQ[71:64]
	                                                 are driven to 0. Please refer to SEC_ERR, DED_ERR,
	                                                 LMC*_FADR, LMC*_SCRAMBLED_FADR and LMC*_ECC_SYND registers
	                                                 for diagnostics information when there is an error. */
	uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory sequence that is
	                                                 selected by LMC*_CONFIG[SEQUENCE]. This register is a
	                                                 one-shot and clears itself each time it is set. */
#else	/* __LITTLE_ENDIAN_BITFIELD: identical fields, reverse order */
	uint64_t init_start                   : 1;
	uint64_t ecc_ena                      : 1;
	uint64_t row_lsb                      : 3;
	uint64_t pbank_lsb                    : 4;
	uint64_t idlepower                    : 3;
	uint64_t forcewrite                   : 4;
	uint64_t ecc_adr                      : 1;
	uint64_t reset                        : 1;
	uint64_t ref_zqcs_int                 : 19;
	uint64_t sequence                     : 3;
	uint64_t early_dqx                    : 1;
	uint64_t sref_with_dll                : 1;
	uint64_t rank_ena                     : 1;
	uint64_t rankmask                     : 4;
	uint64_t mirrmask                     : 4;
	uint64_t init_status                  : 4;
	uint64_t early_unload_d0_r0           : 1;
	uint64_t early_unload_d0_r1           : 1;
	uint64_t early_unload_d1_r0           : 1;
	uint64_t early_unload_d1_r1           : 1;
	uint64_t scrz                         : 1;
	uint64_t reserved_60_63               : 4;
#endif
	} cn66xx;
	/* CN68XX (all passes) reuses the cn63xx layout; CNF71XX uses the
	   generic layout. */
	struct cvmx_lmcx_config_cn63xx cn68xx;
	struct cvmx_lmcx_config_cn63xx cn68xxp1;
	struct cvmx_lmcx_config_s cnf71xx;
};
typedef union cvmx_lmcx_config cvmx_lmcx_config_t;

/**
 * cvmx_lmc#_control
 *
 * LMC_CONTROL = LMC Control
 * This register is an assortment of various control fields needed by the memory controller
 */
union cvmx_lmcx_control {
	uint64_t u64;
	struct cvmx_lmcx_control_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t scramble_ena                 : 1;  /**< When set, will enable the scramble/descramble logic */
	uint64_t thrcnt                       : 12; /**< Fine Count */
2663232812Sjmallett uint64_t persub : 8; /**< Offset for DFA rate-matching */ 2664232812Sjmallett uint64_t thrmax : 4; /**< Fine Rate Matching Max Bucket Size 2665232812Sjmallett 0 = Reserved 2666232812Sjmallett In conjunction with the Coarse Rate Matching Logic, the Fine Rate 2667232812Sjmallett Matching Logic gives SW the ability to prioritize DFA Rds over 2668232812Sjmallett L2C Writes. Higher PERSUB values result in a lower DFA Rd 2669232812Sjmallett bandwidth. */ 2670232812Sjmallett uint64_t crm_cnt : 5; /**< Coarse Count */ 2671232812Sjmallett uint64_t crm_thr : 5; /**< Coarse Rate Matching Threshold */ 2672232812Sjmallett uint64_t crm_max : 5; /**< Coarse Rate Matching Max Bucket Size 2673232812Sjmallett 0 = Reserved 2674232812Sjmallett The Coarse Rate Matching Logic is used to control the bandwidth 2675232812Sjmallett allocated to DFA Rds. CRM_MAX is subdivided into two regions 2676232812Sjmallett with DFA Rds being preferred over LMC Rd/Wrs when 2677232812Sjmallett CRM_CNT < CRM_THR. CRM_CNT increments by 1 when a DFA Rd is 2678232812Sjmallett slotted and by 2 when a LMC Rd/Wr is slotted, and rolls over 2679232812Sjmallett when CRM_MAX is reached. */ 2680232812Sjmallett uint64_t rodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a 2681232812Sjmallett RD cmd is delayed an additional CK cycle. */ 2682232812Sjmallett uint64_t wodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a 2683232812Sjmallett WR cmd is delayed an additional CK cycle. */ 2684232812Sjmallett uint64_t bprch : 2; /**< Back Porch Enable: When set, the turn-on time for 2685232812Sjmallett the default DDR_DQ/DQS drivers is delayed an additional BPRCH 2686232812Sjmallett CK cycles. 
2687232812Sjmallett 00 = 0 CKs 2688232812Sjmallett 01 = 1 CKs 2689232812Sjmallett 10 = 2 CKs 2690232812Sjmallett 11 = 3 CKs */ 2691232812Sjmallett uint64_t ext_zqcs_dis : 1; /**< Disable (external) auto-zqcs calibration 2692232812Sjmallett When clear, LMC runs external ZQ calibration 2693232812Sjmallett every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */ 2694232812Sjmallett uint64_t int_zqcs_dis : 1; /**< Disable (internal) auto-zqcs calibration 2695232812Sjmallett When clear, LMC runs internal ZQ calibration 2696232812Sjmallett every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */ 2697232812Sjmallett uint64_t auto_dclkdis : 1; /**< When 1, LMC will automatically shut off its internal 2698232812Sjmallett clock to conserve power when there is no traffic. Note 2699232812Sjmallett that this has no effect on the DDR3 PHY and pads clocks. */ 2700232812Sjmallett uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then 2701232812Sjmallett bank[2:0]=address[9:7] ^ address[14:12] 2702232812Sjmallett else 2703232812Sjmallett bank[2:0]=address[9:7] */ 2704232812Sjmallett uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before 2705232812Sjmallett forcing reads to interrupt. */ 2706232812Sjmallett uint64_t nxm_write_en : 1; /**< NXM Write mode 2707232812Sjmallett When clear, LMC discards writes to addresses that don't 2708232812Sjmallett exist in the DRAM (as defined by LMC*_NXM configuration). 2709232812Sjmallett When set, LMC completes writes to addresses that don't 2710232812Sjmallett exist in the DRAM at an aliased address. */ 2711232812Sjmallett uint64_t elev_prio_dis : 1; /**< Disable elevate priority logic. 2712232812Sjmallett When set, writes are sent in 2713232812Sjmallett regardless of priority information from L2C. 
*/ 2714232812Sjmallett uint64_t inorder_wr : 1; /**< Send writes in order(regardless of priority) */ 2715232812Sjmallett uint64_t inorder_rd : 1; /**< Send reads in order (regardless of priority) */ 2716232812Sjmallett uint64_t throttle_wr : 1; /**< When set, use at most one IFB for writes */ 2717232812Sjmallett uint64_t throttle_rd : 1; /**< When set, use at most one IFB for reads */ 2718232812Sjmallett uint64_t fprch2 : 2; /**< Front Porch Enable: When set, the turn-off 2719232812Sjmallett time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier. 2720232812Sjmallett 00 = 0 CKs 2721232812Sjmallett 01 = 1 CKs 2722232812Sjmallett 10 = 2 CKs 2723232812Sjmallett 11 = RESERVED */ 2724232812Sjmallett uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR3. 2725232812Sjmallett This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0, 2726232812Sjmallett and clear otherwise. */ 2727232812Sjmallett uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and 2728232812Sjmallett address. This mode helps relieve setup time pressure 2729232812Sjmallett on the Address and command bus which nominally have 2730232812Sjmallett a very large fanout. Please refer to Micron's tech 2731232812Sjmallett note tn_47_01 titled "DDR2-533 Memory Design Guide 2732232812Sjmallett for Two Dimm Unbuffered Systems" for physical details. */ 2733232812Sjmallett uint64_t bwcnt : 1; /**< Bus utilization counter Clear. 2734232812Sjmallett Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and 2735232812Sjmallett LMC*_DCLK_CNT registers. SW should first write this 2736232812Sjmallett field to a one, then write this field to a zero to 2737232812Sjmallett clear the CSR's. */ 2738232812Sjmallett uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use 2739232812Sjmallett of JEDEC Registered DIMMs which require address and 2740232812Sjmallett control bits to be registered in the controller. 
*/ 2741232812Sjmallett#else 2742232812Sjmallett uint64_t rdimm_ena : 1; 2743232812Sjmallett uint64_t bwcnt : 1; 2744232812Sjmallett uint64_t ddr2t : 1; 2745232812Sjmallett uint64_t pocas : 1; 2746232812Sjmallett uint64_t fprch2 : 2; 2747232812Sjmallett uint64_t throttle_rd : 1; 2748232812Sjmallett uint64_t throttle_wr : 1; 2749232812Sjmallett uint64_t inorder_rd : 1; 2750232812Sjmallett uint64_t inorder_wr : 1; 2751232812Sjmallett uint64_t elev_prio_dis : 1; 2752232812Sjmallett uint64_t nxm_write_en : 1; 2753232812Sjmallett uint64_t max_write_batch : 4; 2754232812Sjmallett uint64_t xor_bank : 1; 2755232812Sjmallett uint64_t auto_dclkdis : 1; 2756232812Sjmallett uint64_t int_zqcs_dis : 1; 2757232812Sjmallett uint64_t ext_zqcs_dis : 1; 2758232812Sjmallett uint64_t bprch : 2; 2759232812Sjmallett uint64_t wodt_bprch : 1; 2760232812Sjmallett uint64_t rodt_bprch : 1; 2761232812Sjmallett uint64_t crm_max : 5; 2762232812Sjmallett uint64_t crm_thr : 5; 2763232812Sjmallett uint64_t crm_cnt : 5; 2764232812Sjmallett uint64_t thrmax : 4; 2765232812Sjmallett uint64_t persub : 8; 2766232812Sjmallett uint64_t thrcnt : 12; 2767232812Sjmallett uint64_t scramble_ena : 1; 2768232812Sjmallett#endif 2769232812Sjmallett } s; 2770232812Sjmallett struct cvmx_lmcx_control_s cn61xx; 2771232812Sjmallett struct cvmx_lmcx_control_cn63xx { 2772232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 2773215976Sjmallett uint64_t reserved_24_63 : 40; 2774215976Sjmallett uint64_t rodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a 2775215976Sjmallett RD cmd is delayed an additional CK cycle. */ 2776215976Sjmallett uint64_t wodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a 2777215976Sjmallett WR cmd is delayed an additional CK cycle. */ 2778215976Sjmallett uint64_t bprch : 2; /**< Back Porch Enable: When set, the turn-on time for 2779215976Sjmallett the default DDR_DQ/DQS drivers is delayed an additional BPRCH 2780215976Sjmallett CK cycles. 
2781215976Sjmallett 00 = 0 CKs 2782215976Sjmallett 01 = 1 CKs 2783215976Sjmallett 10 = 2 CKs 2784215976Sjmallett 11 = 3 CKs */ 2785215976Sjmallett uint64_t ext_zqcs_dis : 1; /**< Disable (external) auto-zqcs calibration 2786215976Sjmallett When clear, LMC runs external ZQ calibration 2787215976Sjmallett every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */ 2788215976Sjmallett uint64_t int_zqcs_dis : 1; /**< Disable (internal) auto-zqcs calibration 2789215976Sjmallett When clear, LMC runs internal ZQ calibration 2790215976Sjmallett every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */ 2791215976Sjmallett uint64_t auto_dclkdis : 1; /**< When 1, LMC will automatically shut off its internal 2792215976Sjmallett clock to conserve power when there is no traffic. Note 2793215976Sjmallett that this has no effect on the DDR3 PHY and pads clocks. */ 2794215976Sjmallett uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then 2795215976Sjmallett bank[2:0]=address[9:7] ^ address[14:12] 2796215976Sjmallett else 2797215976Sjmallett bank[2:0]=address[9:7] */ 2798215976Sjmallett uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before 2799215976Sjmallett forcing reads to interrupt. */ 2800215976Sjmallett uint64_t nxm_write_en : 1; /**< NXM Write mode 2801215976Sjmallett When clear, LMC discards writes to addresses that don't 2802215976Sjmallett exist in the DRAM (as defined by LMC*_NXM configuration). 2803215976Sjmallett When set, LMC completes writes to addresses that don't 2804215976Sjmallett exist in the DRAM at an aliased address. */ 2805215976Sjmallett uint64_t elev_prio_dis : 1; /**< Disable elevate priority logic. 2806215976Sjmallett When set, writes are sent in 2807215976Sjmallett regardless of priority information from L2C. 
*/ 2808215976Sjmallett uint64_t inorder_wr : 1; /**< Send writes in order(regardless of priority) */ 2809215976Sjmallett uint64_t inorder_rd : 1; /**< Send reads in order (regardless of priority) */ 2810215976Sjmallett uint64_t throttle_wr : 1; /**< When set, use at most one IFB for writes */ 2811215976Sjmallett uint64_t throttle_rd : 1; /**< When set, use at most one IFB for reads */ 2812215976Sjmallett uint64_t fprch2 : 2; /**< Front Porch Enable: When set, the turn-off 2813215976Sjmallett time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier. 2814215976Sjmallett 00 = 0 CKs 2815215976Sjmallett 01 = 1 CKs 2816215976Sjmallett 10 = 2 CKs 2817215976Sjmallett 11 = RESERVED */ 2818215976Sjmallett uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR3. 2819215976Sjmallett This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0, 2820215976Sjmallett and clear otherwise. */ 2821215976Sjmallett uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and 2822215976Sjmallett address. This mode helps relieve setup time pressure 2823215976Sjmallett on the Address and command bus which nominally have 2824215976Sjmallett a very large fanout. Please refer to Micron's tech 2825215976Sjmallett note tn_47_01 titled "DDR2-533 Memory Design Guide 2826215976Sjmallett for Two Dimm Unbuffered Systems" for physical details. */ 2827215976Sjmallett uint64_t bwcnt : 1; /**< Bus utilization counter Clear. 2828215976Sjmallett Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and 2829215976Sjmallett LMC*_DCLK_CNT registers. SW should first write this 2830215976Sjmallett field to a one, then write this field to a zero to 2831215976Sjmallett clear the CSR's. */ 2832215976Sjmallett uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use 2833215976Sjmallett of JEDEC Registered DIMMs which require address and 2834215976Sjmallett control bits to be registered in the controller. 
*/ 2835215976Sjmallett#else 2836215976Sjmallett uint64_t rdimm_ena : 1; 2837215976Sjmallett uint64_t bwcnt : 1; 2838215976Sjmallett uint64_t ddr2t : 1; 2839215976Sjmallett uint64_t pocas : 1; 2840215976Sjmallett uint64_t fprch2 : 2; 2841215976Sjmallett uint64_t throttle_rd : 1; 2842215976Sjmallett uint64_t throttle_wr : 1; 2843215976Sjmallett uint64_t inorder_rd : 1; 2844215976Sjmallett uint64_t inorder_wr : 1; 2845215976Sjmallett uint64_t elev_prio_dis : 1; 2846215976Sjmallett uint64_t nxm_write_en : 1; 2847215976Sjmallett uint64_t max_write_batch : 4; 2848215976Sjmallett uint64_t xor_bank : 1; 2849215976Sjmallett uint64_t auto_dclkdis : 1; 2850215976Sjmallett uint64_t int_zqcs_dis : 1; 2851215976Sjmallett uint64_t ext_zqcs_dis : 1; 2852215976Sjmallett uint64_t bprch : 2; 2853215976Sjmallett uint64_t wodt_bprch : 1; 2854215976Sjmallett uint64_t rodt_bprch : 1; 2855215976Sjmallett uint64_t reserved_24_63 : 40; 2856215976Sjmallett#endif 2857232812Sjmallett } cn63xx; 2858232812Sjmallett struct cvmx_lmcx_control_cn63xx cn63xxp1; 2859232812Sjmallett struct cvmx_lmcx_control_cn66xx { 2860232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 2861232812Sjmallett uint64_t scramble_ena : 1; /**< When set, will enable the scramble/descramble logic */ 2862232812Sjmallett uint64_t reserved_24_62 : 39; 2863232812Sjmallett uint64_t rodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a 2864232812Sjmallett RD cmd is delayed an additional CK cycle. */ 2865232812Sjmallett uint64_t wodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a 2866232812Sjmallett WR cmd is delayed an additional CK cycle. */ 2867232812Sjmallett uint64_t bprch : 2; /**< Back Porch Enable: When set, the turn-on time for 2868232812Sjmallett the default DDR_DQ/DQS drivers is delayed an additional BPRCH 2869232812Sjmallett CK cycles. 
2870232812Sjmallett 00 = 0 CKs 2871232812Sjmallett 01 = 1 CKs 2872232812Sjmallett 10 = 2 CKs 2873232812Sjmallett 11 = 3 CKs */ 2874232812Sjmallett uint64_t ext_zqcs_dis : 1; /**< Disable (external) auto-zqcs calibration 2875232812Sjmallett When clear, LMC runs external ZQ calibration 2876232812Sjmallett every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */ 2877232812Sjmallett uint64_t int_zqcs_dis : 1; /**< Disable (internal) auto-zqcs calibration 2878232812Sjmallett When clear, LMC runs internal ZQ calibration 2879232812Sjmallett every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */ 2880232812Sjmallett uint64_t auto_dclkdis : 1; /**< When 1, LMC will automatically shut off its internal 2881232812Sjmallett clock to conserve power when there is no traffic. Note 2882232812Sjmallett that this has no effect on the DDR3 PHY and pads clocks. */ 2883232812Sjmallett uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then 2884232812Sjmallett bank[2:0]=address[9:7] ^ address[14:12] 2885232812Sjmallett else 2886232812Sjmallett bank[2:0]=address[9:7] */ 2887232812Sjmallett uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before 2888232812Sjmallett forcing reads to interrupt. */ 2889232812Sjmallett uint64_t nxm_write_en : 1; /**< NXM Write mode 2890232812Sjmallett When clear, LMC discards writes to addresses that don't 2891232812Sjmallett exist in the DRAM (as defined by LMC*_NXM configuration). 2892232812Sjmallett When set, LMC completes writes to addresses that don't 2893232812Sjmallett exist in the DRAM at an aliased address. */ 2894232812Sjmallett uint64_t elev_prio_dis : 1; /**< Disable elevate priority logic. 2895232812Sjmallett When set, writes are sent in 2896232812Sjmallett regardless of priority information from L2C. 
*/ 2897232812Sjmallett uint64_t inorder_wr : 1; /**< Send writes in order(regardless of priority) */ 2898232812Sjmallett uint64_t inorder_rd : 1; /**< Send reads in order (regardless of priority) */ 2899232812Sjmallett uint64_t throttle_wr : 1; /**< When set, use at most one IFB for writes */ 2900232812Sjmallett uint64_t throttle_rd : 1; /**< When set, use at most one IFB for reads */ 2901232812Sjmallett uint64_t fprch2 : 2; /**< Front Porch Enable: When set, the turn-off 2902232812Sjmallett time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier. 2903232812Sjmallett 00 = 0 CKs 2904232812Sjmallett 01 = 1 CKs 2905232812Sjmallett 10 = 2 CKs 2906232812Sjmallett 11 = RESERVED */ 2907232812Sjmallett uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR3. 2908232812Sjmallett This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0, 2909232812Sjmallett and clear otherwise. */ 2910232812Sjmallett uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and 2911232812Sjmallett address. This mode helps relieve setup time pressure 2912232812Sjmallett on the Address and command bus which nominally have 2913232812Sjmallett a very large fanout. Please refer to Micron's tech 2914232812Sjmallett note tn_47_01 titled "DDR2-533 Memory Design Guide 2915232812Sjmallett for Two Dimm Unbuffered Systems" for physical details. */ 2916232812Sjmallett uint64_t bwcnt : 1; /**< Bus utilization counter Clear. 2917232812Sjmallett Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and 2918232812Sjmallett LMC*_DCLK_CNT registers. SW should first write this 2919232812Sjmallett field to a one, then write this field to a zero to 2920232812Sjmallett clear the CSR's. */ 2921232812Sjmallett uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use 2922232812Sjmallett of JEDEC Registered DIMMs which require address and 2923232812Sjmallett control bits to be registered in the controller. 
*/ 2924232812Sjmallett#else 2925232812Sjmallett uint64_t rdimm_ena : 1; 2926232812Sjmallett uint64_t bwcnt : 1; 2927232812Sjmallett uint64_t ddr2t : 1; 2928232812Sjmallett uint64_t pocas : 1; 2929232812Sjmallett uint64_t fprch2 : 2; 2930232812Sjmallett uint64_t throttle_rd : 1; 2931232812Sjmallett uint64_t throttle_wr : 1; 2932232812Sjmallett uint64_t inorder_rd : 1; 2933232812Sjmallett uint64_t inorder_wr : 1; 2934232812Sjmallett uint64_t elev_prio_dis : 1; 2935232812Sjmallett uint64_t nxm_write_en : 1; 2936232812Sjmallett uint64_t max_write_batch : 4; 2937232812Sjmallett uint64_t xor_bank : 1; 2938232812Sjmallett uint64_t auto_dclkdis : 1; 2939232812Sjmallett uint64_t int_zqcs_dis : 1; 2940232812Sjmallett uint64_t ext_zqcs_dis : 1; 2941232812Sjmallett uint64_t bprch : 2; 2942232812Sjmallett uint64_t wodt_bprch : 1; 2943232812Sjmallett uint64_t rodt_bprch : 1; 2944232812Sjmallett uint64_t reserved_24_62 : 39; 2945232812Sjmallett uint64_t scramble_ena : 1; 2946232812Sjmallett#endif 2947232812Sjmallett } cn66xx; 2948232812Sjmallett struct cvmx_lmcx_control_cn68xx { 2949232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 2950232812Sjmallett uint64_t reserved_63_63 : 1; 2951232812Sjmallett uint64_t thrcnt : 12; /**< Fine Count */ 2952232812Sjmallett uint64_t persub : 8; /**< Offset for DFA rate-matching */ 2953232812Sjmallett uint64_t thrmax : 4; /**< Fine Rate Matching Max Bucket Size 2954232812Sjmallett 0 = Reserved 2955232812Sjmallett In conjunction with the Coarse Rate Matching Logic, the Fine Rate 2956232812Sjmallett Matching Logic gives SW the ability to prioritize DFA Rds over 2957232812Sjmallett L2C Writes. Higher PERSUB values result in a lower DFA Rd 2958232812Sjmallett bandwidth. 
*/ 2959232812Sjmallett uint64_t crm_cnt : 5; /**< Coarse Count */ 2960232812Sjmallett uint64_t crm_thr : 5; /**< Coarse Rate Matching Threshold */ 2961232812Sjmallett uint64_t crm_max : 5; /**< Coarse Rate Matching Max Bucket Size 2962232812Sjmallett 0 = Reserved 2963232812Sjmallett The Coarse Rate Matching Logic is used to control the bandwidth 2964232812Sjmallett allocated to DFA Rds. CRM_MAX is subdivided into two regions 2965232812Sjmallett with DFA Rds being preferred over LMC Rd/Wrs when 2966232812Sjmallett CRM_CNT < CRM_THR. CRM_CNT increments by 1 when a DFA Rd is 2967232812Sjmallett slotted and by 2 when a LMC Rd/Wr is slotted, and rolls over 2968232812Sjmallett when CRM_MAX is reached. */ 2969232812Sjmallett uint64_t rodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a 2970232812Sjmallett RD cmd is delayed an additional CK cycle. */ 2971232812Sjmallett uint64_t wodt_bprch : 1; /**< When set, the turn-off time for the ODT pin during a 2972232812Sjmallett WR cmd is delayed an additional CK cycle. */ 2973232812Sjmallett uint64_t bprch : 2; /**< Back Porch Enable: When set, the turn-on time for 2974232812Sjmallett the default DDR_DQ/DQS drivers is delayed an additional BPRCH 2975232812Sjmallett CK cycles. 2976232812Sjmallett 00 = 0 CKs 2977232812Sjmallett 01 = 1 CKs 2978232812Sjmallett 10 = 2 CKs 2979232812Sjmallett 11 = 3 CKs */ 2980232812Sjmallett uint64_t ext_zqcs_dis : 1; /**< Disable (external) auto-zqcs calibration 2981232812Sjmallett When clear, LMC runs external ZQ calibration 2982232812Sjmallett every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */ 2983232812Sjmallett uint64_t int_zqcs_dis : 1; /**< Disable (internal) auto-zqcs calibration 2984232812Sjmallett When clear, LMC runs internal ZQ calibration 2985232812Sjmallett every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. 
*/ 2986232812Sjmallett uint64_t auto_dclkdis : 1; /**< When 1, LMC will automatically shut off its internal 2987232812Sjmallett clock to conserve power when there is no traffic. Note 2988232812Sjmallett that this has no effect on the DDR3 PHY and pads clocks. */ 2989232812Sjmallett uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then 2990232812Sjmallett bank[2:0]=address[9:7] ^ address[14:12] 2991232812Sjmallett else 2992232812Sjmallett bank[2:0]=address[9:7] */ 2993232812Sjmallett uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before 2994232812Sjmallett forcing reads to interrupt. */ 2995232812Sjmallett uint64_t nxm_write_en : 1; /**< NXM Write mode 2996232812Sjmallett When clear, LMC discards writes to addresses that don't 2997232812Sjmallett exist in the DRAM (as defined by LMC*_NXM configuration). 2998232812Sjmallett When set, LMC completes writes to addresses that don't 2999232812Sjmallett exist in the DRAM at an aliased address. */ 3000232812Sjmallett uint64_t elev_prio_dis : 1; /**< Disable elevate priority logic. 3001232812Sjmallett When set, writes are sent in 3002232812Sjmallett regardless of priority information from L2C. */ 3003232812Sjmallett uint64_t inorder_wr : 1; /**< Send writes in order(regardless of priority) */ 3004232812Sjmallett uint64_t inorder_rd : 1; /**< Send reads in order (regardless of priority) */ 3005232812Sjmallett uint64_t throttle_wr : 1; /**< When set, use at most one IFB for writes */ 3006232812Sjmallett uint64_t throttle_rd : 1; /**< When set, use at most one IFB for reads */ 3007232812Sjmallett uint64_t fprch2 : 2; /**< Front Porch Enable: When set, the turn-off 3008232812Sjmallett time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier. 3009232812Sjmallett 00 = 0 CKs 3010232812Sjmallett 01 = 1 CKs 3011232812Sjmallett 10 = 2 CKs 3012232812Sjmallett 11 = RESERVED */ 3013232812Sjmallett uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR3. 
3014232812Sjmallett This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0, 3015232812Sjmallett and clear otherwise. */ 3016232812Sjmallett uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and 3017232812Sjmallett address. This mode helps relieve setup time pressure 3018232812Sjmallett on the Address and command bus which nominally have 3019232812Sjmallett a very large fanout. Please refer to Micron's tech 3020232812Sjmallett note tn_47_01 titled "DDR2-533 Memory Design Guide 3021232812Sjmallett for Two Dimm Unbuffered Systems" for physical details. */ 3022232812Sjmallett uint64_t bwcnt : 1; /**< Bus utilization counter Clear. 3023232812Sjmallett Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and 3024232812Sjmallett LMC*_DCLK_CNT registers. SW should first write this 3025232812Sjmallett field to a one, then write this field to a zero to 3026232812Sjmallett clear the CSR's. */ 3027232812Sjmallett uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use 3028232812Sjmallett of JEDEC Registered DIMMs which require address and 3029232812Sjmallett control bits to be registered in the controller. 
*/ 3030232812Sjmallett#else 3031232812Sjmallett uint64_t rdimm_ena : 1; 3032232812Sjmallett uint64_t bwcnt : 1; 3033232812Sjmallett uint64_t ddr2t : 1; 3034232812Sjmallett uint64_t pocas : 1; 3035232812Sjmallett uint64_t fprch2 : 2; 3036232812Sjmallett uint64_t throttle_rd : 1; 3037232812Sjmallett uint64_t throttle_wr : 1; 3038232812Sjmallett uint64_t inorder_rd : 1; 3039232812Sjmallett uint64_t inorder_wr : 1; 3040232812Sjmallett uint64_t elev_prio_dis : 1; 3041232812Sjmallett uint64_t nxm_write_en : 1; 3042232812Sjmallett uint64_t max_write_batch : 4; 3043232812Sjmallett uint64_t xor_bank : 1; 3044232812Sjmallett uint64_t auto_dclkdis : 1; 3045232812Sjmallett uint64_t int_zqcs_dis : 1; 3046232812Sjmallett uint64_t ext_zqcs_dis : 1; 3047232812Sjmallett uint64_t bprch : 2; 3048232812Sjmallett uint64_t wodt_bprch : 1; 3049232812Sjmallett uint64_t rodt_bprch : 1; 3050232812Sjmallett uint64_t crm_max : 5; 3051232812Sjmallett uint64_t crm_thr : 5; 3052232812Sjmallett uint64_t crm_cnt : 5; 3053232812Sjmallett uint64_t thrmax : 4; 3054232812Sjmallett uint64_t persub : 8; 3055232812Sjmallett uint64_t thrcnt : 12; 3056232812Sjmallett uint64_t reserved_63_63 : 1; 3057232812Sjmallett#endif 3058232812Sjmallett } cn68xx; 3059232812Sjmallett struct cvmx_lmcx_control_cn68xx cn68xxp1; 3060232812Sjmallett struct cvmx_lmcx_control_cn66xx cnf71xx; 3061215976Sjmallett}; 3062215976Sjmalletttypedef union cvmx_lmcx_control cvmx_lmcx_control_t; 3063215976Sjmallett 3064215976Sjmallett/** 3065215976Sjmallett * cvmx_lmc#_ctl 3066215976Sjmallett * 3067215976Sjmallett * LMC_CTL = LMC Control 3068215976Sjmallett * This register is an assortment of various control fields needed by the memory controller 3069215976Sjmallett */ 3070232812Sjmallettunion cvmx_lmcx_ctl { 3071215976Sjmallett uint64_t u64; 3072232812Sjmallett struct cvmx_lmcx_ctl_s { 3073232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3074215976Sjmallett uint64_t reserved_32_63 : 32; 3075215976Sjmallett uint64_t ddr__nctl : 4; /**< DDR 
nctl from compensation circuit 3076215976Sjmallett The encoded value on this will adjust the drive strength 3077215976Sjmallett of the DDR DQ pulldns. */ 3078215976Sjmallett uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit 3079215976Sjmallett The encoded value on this will adjust the drive strength 3080215976Sjmallett of the DDR DQ pullup. */ 3081215976Sjmallett uint64_t slow_scf : 1; /**< Should be cleared to zero */ 3082215976Sjmallett uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then 3083215976Sjmallett bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] 3084215976Sjmallett else 3085215976Sjmallett bank[n:0]=address[n+7:7] 3086215976Sjmallett where n=1 for a 4 bank part and n=2 for an 8 bank part */ 3087215976Sjmallett uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before 3088215976Sjmallett allowing reads to interrupt. */ 3089215976Sjmallett uint64_t pll_div2 : 1; /**< PLL Div2. */ 3090215976Sjmallett uint64_t pll_bypass : 1; /**< PLL Bypass. */ 3091215976Sjmallett uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use 3092215976Sjmallett of JEDEC Registered DIMMs which require Write 3093215976Sjmallett data to be registered in the controller. */ 3094215976Sjmallett uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans 3095215976Sjmallett will slot an additional 1 cycle data bus bubble to 3096215976Sjmallett avoid DQ/DQS bus contention. This is only a CYA bit, 3097215976Sjmallett in case the "built-in" DIMM and RANK crossing logic 3098215976Sjmallett which should auto-detect and perfectly slot 3099215976Sjmallett read-to-reads to the same DIMM/RANK. 
*/ 3100215976Sjmallett uint64_t inorder_mwf : 1; /**< Reads as zero */ 3101215976Sjmallett uint64_t inorder_mrf : 1; /**< Always clear to zero */ 3102215976Sjmallett uint64_t reserved_10_11 : 2; 3103215976Sjmallett uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off 3104215976Sjmallett time for the DDR_DQ/DQS drivers is 1 dclk earlier. 3105215976Sjmallett This bit should typically be set. */ 3106215976Sjmallett uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for 3107215976Sjmallett the DDR_DQ/DQS drivers is delayed an additional DCLK 3108215976Sjmallett cycle. This should be set to one whenever both SILO_HC 3109215976Sjmallett and SILO_QC are set. */ 3110215976Sjmallett uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional 3111215976Sjmallett dclks to wait (on top of TCL+1+TSKW) before pulling 3112215976Sjmallett data out of the pad silos. 3113215976Sjmallett - 00: illegal 3114215976Sjmallett - 01: 1 dclks 3115215976Sjmallett - 10: 2 dclks 3116215976Sjmallett - 11: illegal 3117215976Sjmallett This should always be set to 1. */ 3118215976Sjmallett uint64_t tskw : 2; /**< This component is a representation of total BOARD 3119215976Sjmallett DELAY on DQ (used in the controller to determine the 3120215976Sjmallett R->W spacing to avoid DQS/DQ bus conflicts). Enter 3121215976Sjmallett the largest of the per byte Board delay 3122215976Sjmallett - 00: 0 dclk 3123215976Sjmallett - 01: 1 dclks 3124215976Sjmallett - 10: 2 dclks 3125215976Sjmallett - 11: 3 dclks */ 3126215976Sjmallett uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting 3127215976Sjmallett A non Zero value in this register 3128215976Sjmallett enables the On Die Termination (ODT) in DDR parts. 3129215976Sjmallett These two bits are loaded into the RTT 3130215976Sjmallett portion of the EMRS register bits A6 & A2. If DDR2's 3131215976Sjmallett termination (for the memory's DQ/DQS/DM pads) is not 3132215976Sjmallett desired, set it to 00. 
If it is, chose between 3133215976Sjmallett 01 for 75 ohm and 10 for 150 ohm termination. 3134215976Sjmallett 00 = ODT Disabled 3135215976Sjmallett 01 = 75 ohm Termination 3136215976Sjmallett 10 = 150 ohm Termination 3137215976Sjmallett 11 = 50 ohm Termination 3138215976Sjmallett Octeon, on writes, by default, drives the 4/8 ODT 3139215976Sjmallett pins (64/128b mode) based on what the masks 3140215976Sjmallett (LMC_WODT_CTL) are programmed to. 3141215976Sjmallett LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins 3142215976Sjmallett for READS. LMC_RODT_CTL needs to be programmed based 3143215976Sjmallett on the system's needs for ODT. */ 3144215976Sjmallett uint64_t dic : 2; /**< Drive Strength Control: 3145215976Sjmallett DIC[0] is 3146215976Sjmallett loaded into the Extended Mode Register (EMRS) A1 bit 3147215976Sjmallett during initialization. 3148215976Sjmallett 0 = Normal 3149215976Sjmallett 1 = Reduced 3150215976Sjmallett DIC[1] is used to load into EMRS 3151215976Sjmallett bit 10 - DQSN Enable/Disable field. By default, we 3152215976Sjmallett program the DDR's to drive the DQSN also. Set it to 3153215976Sjmallett 1 if DQSN should be Hi-Z. 
3154215976Sjmallett 0 - DQSN Enable 3155215976Sjmallett 1 - DQSN Disable */ 3156215976Sjmallett#else 3157215976Sjmallett uint64_t dic : 2; 3158215976Sjmallett uint64_t qs_dic : 2; 3159215976Sjmallett uint64_t tskw : 2; 3160215976Sjmallett uint64_t sil_lat : 2; 3161215976Sjmallett uint64_t bprch : 1; 3162215976Sjmallett uint64_t fprch2 : 1; 3163215976Sjmallett uint64_t reserved_10_11 : 2; 3164215976Sjmallett uint64_t inorder_mrf : 1; 3165215976Sjmallett uint64_t inorder_mwf : 1; 3166215976Sjmallett uint64_t r2r_slot : 1; 3167215976Sjmallett uint64_t rdimm_ena : 1; 3168215976Sjmallett uint64_t pll_bypass : 1; 3169215976Sjmallett uint64_t pll_div2 : 1; 3170215976Sjmallett uint64_t max_write_batch : 4; 3171215976Sjmallett uint64_t xor_bank : 1; 3172215976Sjmallett uint64_t slow_scf : 1; 3173215976Sjmallett uint64_t ddr__pctl : 4; 3174215976Sjmallett uint64_t ddr__nctl : 4; 3175215976Sjmallett uint64_t reserved_32_63 : 32; 3176215976Sjmallett#endif 3177215976Sjmallett } s; 3178232812Sjmallett struct cvmx_lmcx_ctl_cn30xx { 3179232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3180215976Sjmallett uint64_t reserved_32_63 : 32; 3181215976Sjmallett uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit 3182215976Sjmallett The encoded value on this will adjust the drive strength 3183215976Sjmallett of the DDR DQ pulldns. */ 3184215976Sjmallett uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit 3185215976Sjmallett The encoded value on this will adjust the drive strength 3186215976Sjmallett of the DDR DQ pullup. 
*/ 3187215976Sjmallett uint64_t slow_scf : 1; /**< 1=SCF has pass1 latency, 0=SCF has 1 cycle lower latency 3188215976Sjmallett when compared to pass1 */ 3189215976Sjmallett uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then 3190215976Sjmallett bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] 3191215976Sjmallett else 3192215976Sjmallett bank[n:0]=address[n+7:7] 3193215976Sjmallett where n=1 for a 4 bank part and n=2 for an 8 bank part */ 3194215976Sjmallett uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before 3195215976Sjmallett allowing reads to interrupt. */ 3196215976Sjmallett uint64_t pll_div2 : 1; /**< PLL Div2. */ 3197215976Sjmallett uint64_t pll_bypass : 1; /**< PLL Bypass. */ 3198215976Sjmallett uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use 3199215976Sjmallett of JEDEC Registered DIMMs which require Write 3200215976Sjmallett data to be registered in the controller. */ 3201215976Sjmallett uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans 3202215976Sjmallett will slot an additional 1 cycle data bus bubble to 3203215976Sjmallett avoid DQ/DQS bus contention. This is only a CYA bit, 3204215976Sjmallett in case the "built-in" DIMM and RANK crossing logic 3205215976Sjmallett which should auto-detect and perfectly slot 3206215976Sjmallett read-to-reads to the same DIMM/RANK. */ 3207215976Sjmallett uint64_t inorder_mwf : 1; /**< Reads as zero */ 3208215976Sjmallett uint64_t inorder_mrf : 1; /**< Always set to zero */ 3209215976Sjmallett uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the 3210215976Sjmallett Dclk domain is (DRESET || ECLK_RESET). */ 3211215976Sjmallett uint64_t mode32b : 1; /**< 32b data Path Mode 3212215976Sjmallett Set to 1 if we use only 32 DQ pins 3213215976Sjmallett 0 for 16b DQ mode. 
*/ 3214215976Sjmallett uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off 3215215976Sjmallett time for the DDR_DQ/DQS drivers is 1 dclk earlier. 3216215976Sjmallett This bit should typically be set. */ 3217215976Sjmallett uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for 3218215976Sjmallett the DDR_DQ/DQS drivers is delayed an additional DCLK 3219215976Sjmallett cycle. This should be set to one whenever both SILO_HC 3220215976Sjmallett and SILO_QC are set. */ 3221215976Sjmallett uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional 3222215976Sjmallett dclks to wait (on top of TCL+1+TSKW) before pulling 3223215976Sjmallett data out of the pad silos. 3224215976Sjmallett - 00: illegal 3225215976Sjmallett - 01: 1 dclks 3226215976Sjmallett - 10: 2 dclks 3227215976Sjmallett - 11: illegal 3228215976Sjmallett This should always be set to 1. */ 3229215976Sjmallett uint64_t tskw : 2; /**< This component is a representation of total BOARD 3230215976Sjmallett DELAY on DQ (used in the controller to determine the 3231215976Sjmallett R->W spacing to avoid DQS/DQ bus conflicts). Enter 3232215976Sjmallett the largest of the per byte Board delay 3233215976Sjmallett - 00: 0 dclk 3234215976Sjmallett - 01: 1 dclks 3235215976Sjmallett - 10: 2 dclks 3236215976Sjmallett - 11: 3 dclks */ 3237215976Sjmallett uint64_t qs_dic : 2; /**< QS Drive Strength Control (DDR1): 3238215976Sjmallett & DDR2 Termination Resistor Setting 3239215976Sjmallett When in DDR2, a non Zero value in this register 3240215976Sjmallett enables the On Die Termination (ODT) in DDR parts. 3241215976Sjmallett These two bits are loaded into the RTT 3242215976Sjmallett portion of the EMRS register bits A6 & A2. If DDR2's 3243215976Sjmallett termination (for the memory's DQ/DQS/DM pads) is not 3244215976Sjmallett desired, set it to 00. If it is, chose between 3245215976Sjmallett 01 for 75 ohm and 10 for 150 ohm termination. 
3246215976Sjmallett 00 = ODT Disabled 3247215976Sjmallett 01 = 75 ohm Termination 3248215976Sjmallett 10 = 150 ohm Termination 3249215976Sjmallett 11 = 50 ohm Termination 3250215976Sjmallett Octeon, on writes, by default, drives the 8 ODT 3251215976Sjmallett pins based on what the masks (LMC_WODT_CTL1 & 2) 3252215976Sjmallett are programmed to. LMC_DDR2_CTL->ODT_ENA 3253215976Sjmallett enables Octeon to drive ODT pins for READS. 3254215976Sjmallett LMC_RODT_CTL needs to be programmed based on 3255215976Sjmallett the system's needs for ODT. */ 3256215976Sjmallett uint64_t dic : 2; /**< Drive Strength Control: 3257215976Sjmallett For DDR-I/II Mode, DIC[0] is 3258215976Sjmallett loaded into the Extended Mode Register (EMRS) A1 bit 3259215976Sjmallett during initialization. (see DDR-I data sheet EMRS 3260215976Sjmallett description) 3261215976Sjmallett 0 = Normal 3262215976Sjmallett 1 = Reduced 3263215976Sjmallett For DDR-II Mode, DIC[1] is used to load into EMRS 3264215976Sjmallett bit 10 - DQSN Enable/Disable field. By default, we 3265215976Sjmallett program the DDR's to drive the DQSN also. Set it to 3266215976Sjmallett 1 if DQSN should be Hi-Z. 
3267215976Sjmallett 0 - DQSN Enable 3268215976Sjmallett 1 - DQSN Disable */ 3269215976Sjmallett#else 3270215976Sjmallett uint64_t dic : 2; 3271215976Sjmallett uint64_t qs_dic : 2; 3272215976Sjmallett uint64_t tskw : 2; 3273215976Sjmallett uint64_t sil_lat : 2; 3274215976Sjmallett uint64_t bprch : 1; 3275215976Sjmallett uint64_t fprch2 : 1; 3276215976Sjmallett uint64_t mode32b : 1; 3277215976Sjmallett uint64_t dreset : 1; 3278215976Sjmallett uint64_t inorder_mrf : 1; 3279215976Sjmallett uint64_t inorder_mwf : 1; 3280215976Sjmallett uint64_t r2r_slot : 1; 3281215976Sjmallett uint64_t rdimm_ena : 1; 3282215976Sjmallett uint64_t pll_bypass : 1; 3283215976Sjmallett uint64_t pll_div2 : 1; 3284215976Sjmallett uint64_t max_write_batch : 4; 3285215976Sjmallett uint64_t xor_bank : 1; 3286215976Sjmallett uint64_t slow_scf : 1; 3287215976Sjmallett uint64_t ddr__pctl : 4; 3288215976Sjmallett uint64_t ddr__nctl : 4; 3289215976Sjmallett uint64_t reserved_32_63 : 32; 3290215976Sjmallett#endif 3291215976Sjmallett } cn30xx; 3292215976Sjmallett struct cvmx_lmcx_ctl_cn30xx cn31xx; 3293232812Sjmallett struct cvmx_lmcx_ctl_cn38xx { 3294232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3295215976Sjmallett uint64_t reserved_32_63 : 32; 3296215976Sjmallett uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit 3297215976Sjmallett The encoded value on this will adjust the drive strength 3298215976Sjmallett of the DDR DQ pulldns. */ 3299215976Sjmallett uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit 3300215976Sjmallett The encoded value on this will adjust the drive strength 3301215976Sjmallett of the DDR DQ pullup. 
*/ 3302215976Sjmallett uint64_t slow_scf : 1; /**< 1=SCF has pass1 latency, 0=SCF has 1 cycle lower latency 3303215976Sjmallett when compared to pass1 3304215976Sjmallett NOTE - This bit has NO effect in PASS1 */ 3305215976Sjmallett uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then 3306215976Sjmallett bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] 3307215976Sjmallett else 3308215976Sjmallett bank[n:0]=address[n+7:7] 3309215976Sjmallett where n=1 for a 4 bank part and n=2 for an 8 bank part */ 3310215976Sjmallett uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before 3311215976Sjmallett allowing reads to interrupt. */ 3312215976Sjmallett uint64_t reserved_16_17 : 2; 3313215976Sjmallett uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use 3314215976Sjmallett of JEDEC Registered DIMMs which require Write 3315215976Sjmallett data to be registered in the controller. */ 3316215976Sjmallett uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans 3317215976Sjmallett will slot an additional 1 cycle data bus bubble to 3318215976Sjmallett avoid DQ/DQS bus contention. This is only a CYA bit, 3319215976Sjmallett in case the "built-in" DIMM and RANK crossing logic 3320215976Sjmallett which should auto-detect and perfectly slot 3321215976Sjmallett read-to-reads to the same DIMM/RANK. */ 3322215976Sjmallett uint64_t inorder_mwf : 1; /**< When set, forces LMC_MWF (writes) into strict, in-order 3323215976Sjmallett mode. When clear, writes may be serviced out of order 3324215976Sjmallett (optimized to keep multiple banks active). 3325215976Sjmallett This bit is ONLY to be set at power-on and 3326215976Sjmallett should not be set for normal use. 
3327215976Sjmallett NOTE: For PASS1, set as follows: 3328215976Sjmallett DDR-I -> 1 3329215976Sjmallett DDR-II -> 0 3330215976Sjmallett For Pass2, this bit is RA0, write ignore (this feature 3331215976Sjmallett is permanently disabled) */ 3332215976Sjmallett uint64_t inorder_mrf : 1; /**< When set, forces LMC_MRF (reads) into strict, in-order 3333215976Sjmallett mode. When clear, reads may be serviced out of order 3334215976Sjmallett (optimized to keep multiple banks active). 3335215976Sjmallett This bit is ONLY to be set at power-on and 3336215976Sjmallett should not be set for normal use. 3337215976Sjmallett NOTE: For PASS1, set as follows: 3338215976Sjmallett DDR-I -> 1 3339215976Sjmallett DDR-II -> 0 3340215976Sjmallett For Pass2, this bit should be written ZERO for 3341215976Sjmallett DDR I & II */ 3342215976Sjmallett uint64_t set_zero : 1; /**< Reserved. Always Set this Bit to Zero */ 3343215976Sjmallett uint64_t mode128b : 1; /**< 128b data Path Mode 3344215976Sjmallett Set to 1 if we use all 128 DQ pins 3345215976Sjmallett 0 for 64b DQ mode. */ 3346215976Sjmallett uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off 3347215976Sjmallett time for the DDR_DQ/DQS drivers is 1 dclk earlier. 3348215976Sjmallett This bit should typically be set. */ 3349215976Sjmallett uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for 3350215976Sjmallett the DDR_DQ/DQS drivers is delayed an additional DCLK 3351215976Sjmallett cycle. This should be set to one whenever both SILO_HC 3352215976Sjmallett and SILO_QC are set. */ 3353215976Sjmallett uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional 3354215976Sjmallett dclks to wait (on top of TCL+1+TSKW) before pulling 3355215976Sjmallett data out of the pad silos. 3356215976Sjmallett - 00: illegal 3357215976Sjmallett - 01: 1 dclks 3358215976Sjmallett - 10: 2 dclks 3359215976Sjmallett - 11: illegal 3360215976Sjmallett This should always be set to 1. 
*/ 3361215976Sjmallett uint64_t tskw : 2; /**< This component is a representation of total BOARD 3362215976Sjmallett DELAY on DQ (used in the controller to determine the 3363215976Sjmallett R->W spacing to avoid DQS/DQ bus conflicts). Enter 3364215976Sjmallett the largest of the per byte Board delay 3365215976Sjmallett - 00: 0 dclk 3366215976Sjmallett - 01: 1 dclks 3367215976Sjmallett - 10: 2 dclks 3368215976Sjmallett - 11: 3 dclks */ 3369215976Sjmallett uint64_t qs_dic : 2; /**< QS Drive Strength Control (DDR1): 3370215976Sjmallett & DDR2 Termination Resistor Setting 3371215976Sjmallett When in DDR2, a non Zero value in this register 3372215976Sjmallett enables the On Die Termination (ODT) in DDR parts. 3373215976Sjmallett These two bits are loaded into the RTT 3374215976Sjmallett portion of the EMRS register bits A6 & A2. If DDR2's 3375215976Sjmallett termination (for the memory's DQ/DQS/DM pads) is not 3376215976Sjmallett desired, set it to 00. If it is, chose between 3377215976Sjmallett 01 for 75 ohm and 10 for 150 ohm termination. 3378215976Sjmallett 00 = ODT Disabled 3379215976Sjmallett 01 = 75 ohm Termination 3380215976Sjmallett 10 = 150 ohm Termination 3381215976Sjmallett 11 = 50 ohm Termination 3382215976Sjmallett Octeon, on writes, by default, drives the 4/8 ODT 3383215976Sjmallett pins (64/128b mode) based on what the masks 3384215976Sjmallett (LMC_WODT_CTL) are programmed to. 3385215976Sjmallett LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins 3386215976Sjmallett for READS. LMC_RODT_CTL needs to be programmed based 3387215976Sjmallett on the system's needs for ODT. */ 3388215976Sjmallett uint64_t dic : 2; /**< Drive Strength Control: 3389215976Sjmallett For DDR-I/II Mode, DIC[0] is 3390215976Sjmallett loaded into the Extended Mode Register (EMRS) A1 bit 3391215976Sjmallett during initialization. 
(see DDR-I data sheet EMRS 3392215976Sjmallett description) 3393215976Sjmallett 0 = Normal 3394215976Sjmallett 1 = Reduced 3395215976Sjmallett For DDR-II Mode, DIC[1] is used to load into EMRS 3396215976Sjmallett bit 10 - DQSN Enable/Disable field. By default, we 3397215976Sjmallett program the DDR's to drive the DQSN also. Set it to 3398215976Sjmallett 1 if DQSN should be Hi-Z. 3399215976Sjmallett 0 - DQSN Enable 3400215976Sjmallett 1 - DQSN Disable */ 3401215976Sjmallett#else 3402215976Sjmallett uint64_t dic : 2; 3403215976Sjmallett uint64_t qs_dic : 2; 3404215976Sjmallett uint64_t tskw : 2; 3405215976Sjmallett uint64_t sil_lat : 2; 3406215976Sjmallett uint64_t bprch : 1; 3407215976Sjmallett uint64_t fprch2 : 1; 3408215976Sjmallett uint64_t mode128b : 1; 3409215976Sjmallett uint64_t set_zero : 1; 3410215976Sjmallett uint64_t inorder_mrf : 1; 3411215976Sjmallett uint64_t inorder_mwf : 1; 3412215976Sjmallett uint64_t r2r_slot : 1; 3413215976Sjmallett uint64_t rdimm_ena : 1; 3414215976Sjmallett uint64_t reserved_16_17 : 2; 3415215976Sjmallett uint64_t max_write_batch : 4; 3416215976Sjmallett uint64_t xor_bank : 1; 3417215976Sjmallett uint64_t slow_scf : 1; 3418215976Sjmallett uint64_t ddr__pctl : 4; 3419215976Sjmallett uint64_t ddr__nctl : 4; 3420215976Sjmallett uint64_t reserved_32_63 : 32; 3421215976Sjmallett#endif 3422215976Sjmallett } cn38xx; 3423215976Sjmallett struct cvmx_lmcx_ctl_cn38xx cn38xxp2; 3424232812Sjmallett struct cvmx_lmcx_ctl_cn50xx { 3425232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3426215976Sjmallett uint64_t reserved_32_63 : 32; 3427215976Sjmallett uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit 3428215976Sjmallett The encoded value on this will adjust the drive strength 3429215976Sjmallett of the DDR DQ pulldns. */ 3430215976Sjmallett uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit 3431215976Sjmallett The encoded value on this will adjust the drive strength 3432215976Sjmallett of the DDR DQ pullup. 
*/ 3433215976Sjmallett uint64_t slow_scf : 1; /**< Should be cleared to zero */ 3434215976Sjmallett uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then 3435215976Sjmallett bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] 3436215976Sjmallett else 3437215976Sjmallett bank[n:0]=address[n+7:7] 3438215976Sjmallett where n=1 for a 4 bank part and n=2 for an 8 bank part */ 3439215976Sjmallett uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before 3440215976Sjmallett allowing reads to interrupt. */ 3441215976Sjmallett uint64_t reserved_17_17 : 1; 3442215976Sjmallett uint64_t pll_bypass : 1; /**< PLL Bypass. */ 3443215976Sjmallett uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use 3444215976Sjmallett of JEDEC Registered DIMMs which require Write 3445215976Sjmallett data to be registered in the controller. */ 3446215976Sjmallett uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans 3447215976Sjmallett will slot an additional 1 cycle data bus bubble to 3448215976Sjmallett avoid DQ/DQS bus contention. This is only a CYA bit, 3449215976Sjmallett in case the "built-in" DIMM and RANK crossing logic 3450215976Sjmallett which should auto-detect and perfectly slot 3451215976Sjmallett read-to-reads to the same DIMM/RANK. */ 3452215976Sjmallett uint64_t inorder_mwf : 1; /**< Reads as zero */ 3453215976Sjmallett uint64_t inorder_mrf : 1; /**< Always clear to zero */ 3454215976Sjmallett uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the 3455215976Sjmallett Dclk domain is (DRESET || ECLK_RESET). */ 3456215976Sjmallett uint64_t mode32b : 1; /**< 32b data Path Mode 3457215976Sjmallett Set to 1 if we use 32 DQ pins 3458215976Sjmallett 0 for 16b DQ mode. */ 3459215976Sjmallett uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off 3460215976Sjmallett time for the DDR_DQ/DQS drivers is 1 dclk earlier. 3461215976Sjmallett This bit should typically be set. 
*/ 3462215976Sjmallett uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for 3463215976Sjmallett the DDR_DQ/DQS drivers is delayed an additional DCLK 3464215976Sjmallett cycle. This should be set to one whenever both SILO_HC 3465215976Sjmallett and SILO_QC are set. */ 3466215976Sjmallett uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional 3467215976Sjmallett dclks to wait (on top of TCL+1+TSKW) before pulling 3468215976Sjmallett data out of the pad silos. 3469215976Sjmallett - 00: illegal 3470215976Sjmallett - 01: 1 dclks 3471215976Sjmallett - 10: 2 dclks 3472215976Sjmallett - 11: illegal 3473215976Sjmallett This should always be set to 1. */ 3474215976Sjmallett uint64_t tskw : 2; /**< This component is a representation of total BOARD 3475215976Sjmallett DELAY on DQ (used in the controller to determine the 3476215976Sjmallett R->W spacing to avoid DQS/DQ bus conflicts). Enter 3477215976Sjmallett the largest of the per byte Board delay 3478215976Sjmallett - 00: 0 dclk 3479215976Sjmallett - 01: 1 dclks 3480215976Sjmallett - 10: 2 dclks 3481215976Sjmallett - 11: 3 dclks */ 3482215976Sjmallett uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting 3483215976Sjmallett When in DDR2, a non Zero value in this register 3484215976Sjmallett enables the On Die Termination (ODT) in DDR parts. 3485215976Sjmallett These two bits are loaded into the RTT 3486215976Sjmallett portion of the EMRS register bits A6 & A2. If DDR2's 3487215976Sjmallett termination (for the memory's DQ/DQS/DM pads) is not 3488215976Sjmallett desired, set it to 00. If it is, chose between 3489215976Sjmallett 01 for 75 ohm and 10 for 150 ohm termination. 
3490215976Sjmallett 00 = ODT Disabled 3491215976Sjmallett 01 = 75 ohm Termination 3492215976Sjmallett 10 = 150 ohm Termination 3493215976Sjmallett 11 = 50 ohm Termination 3494215976Sjmallett Octeon, on writes, by default, drives the ODT 3495215976Sjmallett pins based on what the masks 3496215976Sjmallett (LMC_WODT_CTL) are programmed to. 3497215976Sjmallett LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins 3498215976Sjmallett for READS. LMC_RODT_CTL needs to be programmed based 3499215976Sjmallett on the system's needs for ODT. */ 3500215976Sjmallett uint64_t dic : 2; /**< Drive Strength Control: 3501215976Sjmallett DIC[0] is 3502215976Sjmallett loaded into the Extended Mode Register (EMRS) A1 bit 3503215976Sjmallett during initialization. 3504215976Sjmallett 0 = Normal 3505215976Sjmallett 1 = Reduced 3506215976Sjmallett DIC[1] is used to load into EMRS 3507215976Sjmallett bit 10 - DQSN Enable/Disable field. By default, we 3508215976Sjmallett program the DDR's to drive the DQSN also. Set it to 3509215976Sjmallett 1 if DQSN should be Hi-Z. 
3510215976Sjmallett 0 - DQSN Enable 3511215976Sjmallett 1 - DQSN Disable */ 3512215976Sjmallett#else 3513215976Sjmallett uint64_t dic : 2; 3514215976Sjmallett uint64_t qs_dic : 2; 3515215976Sjmallett uint64_t tskw : 2; 3516215976Sjmallett uint64_t sil_lat : 2; 3517215976Sjmallett uint64_t bprch : 1; 3518215976Sjmallett uint64_t fprch2 : 1; 3519215976Sjmallett uint64_t mode32b : 1; 3520215976Sjmallett uint64_t dreset : 1; 3521215976Sjmallett uint64_t inorder_mrf : 1; 3522215976Sjmallett uint64_t inorder_mwf : 1; 3523215976Sjmallett uint64_t r2r_slot : 1; 3524215976Sjmallett uint64_t rdimm_ena : 1; 3525215976Sjmallett uint64_t pll_bypass : 1; 3526215976Sjmallett uint64_t reserved_17_17 : 1; 3527215976Sjmallett uint64_t max_write_batch : 4; 3528215976Sjmallett uint64_t xor_bank : 1; 3529215976Sjmallett uint64_t slow_scf : 1; 3530215976Sjmallett uint64_t ddr__pctl : 4; 3531215976Sjmallett uint64_t ddr__nctl : 4; 3532215976Sjmallett uint64_t reserved_32_63 : 32; 3533215976Sjmallett#endif 3534215976Sjmallett } cn50xx; 3535232812Sjmallett struct cvmx_lmcx_ctl_cn52xx { 3536232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3537215976Sjmallett uint64_t reserved_32_63 : 32; 3538215976Sjmallett uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit 3539215976Sjmallett The encoded value on this will adjust the drive strength 3540215976Sjmallett of the DDR DQ pulldns. */ 3541215976Sjmallett uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit 3542215976Sjmallett The encoded value on this will adjust the drive strength 3543215976Sjmallett of the DDR DQ pullup. 
*/ 3544215976Sjmallett uint64_t slow_scf : 1; /**< Always clear to zero */ 3545215976Sjmallett uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then 3546215976Sjmallett bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] 3547215976Sjmallett else 3548215976Sjmallett bank[n:0]=address[n+7:7] 3549215976Sjmallett where n=1 for a 4 bank part and n=2 for an 8 bank part */ 3550215976Sjmallett uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before 3551215976Sjmallett allowing reads to interrupt. */ 3552215976Sjmallett uint64_t reserved_16_17 : 2; 3553215976Sjmallett uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use 3554215976Sjmallett of JEDEC Registered DIMMs which require Write 3555215976Sjmallett data to be registered in the controller. */ 3556215976Sjmallett uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans 3557215976Sjmallett will slot an additional 1 cycle data bus bubble to 3558215976Sjmallett avoid DQ/DQS bus contention. This is only a CYA bit, 3559215976Sjmallett in case the "built-in" DIMM and RANK crossing logic 3560215976Sjmallett which should auto-detect and perfectly slot 3561215976Sjmallett read-to-reads to the same DIMM/RANK. */ 3562215976Sjmallett uint64_t inorder_mwf : 1; /**< Reads as zero */ 3563215976Sjmallett uint64_t inorder_mrf : 1; /**< Always set to zero */ 3564215976Sjmallett uint64_t dreset : 1; /**< MBZ 3565215976Sjmallett THIS IS OBSOLETE. Use LMC_DLL_CTL[DRESET] instead. */ 3566215976Sjmallett uint64_t mode32b : 1; /**< 32b data Path Mode 3567215976Sjmallett Set to 1 if we use only 32 DQ pins 3568215976Sjmallett 0 for 64b DQ mode. */ 3569215976Sjmallett uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off 3570215976Sjmallett time for the DDR_DQ/DQS drivers is 1 dclk earlier. 3571215976Sjmallett This bit should typically be set. 
*/ 3572215976Sjmallett uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for 3573215976Sjmallett the DDR_DQ/DQS drivers is delayed an additional DCLK 3574215976Sjmallett cycle. This should be set to one whenever both SILO_HC 3575215976Sjmallett and SILO_QC are set. */ 3576215976Sjmallett uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional 3577215976Sjmallett dclks to wait (on top of TCL+1+TSKW) before pulling 3578215976Sjmallett data out of the pad silos. 3579215976Sjmallett - 00: illegal 3580215976Sjmallett - 01: 1 dclks 3581215976Sjmallett - 10: 2 dclks 3582215976Sjmallett - 11: illegal 3583215976Sjmallett This should always be set to 1. 3584215976Sjmallett THIS IS OBSOLETE. Use READ_LEVEL_RANK instead. */ 3585215976Sjmallett uint64_t tskw : 2; /**< This component is a representation of total BOARD 3586215976Sjmallett DELAY on DQ (used in the controller to determine the 3587215976Sjmallett R->W spacing to avoid DQS/DQ bus conflicts). Enter 3588215976Sjmallett the largest of the per byte Board delay 3589215976Sjmallett - 00: 0 dclk 3590215976Sjmallett - 01: 1 dclks 3591215976Sjmallett - 10: 2 dclks 3592215976Sjmallett - 11: 3 dclks 3593215976Sjmallett THIS IS OBSOLETE. Use READ_LEVEL_RANK instead. */ 3594215976Sjmallett uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting 3595215976Sjmallett When in DDR2, a non Zero value in this register 3596215976Sjmallett enables the On Die Termination (ODT) in DDR parts. 3597215976Sjmallett These two bits are loaded into the RTT 3598215976Sjmallett portion of the EMRS register bits A6 & A2. If DDR2's 3599215976Sjmallett termination (for the memory's DQ/DQS/DM pads) is not 3600215976Sjmallett desired, set it to 00. If it is, chose between 3601215976Sjmallett 01 for 75 ohm and 10 for 150 ohm termination. 
3602215976Sjmallett 00 = ODT Disabled 3603215976Sjmallett 01 = 75 ohm Termination 3604215976Sjmallett 10 = 150 ohm Termination 3605215976Sjmallett 11 = 50 ohm Termination 3606215976Sjmallett Octeon, on writes, by default, drives the 4/8 ODT 3607215976Sjmallett pins (64/128b mode) based on what the masks 3608215976Sjmallett (LMC_WODT_CTL0 & 1) are programmed to. 3609215976Sjmallett LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins 3610215976Sjmallett for READS. LMC_RODT_CTL needs to be programmed based 3611215976Sjmallett on the system's needs for ODT. */ 3612215976Sjmallett uint64_t dic : 2; /**< Drive Strength Control: 3613215976Sjmallett DIC[0] is 3614215976Sjmallett loaded into the Extended Mode Register (EMRS) A1 bit 3615215976Sjmallett during initialization. 3616215976Sjmallett 0 = Normal 3617215976Sjmallett 1 = Reduced 3618215976Sjmallett DIC[1] is used to load into EMRS 3619215976Sjmallett bit 10 - DQSN Enable/Disable field. By default, we 3620215976Sjmallett program the DDR's to drive the DQSN also. Set it to 3621215976Sjmallett 1 if DQSN should be Hi-Z. 
3622215976Sjmallett 0 - DQSN Enable 3623215976Sjmallett 1 - DQSN Disable */ 3624215976Sjmallett#else 3625215976Sjmallett uint64_t dic : 2; 3626215976Sjmallett uint64_t qs_dic : 2; 3627215976Sjmallett uint64_t tskw : 2; 3628215976Sjmallett uint64_t sil_lat : 2; 3629215976Sjmallett uint64_t bprch : 1; 3630215976Sjmallett uint64_t fprch2 : 1; 3631215976Sjmallett uint64_t mode32b : 1; 3632215976Sjmallett uint64_t dreset : 1; 3633215976Sjmallett uint64_t inorder_mrf : 1; 3634215976Sjmallett uint64_t inorder_mwf : 1; 3635215976Sjmallett uint64_t r2r_slot : 1; 3636215976Sjmallett uint64_t rdimm_ena : 1; 3637215976Sjmallett uint64_t reserved_16_17 : 2; 3638215976Sjmallett uint64_t max_write_batch : 4; 3639215976Sjmallett uint64_t xor_bank : 1; 3640215976Sjmallett uint64_t slow_scf : 1; 3641215976Sjmallett uint64_t ddr__pctl : 4; 3642215976Sjmallett uint64_t ddr__nctl : 4; 3643215976Sjmallett uint64_t reserved_32_63 : 32; 3644215976Sjmallett#endif 3645215976Sjmallett } cn52xx; 3646215976Sjmallett struct cvmx_lmcx_ctl_cn52xx cn52xxp1; 3647215976Sjmallett struct cvmx_lmcx_ctl_cn52xx cn56xx; 3648215976Sjmallett struct cvmx_lmcx_ctl_cn52xx cn56xxp1; 3649232812Sjmallett struct cvmx_lmcx_ctl_cn58xx { 3650232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3651215976Sjmallett uint64_t reserved_32_63 : 32; 3652215976Sjmallett uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit 3653215976Sjmallett The encoded value on this will adjust the drive strength 3654215976Sjmallett of the DDR DQ pulldns. */ 3655215976Sjmallett uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit 3656215976Sjmallett The encoded value on this will adjust the drive strength 3657215976Sjmallett of the DDR DQ pullup. 
*/ 3658215976Sjmallett uint64_t slow_scf : 1; /**< Should be cleared to zero */ 3659215976Sjmallett uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then 3660215976Sjmallett bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] 3661215976Sjmallett else 3662215976Sjmallett bank[n:0]=address[n+7:7] 3663215976Sjmallett where n=1 for a 4 bank part and n=2 for an 8 bank part */ 3664215976Sjmallett uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before 3665215976Sjmallett allowing reads to interrupt. */ 3666215976Sjmallett uint64_t reserved_16_17 : 2; 3667215976Sjmallett uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use 3668215976Sjmallett of JEDEC Registered DIMMs which require Write 3669215976Sjmallett data to be registered in the controller. */ 3670215976Sjmallett uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans 3671215976Sjmallett will slot an additional 1 cycle data bus bubble to 3672215976Sjmallett avoid DQ/DQS bus contention. This is only a CYA bit, 3673215976Sjmallett in case the "built-in" DIMM and RANK crossing logic 3674215976Sjmallett which should auto-detect and perfectly slot 3675215976Sjmallett read-to-reads to the same DIMM/RANK. */ 3676215976Sjmallett uint64_t inorder_mwf : 1; /**< Reads as zero */ 3677215976Sjmallett uint64_t inorder_mrf : 1; /**< Always clear to zero */ 3678215976Sjmallett uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the 3679215976Sjmallett Dclk domain is (DRESET || ECLK_RESET). */ 3680215976Sjmallett uint64_t mode128b : 1; /**< 128b data Path Mode 3681215976Sjmallett Set to 1 if we use all 128 DQ pins 3682215976Sjmallett 0 for 64b DQ mode. */ 3683215976Sjmallett uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off 3684215976Sjmallett time for the DDR_DQ/DQS drivers is 1 dclk earlier. 3685215976Sjmallett This bit should typically be set. 
*/ 3686215976Sjmallett uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for 3687215976Sjmallett the DDR_DQ/DQS drivers is delayed an additional DCLK 3688215976Sjmallett cycle. This should be set to one whenever both SILO_HC 3689215976Sjmallett and SILO_QC are set. */ 3690215976Sjmallett uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional 3691215976Sjmallett dclks to wait (on top of TCL+1+TSKW) before pulling 3692215976Sjmallett data out of the pad silos. 3693215976Sjmallett - 00: illegal 3694215976Sjmallett - 01: 1 dclks 3695215976Sjmallett - 10: 2 dclks 3696215976Sjmallett - 11: illegal 3697215976Sjmallett This should always be set to 1. */ 3698215976Sjmallett uint64_t tskw : 2; /**< This component is a representation of total BOARD 3699215976Sjmallett DELAY on DQ (used in the controller to determine the 3700215976Sjmallett R->W spacing to avoid DQS/DQ bus conflicts). Enter 3701215976Sjmallett the largest of the per byte Board delay 3702215976Sjmallett - 00: 0 dclk 3703215976Sjmallett - 01: 1 dclks 3704215976Sjmallett - 10: 2 dclks 3705215976Sjmallett - 11: 3 dclks */ 3706215976Sjmallett uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting 3707215976Sjmallett A non Zero value in this register 3708215976Sjmallett enables the On Die Termination (ODT) in DDR parts. 3709215976Sjmallett These two bits are loaded into the RTT 3710215976Sjmallett portion of the EMRS register bits A6 & A2. If DDR2's 3711215976Sjmallett termination (for the memory's DQ/DQS/DM pads) is not 3712215976Sjmallett desired, set it to 00. If it is, chose between 3713215976Sjmallett 01 for 75 ohm and 10 for 150 ohm termination. 
3714215976Sjmallett 00 = ODT Disabled 3715215976Sjmallett 01 = 75 ohm Termination 3716215976Sjmallett 10 = 150 ohm Termination 3717215976Sjmallett 11 = 50 ohm Termination 3718215976Sjmallett Octeon, on writes, by default, drives the 4/8 ODT 3719215976Sjmallett pins (64/128b mode) based on what the masks 3720215976Sjmallett (LMC_WODT_CTL) are programmed to. 3721215976Sjmallett LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins 3722215976Sjmallett for READS. LMC_RODT_CTL needs to be programmed based 3723215976Sjmallett on the system's needs for ODT. */ 3724215976Sjmallett uint64_t dic : 2; /**< Drive Strength Control: 3725215976Sjmallett DIC[0] is 3726215976Sjmallett loaded into the Extended Mode Register (EMRS) A1 bit 3727215976Sjmallett during initialization. 3728215976Sjmallett 0 = Normal 3729215976Sjmallett 1 = Reduced 3730215976Sjmallett DIC[1] is used to load into EMRS 3731215976Sjmallett bit 10 - DQSN Enable/Disable field. By default, we 3732215976Sjmallett program the DDR's to drive the DQSN also. Set it to 3733215976Sjmallett 1 if DQSN should be Hi-Z. 
3734215976Sjmallett 0 - DQSN Enable 3735215976Sjmallett 1 - DQSN Disable */ 3736215976Sjmallett#else 3737215976Sjmallett uint64_t dic : 2; 3738215976Sjmallett uint64_t qs_dic : 2; 3739215976Sjmallett uint64_t tskw : 2; 3740215976Sjmallett uint64_t sil_lat : 2; 3741215976Sjmallett uint64_t bprch : 1; 3742215976Sjmallett uint64_t fprch2 : 1; 3743215976Sjmallett uint64_t mode128b : 1; 3744215976Sjmallett uint64_t dreset : 1; 3745215976Sjmallett uint64_t inorder_mrf : 1; 3746215976Sjmallett uint64_t inorder_mwf : 1; 3747215976Sjmallett uint64_t r2r_slot : 1; 3748215976Sjmallett uint64_t rdimm_ena : 1; 3749215976Sjmallett uint64_t reserved_16_17 : 2; 3750215976Sjmallett uint64_t max_write_batch : 4; 3751215976Sjmallett uint64_t xor_bank : 1; 3752215976Sjmallett uint64_t slow_scf : 1; 3753215976Sjmallett uint64_t ddr__pctl : 4; 3754215976Sjmallett uint64_t ddr__nctl : 4; 3755215976Sjmallett uint64_t reserved_32_63 : 32; 3756215976Sjmallett#endif 3757215976Sjmallett } cn58xx; 3758215976Sjmallett struct cvmx_lmcx_ctl_cn58xx cn58xxp1; 3759215976Sjmallett}; 3760215976Sjmalletttypedef union cvmx_lmcx_ctl cvmx_lmcx_ctl_t; 3761215976Sjmallett 3762215976Sjmallett/** 3763215976Sjmallett * cvmx_lmc#_ctl1 3764215976Sjmallett * 3765215976Sjmallett * LMC_CTL1 = LMC Control1 3766215976Sjmallett * This register is an assortment of various control fields needed by the memory controller 3767215976Sjmallett */ 3768232812Sjmallettunion cvmx_lmcx_ctl1 { 3769215976Sjmallett uint64_t u64; 3770232812Sjmallett struct cvmx_lmcx_ctl1_s { 3771232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3772215976Sjmallett uint64_t reserved_21_63 : 43; 3773215976Sjmallett uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation 3774215976Sjmallett 0=disabled, 1=enabled */ 3775215976Sjmallett uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after 3776215976Sjmallett having waited for 2^FORCEWRITE cycles. 0=disabled. 
*/ 3777215976Sjmallett uint64_t idlepower : 3; /**< Enter power-down mode after the memory controller has 3778215976Sjmallett been idle for 2^(2+IDLEPOWER) cycles. 0=disabled. */ 3779215976Sjmallett uint64_t sequence : 3; /**< Instruction sequence that is run after a 0->1 transition 3780215976Sjmallett on LMC_MEM_CFG0[INIT_START]. 3781215976Sjmallett 0=DDR2 power-up/init, 1=read-leveling 3782215976Sjmallett 2=self-refresh entry, 3=self-refresh exit, 3783215976Sjmallett 4=power-down entry, 5=power-down exit, 6=7=illegal */ 3784215976Sjmallett uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */ 3785215976Sjmallett uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable. 3786215976Sjmallett 0=disable, 1=enable 3787215976Sjmallett If the memory part does not support DCC, then this bit 3788215976Sjmallett must be set to 0. */ 3789215976Sjmallett uint64_t reserved_2_7 : 6; 3790215976Sjmallett uint64_t data_layout : 2; /**< Logical data layout per DQ byte lane: 3791215976Sjmallett In 32b mode, this setting has no effect and the data 3792215976Sjmallett layout DQ[35:0] is the following: 3793215976Sjmallett [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]] 3794215976Sjmallett In 16b mode, the DQ[35:0] layouts are the following: 3795215976Sjmallett 0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]] 3796215976Sjmallett 1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]] 3797215976Sjmallett 2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]] 3798215976Sjmallett where E means ecc, D means data, and 0 means unused 3799215976Sjmallett (ignored on reads and written as 0 on writes) */ 3800215976Sjmallett#else 3801215976Sjmallett uint64_t data_layout : 2; 3802215976Sjmallett uint64_t reserved_2_7 : 6; 3803215976Sjmallett uint64_t dcc_enable : 1; 3804215976Sjmallett uint64_t sil_mode : 1; 3805215976Sjmallett uint64_t sequence : 3; 3806215976Sjmallett uint64_t idlepower : 3; 3807215976Sjmallett uint64_t forcewrite : 4; 3808215976Sjmallett uint64_t 
ecc_adr : 1; 3809215976Sjmallett uint64_t reserved_21_63 : 43; 3810215976Sjmallett#endif 3811215976Sjmallett } s; 3812232812Sjmallett struct cvmx_lmcx_ctl1_cn30xx { 3813232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3814215976Sjmallett uint64_t reserved_2_63 : 62; 3815215976Sjmallett uint64_t data_layout : 2; /**< Logical data layout per DQ byte lane: 3816215976Sjmallett In 32b mode, this setting has no effect and the data 3817215976Sjmallett layout DQ[35:0] is the following: 3818215976Sjmallett [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]] 3819215976Sjmallett In 16b mode, the DQ[35:0] layouts are the following: 3820215976Sjmallett 0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]] 3821215976Sjmallett 1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]] 3822215976Sjmallett 2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]] 3823215976Sjmallett where E means ecc, D means data, and 0 means unused 3824215976Sjmallett (ignored on reads and written as 0 on writes) */ 3825215976Sjmallett#else 3826215976Sjmallett uint64_t data_layout : 2; 3827215976Sjmallett uint64_t reserved_2_63 : 62; 3828215976Sjmallett#endif 3829215976Sjmallett } cn30xx; 3830232812Sjmallett struct cvmx_lmcx_ctl1_cn50xx { 3831232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3832215976Sjmallett uint64_t reserved_10_63 : 54; 3833215976Sjmallett uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */ 3834215976Sjmallett uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable. 3835215976Sjmallett 0=disable, 1=enable 3836215976Sjmallett If the memory part does not support DCC, then this bit 3837215976Sjmallett must be set to 0. 
*/ 3838215976Sjmallett uint64_t reserved_2_7 : 6; 3839215976Sjmallett uint64_t data_layout : 2; /**< Logical data layout per DQ byte lane: 3840215976Sjmallett In 32b mode, this setting has no effect and the data 3841215976Sjmallett layout DQ[35:0] is the following: 3842215976Sjmallett [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]] 3843215976Sjmallett In 16b mode, the DQ[35:0] layouts are the following: 3844215976Sjmallett 0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]] 3845215976Sjmallett 1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]] 3846215976Sjmallett 2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]] 3847215976Sjmallett where E means ecc, D means data, and 0 means unused 3848215976Sjmallett (ignored on reads and written as 0 on writes) */ 3849215976Sjmallett#else 3850215976Sjmallett uint64_t data_layout : 2; 3851215976Sjmallett uint64_t reserved_2_7 : 6; 3852215976Sjmallett uint64_t dcc_enable : 1; 3853215976Sjmallett uint64_t sil_mode : 1; 3854215976Sjmallett uint64_t reserved_10_63 : 54; 3855215976Sjmallett#endif 3856215976Sjmallett } cn50xx; 3857232812Sjmallett struct cvmx_lmcx_ctl1_cn52xx { 3858232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3859215976Sjmallett uint64_t reserved_21_63 : 43; 3860215976Sjmallett uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation 3861215976Sjmallett 0=disabled, 1=enabled */ 3862215976Sjmallett uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after 3863215976Sjmallett having waited for 2^FORCEWRITE cycles. 0=disabled. */ 3864215976Sjmallett uint64_t idlepower : 3; /**< Enter power-down mode after the memory controller has 3865215976Sjmallett been idle for 2^(2+IDLEPOWER) cycles. 0=disabled. */ 3866215976Sjmallett uint64_t sequence : 3; /**< Instruction sequence that is run after a 0->1 transition 3867215976Sjmallett on LMC_MEM_CFG0[INIT_START]. 
3868215976Sjmallett 0=DDR2 power-up/init, 1=read-leveling 3869215976Sjmallett 2=self-refresh entry, 3=self-refresh exit, 3870215976Sjmallett 4=power-down entry, 5=power-down exit, 6=7=illegal */ 3871215976Sjmallett uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */ 3872215976Sjmallett uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable. 3873215976Sjmallett 0=disable, 1=enable 3874215976Sjmallett If the memory part does not support DCC, then this bit 3875215976Sjmallett must be set to 0. */ 3876215976Sjmallett uint64_t reserved_0_7 : 8; 3877215976Sjmallett#else 3878215976Sjmallett uint64_t reserved_0_7 : 8; 3879215976Sjmallett uint64_t dcc_enable : 1; 3880215976Sjmallett uint64_t sil_mode : 1; 3881215976Sjmallett uint64_t sequence : 3; 3882215976Sjmallett uint64_t idlepower : 3; 3883215976Sjmallett uint64_t forcewrite : 4; 3884215976Sjmallett uint64_t ecc_adr : 1; 3885215976Sjmallett uint64_t reserved_21_63 : 43; 3886215976Sjmallett#endif 3887215976Sjmallett } cn52xx; 3888215976Sjmallett struct cvmx_lmcx_ctl1_cn52xx cn52xxp1; 3889215976Sjmallett struct cvmx_lmcx_ctl1_cn52xx cn56xx; 3890215976Sjmallett struct cvmx_lmcx_ctl1_cn52xx cn56xxp1; 3891232812Sjmallett struct cvmx_lmcx_ctl1_cn58xx { 3892232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3893215976Sjmallett uint64_t reserved_10_63 : 54; 3894215976Sjmallett uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */ 3895215976Sjmallett uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable. 3896215976Sjmallett 0=disable, 1=enable 3897215976Sjmallett If the memory part does not support DCC, then this bit 3898215976Sjmallett must be set to 0. 
*/ 3899215976Sjmallett uint64_t reserved_0_7 : 8; 3900215976Sjmallett#else 3901215976Sjmallett uint64_t reserved_0_7 : 8; 3902215976Sjmallett uint64_t dcc_enable : 1; 3903215976Sjmallett uint64_t sil_mode : 1; 3904215976Sjmallett uint64_t reserved_10_63 : 54; 3905215976Sjmallett#endif 3906215976Sjmallett } cn58xx; 3907215976Sjmallett struct cvmx_lmcx_ctl1_cn58xx cn58xxp1; 3908215976Sjmallett}; 3909215976Sjmalletttypedef union cvmx_lmcx_ctl1 cvmx_lmcx_ctl1_t; 3910215976Sjmallett 3911215976Sjmallett/** 3912215976Sjmallett * cvmx_lmc#_dclk_cnt 3913215976Sjmallett * 3914215976Sjmallett * LMC_DCLK_CNT = Performance Counters 3915215976Sjmallett * 3916215976Sjmallett */ 3917232812Sjmallettunion cvmx_lmcx_dclk_cnt { 3918215976Sjmallett uint64_t u64; 3919232812Sjmallett struct cvmx_lmcx_dclk_cnt_s { 3920232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3921215976Sjmallett uint64_t dclkcnt : 64; /**< Performance Counter 3922215976Sjmallett 64-bit counter that increments every CK cycle */ 3923215976Sjmallett#else 3924215976Sjmallett uint64_t dclkcnt : 64; 3925215976Sjmallett#endif 3926215976Sjmallett } s; 3927232812Sjmallett struct cvmx_lmcx_dclk_cnt_s cn61xx; 3928215976Sjmallett struct cvmx_lmcx_dclk_cnt_s cn63xx; 3929215976Sjmallett struct cvmx_lmcx_dclk_cnt_s cn63xxp1; 3930232812Sjmallett struct cvmx_lmcx_dclk_cnt_s cn66xx; 3931232812Sjmallett struct cvmx_lmcx_dclk_cnt_s cn68xx; 3932232812Sjmallett struct cvmx_lmcx_dclk_cnt_s cn68xxp1; 3933232812Sjmallett struct cvmx_lmcx_dclk_cnt_s cnf71xx; 3934215976Sjmallett}; 3935215976Sjmalletttypedef union cvmx_lmcx_dclk_cnt cvmx_lmcx_dclk_cnt_t; 3936215976Sjmallett 3937215976Sjmallett/** 3938215976Sjmallett * cvmx_lmc#_dclk_cnt_hi 3939215976Sjmallett * 3940215976Sjmallett * LMC_DCLK_CNT_HI = Performance Counters 3941215976Sjmallett * 3942215976Sjmallett */ 3943232812Sjmallettunion cvmx_lmcx_dclk_cnt_hi { 3944215976Sjmallett uint64_t u64; 3945232812Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s { 3946232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 
3947215976Sjmallett uint64_t reserved_32_63 : 32; 3948215976Sjmallett uint64_t dclkcnt_hi : 32; /**< Performance Counter that counts dclks 3949215976Sjmallett Upper 32-bits of a 64-bit counter. */ 3950215976Sjmallett#else 3951215976Sjmallett uint64_t dclkcnt_hi : 32; 3952215976Sjmallett uint64_t reserved_32_63 : 32; 3953215976Sjmallett#endif 3954215976Sjmallett } s; 3955215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn30xx; 3956215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn31xx; 3957215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn38xx; 3958215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn38xxp2; 3959215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn50xx; 3960215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn52xx; 3961215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn52xxp1; 3962215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn56xx; 3963215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn56xxp1; 3964215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn58xx; 3965215976Sjmallett struct cvmx_lmcx_dclk_cnt_hi_s cn58xxp1; 3966215976Sjmallett}; 3967215976Sjmalletttypedef union cvmx_lmcx_dclk_cnt_hi cvmx_lmcx_dclk_cnt_hi_t; 3968215976Sjmallett 3969215976Sjmallett/** 3970215976Sjmallett * cvmx_lmc#_dclk_cnt_lo 3971215976Sjmallett * 3972215976Sjmallett * LMC_DCLK_CNT_LO = Performance Counters 3973215976Sjmallett * 3974215976Sjmallett */ 3975232812Sjmallettunion cvmx_lmcx_dclk_cnt_lo { 3976215976Sjmallett uint64_t u64; 3977232812Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s { 3978232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 3979215976Sjmallett uint64_t reserved_32_63 : 32; 3980215976Sjmallett uint64_t dclkcnt_lo : 32; /**< Performance Counter that counts dclks 3981215976Sjmallett Lower 32-bits of a 64-bit counter. 
*/ 3982215976Sjmallett#else 3983215976Sjmallett uint64_t dclkcnt_lo : 32; 3984215976Sjmallett uint64_t reserved_32_63 : 32; 3985215976Sjmallett#endif 3986215976Sjmallett } s; 3987215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn30xx; 3988215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn31xx; 3989215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn38xx; 3990215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn38xxp2; 3991215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn50xx; 3992215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn52xx; 3993215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn52xxp1; 3994215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn56xx; 3995215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn56xxp1; 3996215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn58xx; 3997215976Sjmallett struct cvmx_lmcx_dclk_cnt_lo_s cn58xxp1; 3998215976Sjmallett}; 3999215976Sjmalletttypedef union cvmx_lmcx_dclk_cnt_lo cvmx_lmcx_dclk_cnt_lo_t; 4000215976Sjmallett 4001215976Sjmallett/** 4002215976Sjmallett * cvmx_lmc#_dclk_ctl 4003215976Sjmallett * 4004215976Sjmallett * LMC_DCLK_CTL = LMC DCLK generation control 4005215976Sjmallett * 4006215976Sjmallett * 4007215976Sjmallett * Notes: 4008215976Sjmallett * This CSR is only relevant for LMC1. LMC0_DCLK_CTL is not used. 4009215976Sjmallett * 4010215976Sjmallett */ 4011232812Sjmallettunion cvmx_lmcx_dclk_ctl { 4012215976Sjmallett uint64_t u64; 4013232812Sjmallett struct cvmx_lmcx_dclk_ctl_s { 4014232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4015215976Sjmallett uint64_t reserved_8_63 : 56; 4016215976Sjmallett uint64_t off90_ena : 1; /**< 0=use global DCLK (i.e. the PLL) directly for LMC1 4017215976Sjmallett 1=use the 90 degree DCLK DLL to offset LMC1 DCLK */ 4018215976Sjmallett uint64_t dclk90_byp : 1; /**< 0=90 degree DCLK DLL uses sampled delay from LMC0 4019215976Sjmallett 1=90 degree DCLK DLL uses DCLK90_VLU 4020215976Sjmallett See DCLK90_VLU. 
*/ 4021215976Sjmallett uint64_t dclk90_ld : 1; /**< The 90 degree DCLK DLL samples the delay setting 4022215976Sjmallett from LMC0's DLL when this field transitions 0->1 */ 4023215976Sjmallett uint64_t dclk90_vlu : 5; /**< Manual open-loop delay setting. 4024215976Sjmallett The LMC1 90 degree DCLK DLL uses DCLK90_VLU rather 4025215976Sjmallett than the delay setting sampled from LMC0 when 4026215976Sjmallett DCLK90_BYP=1. */ 4027215976Sjmallett#else 4028215976Sjmallett uint64_t dclk90_vlu : 5; 4029215976Sjmallett uint64_t dclk90_ld : 1; 4030215976Sjmallett uint64_t dclk90_byp : 1; 4031215976Sjmallett uint64_t off90_ena : 1; 4032215976Sjmallett uint64_t reserved_8_63 : 56; 4033215976Sjmallett#endif 4034215976Sjmallett } s; 4035215976Sjmallett struct cvmx_lmcx_dclk_ctl_s cn56xx; 4036215976Sjmallett struct cvmx_lmcx_dclk_ctl_s cn56xxp1; 4037215976Sjmallett}; 4038215976Sjmalletttypedef union cvmx_lmcx_dclk_ctl cvmx_lmcx_dclk_ctl_t; 4039215976Sjmallett 4040215976Sjmallett/** 4041215976Sjmallett * cvmx_lmc#_ddr2_ctl 4042215976Sjmallett * 4043215976Sjmallett * LMC_DDR2_CTL = LMC DDR2 & DLL Control Register 4044215976Sjmallett * 4045215976Sjmallett */ 4046232812Sjmallettunion cvmx_lmcx_ddr2_ctl { 4047215976Sjmallett uint64_t u64; 4048232812Sjmallett struct cvmx_lmcx_ddr2_ctl_s { 4049232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4050215976Sjmallett uint64_t reserved_32_63 : 32; 4051215976Sjmallett uint64_t bank8 : 1; /**< For 8 bank DDR2 parts 4052215976Sjmallett 1 - DDR2 parts have 8 internal banks (BA is 3 bits 4053215976Sjmallett wide). 4054215976Sjmallett 0 - DDR2 parts have 4 internal banks (BA is 2 bits 4055215976Sjmallett wide). */ 4056215976Sjmallett uint64_t burst8 : 1; /**< 8-burst mode. 4057215976Sjmallett 1 - DDR data transfer happens in burst of 8 4058215976Sjmallett 0 - DDR data transfer happens in burst of 4 4059215976Sjmallett BURST8 should be set when DDR2T is set 4060215976Sjmallett to minimize the command bandwidth loss. 
*/ 4061215976Sjmallett uint64_t addlat : 3; /**< Additional Latency for posted CAS 4062215976Sjmallett When Posted CAS is on, this configures the additional 4063215976Sjmallett latency. This should be set to 4064215976Sjmallett 1 .. LMC_MEM_CFG1[TRCD]-2 4065215976Sjmallett (Note the implication that posted CAS should not 4066215976Sjmallett be used when tRCD is two.) */ 4067215976Sjmallett uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR2. */ 4068215976Sjmallett uint64_t bwcnt : 1; /**< Bus utilization counter Clear. 4069215976Sjmallett Clears the LMC_OPS_CNT_*, LMC_IFB_CNT_*, and 4070215976Sjmallett LMC_DCLK_CNT_* registers. SW should first write this 4071215976Sjmallett field to a one, then write this field to a zero to 4072215976Sjmallett clear the CSR's. */ 4073215976Sjmallett uint64_t twr : 3; /**< DDR Write Recovery time (tWR). Last Wr Brst to Pre delay 4074215976Sjmallett This is not a direct encoding of the value. Its 4075215976Sjmallett programmed as below per DDR2 spec. The decimal number 4076215976Sjmallett on the right is RNDUP(tWR(ns) / tCYC(ns)) 4077215976Sjmallett TYP=15ns 4078215976Sjmallett - 000: RESERVED 4079215976Sjmallett - 001: 2 4080215976Sjmallett - 010: 3 4081215976Sjmallett - 011: 4 4082215976Sjmallett - 100: 5 4083215976Sjmallett - 101: 6 4084215976Sjmallett - 110: 7 4085215976Sjmallett - 111: 8 */ 4086215976Sjmallett uint64_t silo_hc : 1; /**< Delays the read sample window by a Half Cycle. */ 4087215976Sjmallett uint64_t ddr_eof : 4; /**< Early Fill Counter Init. 4088215976Sjmallett L2 needs to know a few cycle before a fill completes so 4089215976Sjmallett it can get its Control pipe started (for better overall 4090215976Sjmallett performance). This counter contains an init value which 4091215976Sjmallett is a function of Eclk/Dclk ratio to account for the 4092215976Sjmallett asynchronous boundary between L2 cache and the DRAM 4093215976Sjmallett controller. 
This init value will 4094215976Sjmallett determine when to safely let the L2 know that a fill 4095215976Sjmallett termination is coming up. 4096215976Sjmallett Set DDR_EOF according to the following rule: 4097215976Sjmallett eclkFreq/dclkFreq = dclkPeriod/eclkPeriod = RATIO 4098215976Sjmallett RATIO < 6/6 -> illegal 4099215976Sjmallett 6/6 <= RATIO < 6/5 -> DDR_EOF=3 4100215976Sjmallett 6/5 <= RATIO < 6/4 -> DDR_EOF=3 4101215976Sjmallett 6/4 <= RATIO < 6/3 -> DDR_EOF=2 4102215976Sjmallett 6/3 <= RATIO < 6/2 -> DDR_EOF=1 4103215976Sjmallett 6/2 <= RATIO < 6/1 -> DDR_EOF=0 4104215976Sjmallett 6/1 <= RATIO -> DDR_EOF=0 */ 4105215976Sjmallett uint64_t tfaw : 5; /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1 4106215976Sjmallett Four Access Window time. Relevant only in DDR2 AND in 4107215976Sjmallett 8-bank parts. 4108215976Sjmallett tFAW = 5'b0 in DDR2-4bank 4109215976Sjmallett tFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1 4110215976Sjmallett in DDR2-8bank */ 4111215976Sjmallett uint64_t crip_mode : 1; /**< Cripple Mode - When set, the LMC allows only 4112215976Sjmallett 1 inflight transaction (.vs. 8 in normal mode). 4113215976Sjmallett This bit is ONLY to be set at power-on and 4114215976Sjmallett should not be set for normal use. */ 4115215976Sjmallett uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 cycle window for CMD and 4116215976Sjmallett address. This mode helps relieve setup time pressure 4117215976Sjmallett on the Address and command bus which nominally have 4118215976Sjmallett a very large fanout. Please refer to Micron's tech 4119215976Sjmallett note tn_47_01 titled "DDR2-533 Memory Design Guide 4120215976Sjmallett for Two Dimm Unbuffered Systems" for physical details. 4121215976Sjmallett BURST8 should be set when DDR2T is set to minimize 4122215976Sjmallett add/cmd loss. */ 4123215976Sjmallett uint64_t odt_ena : 1; /**< Enable Obsolete ODT on Reads 4124215976Sjmallett Obsolete Read ODT wiggles DDR_ODT_* pins on reads. 
4125215976Sjmallett Should normally be cleared to zero. 4126215976Sjmallett When this is on, the following fields must also be 4127215976Sjmallett programmed: 4128215976Sjmallett LMC_CTL->QS_DIC - programs the termination value 4129215976Sjmallett LMC_RODT_CTL - programs the ODT I/O mask for Reads */ 4130215976Sjmallett uint64_t qdll_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after 4131215976Sjmallett DCLK init sequence will reset the DDR 90 DLL. Should 4132215976Sjmallett happen at startup before any activity in DDR. 4133215976Sjmallett DRESET should be asserted before and for 10 usec 4134215976Sjmallett following the 0->1 transition on QDLL_ENA. */ 4135215976Sjmallett uint64_t dll90_vlu : 5; /**< Contains the open loop setting value for the DDR90 delay 4136215976Sjmallett line. */ 4137215976Sjmallett uint64_t dll90_byp : 1; /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be 4138215976Sjmallett bypassed and the setting is defined by DLL90_VLU */ 4139215976Sjmallett uint64_t rdqs : 1; /**< DDR2 RDQS mode. When set, configures memory subsystem to 4140215976Sjmallett use unidirectional DQS pins. 
RDQS/DM - Rcv & DQS - Xmit */ 4141215976Sjmallett uint64_t ddr2 : 1; /**< Should be set */ 4142215976Sjmallett#else 4143215976Sjmallett uint64_t ddr2 : 1; 4144215976Sjmallett uint64_t rdqs : 1; 4145215976Sjmallett uint64_t dll90_byp : 1; 4146215976Sjmallett uint64_t dll90_vlu : 5; 4147215976Sjmallett uint64_t qdll_ena : 1; 4148215976Sjmallett uint64_t odt_ena : 1; 4149215976Sjmallett uint64_t ddr2t : 1; 4150215976Sjmallett uint64_t crip_mode : 1; 4151215976Sjmallett uint64_t tfaw : 5; 4152215976Sjmallett uint64_t ddr_eof : 4; 4153215976Sjmallett uint64_t silo_hc : 1; 4154215976Sjmallett uint64_t twr : 3; 4155215976Sjmallett uint64_t bwcnt : 1; 4156215976Sjmallett uint64_t pocas : 1; 4157215976Sjmallett uint64_t addlat : 3; 4158215976Sjmallett uint64_t burst8 : 1; 4159215976Sjmallett uint64_t bank8 : 1; 4160215976Sjmallett uint64_t reserved_32_63 : 32; 4161215976Sjmallett#endif 4162215976Sjmallett } s; 4163232812Sjmallett struct cvmx_lmcx_ddr2_ctl_cn30xx { 4164232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4165215976Sjmallett uint64_t reserved_32_63 : 32; 4166215976Sjmallett uint64_t bank8 : 1; /**< For 8 bank DDR2 parts 4167215976Sjmallett 1 - DDR2 parts have 8 internal banks (BA is 3 bits 4168215976Sjmallett wide). 4169215976Sjmallett 0 - DDR2 parts have 4 internal banks (BA is 2 bits 4170215976Sjmallett wide). */ 4171215976Sjmallett uint64_t burst8 : 1; /**< 8-burst mode. 4172215976Sjmallett 1 - DDR data transfer happens in burst of 8 4173215976Sjmallett 0 - DDR data transfer happens in burst of 4 4174215976Sjmallett BURST8 should be set when DDR2T is set to minimize 4175215976Sjmallett add/cmd bandwidth loss. */ 4176215976Sjmallett uint64_t addlat : 3; /**< Additional Latency for posted CAS 4177215976Sjmallett When Posted CAS is on, this configures the additional 4178215976Sjmallett latency. This should be set to 4179215976Sjmallett 1 .. 
LMC_MEM_CFG1[TRCD]-2 4180215976Sjmallett (Note the implication that posted CAS should not 4181215976Sjmallett be used when tRCD is two.) */ 4182215976Sjmallett uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR2. */ 4183215976Sjmallett uint64_t bwcnt : 1; /**< Bus utilization counter Clear. 4184215976Sjmallett Clears the LMC_OPS_CNT_*, LMC_IFB_CNT_*, and 4185215976Sjmallett LMC_DCLK_CNT_* registers. SW should first write this 4186215976Sjmallett field to a one, then write this field to a zero to 4187215976Sjmallett clear the CSR's. */ 4188215976Sjmallett uint64_t twr : 3; /**< DDR Write Recovery time (tWR). Last Wr Brst to Pre delay 4189215976Sjmallett This is not a direct encoding of the value. Its 4190215976Sjmallett programmed as below per DDR2 spec. The decimal number 4191215976Sjmallett on the right is RNDUP(tWR(ns) / tCYC(ns)) 4192215976Sjmallett TYP=15ns 4193215976Sjmallett - 000: RESERVED 4194215976Sjmallett - 001: 2 4195215976Sjmallett - 010: 3 4196215976Sjmallett - 011: 4 4197215976Sjmallett - 100: 5 4198215976Sjmallett - 101: 6 4199215976Sjmallett - 110-111: RESERVED */ 4200215976Sjmallett uint64_t silo_hc : 1; /**< Delays the read sample window by a Half Cycle. */ 4201215976Sjmallett uint64_t ddr_eof : 4; /**< Early Fill Counter Init. 4202215976Sjmallett L2 needs to know a few cycle before a fill completes so 4203215976Sjmallett it can get its Control pipe started (for better overall 4204215976Sjmallett performance). This counter contains an init value which 4205215976Sjmallett is a function of Eclk/Dclk ratio to account for the 4206215976Sjmallett asynchronous boundary between L2 cache and the DRAM 4207215976Sjmallett controller. This init value will 4208215976Sjmallett determine when to safely let the L2 know that a fill 4209215976Sjmallett termination is coming up. 4210215976Sjmallett DDR_EOF = RNDUP (DCLK period/Eclk Period). If the ratio 4211215976Sjmallett is above 3, set DDR_EOF to 3. 
4212215976Sjmallett DCLK/ECLK period DDR_EOF 4213215976Sjmallett Less than 1 1 4214215976Sjmallett Less than 2 2 4215215976Sjmallett More than 2 3 */ 4216215976Sjmallett uint64_t tfaw : 5; /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1 4217215976Sjmallett Four Access Window time. Relevant only in 4218215976Sjmallett 8-bank parts. 4219215976Sjmallett TFAW = 5'b0 for DDR2-4bank 4220215976Sjmallett TFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1 in DDR2-8bank */ 4221215976Sjmallett uint64_t crip_mode : 1; /**< Cripple Mode - When set, the LMC allows only 4222215976Sjmallett 1 inflight transaction (.vs. 8 in normal mode). 4223215976Sjmallett This bit is ONLY to be set at power-on and 4224215976Sjmallett should not be set for normal use. */ 4225215976Sjmallett uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 cycle window for CMD and 4226215976Sjmallett address. This mode helps relieve setup time pressure 4227215976Sjmallett on the Address and command bus which nominally have 4228215976Sjmallett a very large fanout. Please refer to Micron's tech 4229215976Sjmallett note tn_47_01 titled "DDR2-533 Memory Design Guide 4230215976Sjmallett for Two Dimm Unbuffered Systems" for physical details. 4231215976Sjmallett BURST8 should be used when DDR2T is set to minimize 4232215976Sjmallett add/cmd bandwidth loss. */ 4233215976Sjmallett uint64_t odt_ena : 1; /**< Enable ODT for DDR2 on Reads 4234215976Sjmallett When this is on, the following fields must also be 4235215976Sjmallett programmed: 4236215976Sjmallett LMC_CTL->QS_DIC - programs the termination value 4237215976Sjmallett LMC_RODT_CTL - programs the ODT I/O mask for writes 4238215976Sjmallett Program as 0 for DDR1 mode and ODT needs to be off 4239215976Sjmallett on Octeon Reads */ 4240215976Sjmallett uint64_t qdll_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after 4241215976Sjmallett erst deassertion will reset the DDR 90 DLL. Should 4242215976Sjmallett happen at startup before any activity in DDR. 
*/ 4243215976Sjmallett uint64_t dll90_vlu : 5; /**< Contains the open loop setting value for the DDR90 delay 4244215976Sjmallett line. */ 4245215976Sjmallett uint64_t dll90_byp : 1; /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be 4246215976Sjmallett bypassed and the setting is defined by DLL90_VLU */ 4247215976Sjmallett uint64_t reserved_1_1 : 1; 4248215976Sjmallett uint64_t ddr2 : 1; /**< DDR2 Enable: When set, configures memory subsystem for 4249215976Sjmallett DDR-II SDRAMs. */ 4250215976Sjmallett#else 4251215976Sjmallett uint64_t ddr2 : 1; 4252215976Sjmallett uint64_t reserved_1_1 : 1; 4253215976Sjmallett uint64_t dll90_byp : 1; 4254215976Sjmallett uint64_t dll90_vlu : 5; 4255215976Sjmallett uint64_t qdll_ena : 1; 4256215976Sjmallett uint64_t odt_ena : 1; 4257215976Sjmallett uint64_t ddr2t : 1; 4258215976Sjmallett uint64_t crip_mode : 1; 4259215976Sjmallett uint64_t tfaw : 5; 4260215976Sjmallett uint64_t ddr_eof : 4; 4261215976Sjmallett uint64_t silo_hc : 1; 4262215976Sjmallett uint64_t twr : 3; 4263215976Sjmallett uint64_t bwcnt : 1; 4264215976Sjmallett uint64_t pocas : 1; 4265215976Sjmallett uint64_t addlat : 3; 4266215976Sjmallett uint64_t burst8 : 1; 4267215976Sjmallett uint64_t bank8 : 1; 4268215976Sjmallett uint64_t reserved_32_63 : 32; 4269215976Sjmallett#endif 4270215976Sjmallett } cn30xx; 4271215976Sjmallett struct cvmx_lmcx_ddr2_ctl_cn30xx cn31xx; 4272215976Sjmallett struct cvmx_lmcx_ddr2_ctl_s cn38xx; 4273215976Sjmallett struct cvmx_lmcx_ddr2_ctl_s cn38xxp2; 4274215976Sjmallett struct cvmx_lmcx_ddr2_ctl_s cn50xx; 4275215976Sjmallett struct cvmx_lmcx_ddr2_ctl_s cn52xx; 4276215976Sjmallett struct cvmx_lmcx_ddr2_ctl_s cn52xxp1; 4277215976Sjmallett struct cvmx_lmcx_ddr2_ctl_s cn56xx; 4278215976Sjmallett struct cvmx_lmcx_ddr2_ctl_s cn56xxp1; 4279215976Sjmallett struct cvmx_lmcx_ddr2_ctl_s cn58xx; 4280215976Sjmallett struct cvmx_lmcx_ddr2_ctl_s cn58xxp1; 4281215976Sjmallett}; 4282215976Sjmalletttypedef union cvmx_lmcx_ddr2_ctl cvmx_lmcx_ddr2_ctl_t; 
4283215976Sjmallett 4284215976Sjmallett/** 4285215976Sjmallett * cvmx_lmc#_ddr_pll_ctl 4286215976Sjmallett * 4287215976Sjmallett * LMC_DDR_PLL_CTL = LMC DDR PLL control 4288215976Sjmallett * 4289215976Sjmallett * 4290215976Sjmallett * Notes: 4291215976Sjmallett * DDR PLL Bringup sequence: 4292215976Sjmallett * 1. Write CLKF, DDR_PS_EN, DFM_PS_EN, DIFFAMP, CPS, CPB. 4293215976Sjmallett * If test mode is going to be activated, then also write jtg__ddr_pll_tm_en1, jtg__ddr_pll_tm_en2, jtg__ddr_pll_tm_en3, 4294215976Sjmallett * jtg__ddr_pll_tm_en4, jtg__dfa_pll_tm_en1, jtg__dfa_pll_tm_en2, jtg__dfa_pll_tm_en3, jtg__dfa_pll_tm_en4, JTAG_TEST_MODE 4295215976Sjmallett * 2. Wait 128 ref clock cycles (7680 rclk cycles) 4296215976Sjmallett * 3. Write 1 to RESET_N 4297215976Sjmallett * 4. Wait 1152 ref clocks (1152*16 rclk cycles) 4298215976Sjmallett * 5. Write 0 to DDR_DIV_RESET and DFM_DIV_RESET 4299215976Sjmallett * 6. Wait 10 ref clock cycles (160 rclk cycles) before bringing up the DDR interface 4300215976Sjmallett * If test mode is going to be activated, wait an additional 8191 ref clocks (8191*16 rclk cycles) to allow PLL 4301215976Sjmallett * clock alignment 4302215976Sjmallett */ 4303232812Sjmallettunion cvmx_lmcx_ddr_pll_ctl { 4304215976Sjmallett uint64_t u64; 4305232812Sjmallett struct cvmx_lmcx_ddr_pll_ctl_s { 4306232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4307215976Sjmallett uint64_t reserved_27_63 : 37; 4308215976Sjmallett uint64_t jtg_test_mode : 1; /**< JTAG Test Mode 4309215976Sjmallett Clock alignment between DCLK & REFCLK as well as FCLK & 4310215976Sjmallett REFCLK can only be performed after the ddr_pll_divider_reset 4311215976Sjmallett is deasserted. SW need to wait atleast 10 reference clock 4312215976Sjmallett cycles after deasserting pll_divider_reset before asserting 4313215976Sjmallett LMC(0)_DDR_PLL_CTL[JTG_TEST_MODE]. 
During alignment (which can 4314215976Sjmallett take upto 160 microseconds) DCLK and FCLK can exhibit some 4315215976Sjmallett high frequency pulses. Therefore, all bring up activities in 4316215976Sjmallett that clock domain need to be delayed (when the chip operates 4317215976Sjmallett in jtg_test_mode) by about 160 microseconds to ensure that 4318215976Sjmallett lock is achieved. */ 4319215976Sjmallett uint64_t dfm_div_reset : 1; /**< DFM postscalar divider reset */ 4320215976Sjmallett uint64_t dfm_ps_en : 3; /**< DFM postscalar divide ratio 4321215976Sjmallett Determines the DFM CK speed. 4322215976Sjmallett 0x0 : Divide LMC+DFM PLL output by 1 4323215976Sjmallett 0x1 : Divide LMC+DFM PLL output by 2 4324215976Sjmallett 0x2 : Divide LMC+DFM PLL output by 3 4325215976Sjmallett 0x3 : Divide LMC+DFM PLL output by 4 4326215976Sjmallett 0x4 : Divide LMC+DFM PLL output by 6 4327215976Sjmallett 0x5 : Divide LMC+DFM PLL output by 8 4328215976Sjmallett 0x6 : Divide LMC+DFM PLL output by 12 4329215976Sjmallett 0x7 : Divide LMC+DFM PLL output by 12 4330215976Sjmallett DFM_PS_EN is not used when DFM_DIV_RESET = 1 */ 4331215976Sjmallett uint64_t ddr_div_reset : 1; /**< DDR postscalar divider reset */ 4332215976Sjmallett uint64_t ddr_ps_en : 3; /**< DDR postscalar divide ratio 4333215976Sjmallett Determines the LMC CK speed. 
4334215976Sjmallett 0x0 : Divide LMC+DFM PLL output by 1 4335215976Sjmallett 0x1 : Divide LMC+DFM PLL output by 2 4336215976Sjmallett 0x2 : Divide LMC+DFM PLL output by 3 4337215976Sjmallett 0x3 : Divide LMC+DFM PLL output by 4 4338215976Sjmallett 0x4 : Divide LMC+DFM PLL output by 6 4339215976Sjmallett 0x5 : Divide LMC+DFM PLL output by 8 4340215976Sjmallett 0x6 : Divide LMC+DFM PLL output by 12 4341215976Sjmallett 0x7 : Divide LMC+DFM PLL output by 12 4342215976Sjmallett DDR_PS_EN is not used when DDR_DIV_RESET = 1 */ 4343215976Sjmallett uint64_t diffamp : 4; /**< PLL diffamp input transconductance */ 4344215976Sjmallett uint64_t cps : 3; /**< PLL charge-pump current */ 4345215976Sjmallett uint64_t cpb : 3; /**< PLL charge-pump current */ 4346215976Sjmallett uint64_t reset_n : 1; /**< PLL reset */ 4347215976Sjmallett uint64_t clkf : 7; /**< Multiply reference by CLKF 4348215976Sjmallett 32 <= CLKF <= 64 4349215976Sjmallett LMC+DFM PLL frequency = 50 * CLKF 4350215976Sjmallett min = 1.6 GHz, max = 3.2 GHz */ 4351215976Sjmallett#else 4352215976Sjmallett uint64_t clkf : 7; 4353215976Sjmallett uint64_t reset_n : 1; 4354215976Sjmallett uint64_t cpb : 3; 4355215976Sjmallett uint64_t cps : 3; 4356215976Sjmallett uint64_t diffamp : 4; 4357215976Sjmallett uint64_t ddr_ps_en : 3; 4358215976Sjmallett uint64_t ddr_div_reset : 1; 4359215976Sjmallett uint64_t dfm_ps_en : 3; 4360215976Sjmallett uint64_t dfm_div_reset : 1; 4361215976Sjmallett uint64_t jtg_test_mode : 1; 4362215976Sjmallett uint64_t reserved_27_63 : 37; 4363215976Sjmallett#endif 4364215976Sjmallett } s; 4365232812Sjmallett struct cvmx_lmcx_ddr_pll_ctl_s cn61xx; 4366215976Sjmallett struct cvmx_lmcx_ddr_pll_ctl_s cn63xx; 4367215976Sjmallett struct cvmx_lmcx_ddr_pll_ctl_s cn63xxp1; 4368232812Sjmallett struct cvmx_lmcx_ddr_pll_ctl_s cn66xx; 4369232812Sjmallett struct cvmx_lmcx_ddr_pll_ctl_s cn68xx; 4370232812Sjmallett struct cvmx_lmcx_ddr_pll_ctl_s cn68xxp1; 4371232812Sjmallett struct cvmx_lmcx_ddr_pll_ctl_s cnf71xx; 
4372215976Sjmallett}; 4373215976Sjmalletttypedef union cvmx_lmcx_ddr_pll_ctl cvmx_lmcx_ddr_pll_ctl_t; 4374215976Sjmallett 4375215976Sjmallett/** 4376215976Sjmallett * cvmx_lmc#_delay_cfg 4377215976Sjmallett * 4378215976Sjmallett * LMC_DELAY_CFG = Open-loop delay line settings 4379215976Sjmallett * 4380215976Sjmallett * 4381215976Sjmallett * Notes: 4382215976Sjmallett * The DQ bits add OUTGOING delay only to dq, dqs_[p,n], cb, cbs_[p,n], dqm. Delay is approximately 4383215976Sjmallett * 50-80ps per setting depending on process/voltage. There is no need to add incoming delay since by 4384215976Sjmallett * default all strobe bits are delayed internally by 90 degrees (as was always the case in previous 4385215976Sjmallett * passes and past chips. 4386215976Sjmallett * 4387215976Sjmallett * The CMD add delay to all command bits DDR_RAS, DDR_CAS, DDR_A<15:0>, DDR_BA<2:0>, DDR_n_CS<1:0>_L, 4388215976Sjmallett * DDR_WE, DDR_CKE and DDR_ODT_<7:0>. Again, delay is 50-80ps per tap. 4389215976Sjmallett * 4390215976Sjmallett * The CLK bits add delay to all clock signals DDR_CK_<5:0>_P and DDR_CK_<5:0>_N. Again, delay is 4391215976Sjmallett * 50-80ps per tap. 4392215976Sjmallett * 4393215976Sjmallett * The usage scenario is the following: There is too much delay on command signals and setup on command 4394215976Sjmallett * is not met. The user can then delay the clock until setup is met. 4395215976Sjmallett * 4396215976Sjmallett * At the same time though, dq/dqs should be delayed because there is also a DDR spec tying dqs with 4397215976Sjmallett * clock. If clock is too much delayed with respect to dqs, writes will start to fail. 4398215976Sjmallett * 4399215976Sjmallett * This scheme should eliminate the board need of adding routing delay to clock signals to make high 4400215976Sjmallett * frequencies work. 
4401215976Sjmallett */ 4402232812Sjmallettunion cvmx_lmcx_delay_cfg { 4403215976Sjmallett uint64_t u64; 4404232812Sjmallett struct cvmx_lmcx_delay_cfg_s { 4405232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4406215976Sjmallett uint64_t reserved_15_63 : 49; 4407215976Sjmallett uint64_t dq : 5; /**< Setting for DQ delay line */ 4408215976Sjmallett uint64_t cmd : 5; /**< Setting for CMD delay line */ 4409215976Sjmallett uint64_t clk : 5; /**< Setting for CLK delay line */ 4410215976Sjmallett#else 4411215976Sjmallett uint64_t clk : 5; 4412215976Sjmallett uint64_t cmd : 5; 4413215976Sjmallett uint64_t dq : 5; 4414215976Sjmallett uint64_t reserved_15_63 : 49; 4415215976Sjmallett#endif 4416215976Sjmallett } s; 4417215976Sjmallett struct cvmx_lmcx_delay_cfg_s cn30xx; 4418232812Sjmallett struct cvmx_lmcx_delay_cfg_cn38xx { 4419232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4420215976Sjmallett uint64_t reserved_14_63 : 50; 4421215976Sjmallett uint64_t dq : 4; /**< Setting for DQ delay line */ 4422215976Sjmallett uint64_t reserved_9_9 : 1; 4423215976Sjmallett uint64_t cmd : 4; /**< Setting for CMD delay line */ 4424215976Sjmallett uint64_t reserved_4_4 : 1; 4425215976Sjmallett uint64_t clk : 4; /**< Setting for CLK delay line */ 4426215976Sjmallett#else 4427215976Sjmallett uint64_t clk : 4; 4428215976Sjmallett uint64_t reserved_4_4 : 1; 4429215976Sjmallett uint64_t cmd : 4; 4430215976Sjmallett uint64_t reserved_9_9 : 1; 4431215976Sjmallett uint64_t dq : 4; 4432215976Sjmallett uint64_t reserved_14_63 : 50; 4433215976Sjmallett#endif 4434215976Sjmallett } cn38xx; 4435215976Sjmallett struct cvmx_lmcx_delay_cfg_cn38xx cn50xx; 4436215976Sjmallett struct cvmx_lmcx_delay_cfg_cn38xx cn52xx; 4437215976Sjmallett struct cvmx_lmcx_delay_cfg_cn38xx cn52xxp1; 4438215976Sjmallett struct cvmx_lmcx_delay_cfg_cn38xx cn56xx; 4439215976Sjmallett struct cvmx_lmcx_delay_cfg_cn38xx cn56xxp1; 4440215976Sjmallett struct cvmx_lmcx_delay_cfg_cn38xx cn58xx; 4441215976Sjmallett struct cvmx_lmcx_delay_cfg_cn38xx 
cn58xxp1; 4442215976Sjmallett}; 4443215976Sjmalletttypedef union cvmx_lmcx_delay_cfg cvmx_lmcx_delay_cfg_t; 4444215976Sjmallett 4445215976Sjmallett/** 4446215976Sjmallett * cvmx_lmc#_dimm#_params 4447215976Sjmallett * 4448215976Sjmallett * LMC_DIMMX_PARAMS = LMC DIMMX Params 4449215976Sjmallett * This register contains values to be programmed into each control word in the corresponding (registered) DIMM. The control words allow 4450215976Sjmallett * optimization of the device properties for different raw card designs. 4451215976Sjmallett * 4452215976Sjmallett * Notes: 4453215976Sjmallett * LMC only uses this CSR when LMC*_CONTROL[RDIMM_ENA]=1. During a power-up/init sequence, LMC writes 4454215976Sjmallett * these fields into the control words in the JEDEC standard SSTE32882 registering clock driver on an 4455215976Sjmallett * RDIMM when corresponding LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. 4456215976Sjmallett */ 4457232812Sjmallettunion cvmx_lmcx_dimmx_params { 4458215976Sjmallett uint64_t u64; 4459232812Sjmallett struct cvmx_lmcx_dimmx_params_s { 4460232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4461215976Sjmallett uint64_t rc15 : 4; /**< RC15, Reserved */ 4462215976Sjmallett uint64_t rc14 : 4; /**< RC14, Reserved */ 4463215976Sjmallett uint64_t rc13 : 4; /**< RC13, Reserved */ 4464215976Sjmallett uint64_t rc12 : 4; /**< RC12, Reserved */ 4465215976Sjmallett uint64_t rc11 : 4; /**< RC11, Encoding for RDIMM Operating VDD */ 4466215976Sjmallett uint64_t rc10 : 4; /**< RC10, Encoding for RDIMM Operating Speed */ 4467215976Sjmallett uint64_t rc9 : 4; /**< RC9 , Power Savings Settings Control Word */ 4468215976Sjmallett uint64_t rc8 : 4; /**< RC8 , Additional IBT Settings Control Word */ 4469215976Sjmallett uint64_t rc7 : 4; /**< RC7 , Reserved */ 4470215976Sjmallett uint64_t rc6 : 4; /**< RC6 , Reserved */ 4471215976Sjmallett uint64_t rc5 : 4; /**< RC5 , CK Driver Characterstics Control Word */ 4472215976Sjmallett uint64_t rc4 : 4; /**< RC4 , Control Signals Driver 
Characteristics Control Word */ 4473215976Sjmallett uint64_t rc3 : 4; /**< RC3 , CA Signals Driver Characterstics Control Word */ 4474215976Sjmallett uint64_t rc2 : 4; /**< RC2 , Timing Control Word */ 4475215976Sjmallett uint64_t rc1 : 4; /**< RC1 , Clock Driver Enable Control Word */ 4476215976Sjmallett uint64_t rc0 : 4; /**< RC0 , Global Features Control Word */ 4477215976Sjmallett#else 4478215976Sjmallett uint64_t rc0 : 4; 4479215976Sjmallett uint64_t rc1 : 4; 4480215976Sjmallett uint64_t rc2 : 4; 4481215976Sjmallett uint64_t rc3 : 4; 4482215976Sjmallett uint64_t rc4 : 4; 4483215976Sjmallett uint64_t rc5 : 4; 4484215976Sjmallett uint64_t rc6 : 4; 4485215976Sjmallett uint64_t rc7 : 4; 4486215976Sjmallett uint64_t rc8 : 4; 4487215976Sjmallett uint64_t rc9 : 4; 4488215976Sjmallett uint64_t rc10 : 4; 4489215976Sjmallett uint64_t rc11 : 4; 4490215976Sjmallett uint64_t rc12 : 4; 4491215976Sjmallett uint64_t rc13 : 4; 4492215976Sjmallett uint64_t rc14 : 4; 4493215976Sjmallett uint64_t rc15 : 4; 4494215976Sjmallett#endif 4495215976Sjmallett } s; 4496232812Sjmallett struct cvmx_lmcx_dimmx_params_s cn61xx; 4497215976Sjmallett struct cvmx_lmcx_dimmx_params_s cn63xx; 4498215976Sjmallett struct cvmx_lmcx_dimmx_params_s cn63xxp1; 4499232812Sjmallett struct cvmx_lmcx_dimmx_params_s cn66xx; 4500232812Sjmallett struct cvmx_lmcx_dimmx_params_s cn68xx; 4501232812Sjmallett struct cvmx_lmcx_dimmx_params_s cn68xxp1; 4502232812Sjmallett struct cvmx_lmcx_dimmx_params_s cnf71xx; 4503215976Sjmallett}; 4504215976Sjmalletttypedef union cvmx_lmcx_dimmx_params cvmx_lmcx_dimmx_params_t; 4505215976Sjmallett 4506215976Sjmallett/** 4507215976Sjmallett * cvmx_lmc#_dimm_ctl 4508215976Sjmallett * 4509215976Sjmallett * LMC_DIMM_CTL = LMC DIMM Control 4510215976Sjmallett * 4511215976Sjmallett * 4512215976Sjmallett * Notes: 4513215976Sjmallett * This CSR is only used when LMC*_CONTROL[RDIMM_ENA]=1. 
During a power-up/init sequence, this CSR 4514215976Sjmallett * controls LMC's writes to the control words in the JEDEC standard SSTE32882 registering clock driver 4515215976Sjmallett * on an RDIMM. 4516215976Sjmallett */ 4517232812Sjmallettunion cvmx_lmcx_dimm_ctl { 4518215976Sjmallett uint64_t u64; 4519232812Sjmallett struct cvmx_lmcx_dimm_ctl_s { 4520232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4521215976Sjmallett uint64_t reserved_46_63 : 18; 4522232812Sjmallett uint64_t parity : 1; /**< Parity 4523232812Sjmallett The PAR_IN input of a registered DIMM should be 4524232812Sjmallett tied off. LMC adjusts the value of the DDR_WE_L (DWE#) 4525232812Sjmallett pin during DDR3 register part control word writes to 4526232812Sjmallett ensure the parity is observed correctly by the receiving 4527232812Sjmallett SSTE32882 register part. 4528232812Sjmallett When PAR_IN is grounded, PARITY should be cleared to 0. */ 4529215976Sjmallett uint64_t tcws : 13; /**< LMC waits for this time period before and after a RDIMM 4530215976Sjmallett Control Word Access during a power-up/init SEQUENCE. 4531215976Sjmallett TCWS is in multiples of 8 CK cycles. 4532215976Sjmallett Set TCWS (CSR field) = RNDUP[tcws(ns)/(8*tCYC(ns))], 4533215976Sjmallett where tCWS is the desired time (ns), and tCYC(ns) 4534215976Sjmallett is the DDR clock frequency (not data rate). 
4535215976Sjmallett TYP=0x4e0 (equivalent to 15us) when changing 4536215976Sjmallett clock timing (RC2.DBA1, RC6.DA4, RC10.DA3, RC10.DA4, 4537215976Sjmallett RC11.DA3, and RC11.DA4) 4538215976Sjmallett TYP=0x8, otherwise 4539215976Sjmallett 0x0 = Reserved */ 4540215976Sjmallett uint64_t dimm1_wmask : 16; /**< DIMM1 Write Mask 4541215976Sjmallett if (DIMM1_WMASK[n] = 1) 4542215976Sjmallett Write DIMM1.RCn */ 4543215976Sjmallett uint64_t dimm0_wmask : 16; /**< DIMM0 Write Mask 4544215976Sjmallett if (DIMM0_WMASK[n] = 1) 4545215976Sjmallett Write DIMM0.RCn */ 4546215976Sjmallett#else 4547215976Sjmallett uint64_t dimm0_wmask : 16; 4548215976Sjmallett uint64_t dimm1_wmask : 16; 4549215976Sjmallett uint64_t tcws : 13; 4550215976Sjmallett uint64_t parity : 1; 4551215976Sjmallett uint64_t reserved_46_63 : 18; 4552215976Sjmallett#endif 4553215976Sjmallett } s; 4554232812Sjmallett struct cvmx_lmcx_dimm_ctl_s cn61xx; 4555215976Sjmallett struct cvmx_lmcx_dimm_ctl_s cn63xx; 4556215976Sjmallett struct cvmx_lmcx_dimm_ctl_s cn63xxp1; 4557232812Sjmallett struct cvmx_lmcx_dimm_ctl_s cn66xx; 4558232812Sjmallett struct cvmx_lmcx_dimm_ctl_s cn68xx; 4559232812Sjmallett struct cvmx_lmcx_dimm_ctl_s cn68xxp1; 4560232812Sjmallett struct cvmx_lmcx_dimm_ctl_s cnf71xx; 4561215976Sjmallett}; 4562215976Sjmalletttypedef union cvmx_lmcx_dimm_ctl cvmx_lmcx_dimm_ctl_t; 4563215976Sjmallett 4564215976Sjmallett/** 4565215976Sjmallett * cvmx_lmc#_dll_ctl 4566215976Sjmallett * 4567215976Sjmallett * LMC_DLL_CTL = LMC DLL control and DCLK reset 4568215976Sjmallett * 4569215976Sjmallett */ 4570232812Sjmallettunion cvmx_lmcx_dll_ctl { 4571215976Sjmallett uint64_t u64; 4572232812Sjmallett struct cvmx_lmcx_dll_ctl_s { 4573232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4574215976Sjmallett uint64_t reserved_8_63 : 56; 4575215976Sjmallett uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the 4576215976Sjmallett Dclk domain is (DRESET || ECLK_RESET). 
*/ 4577215976Sjmallett uint64_t dll90_byp : 1; /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be 4578215976Sjmallett bypassed and the setting is defined by DLL90_VLU */ 4579215976Sjmallett uint64_t dll90_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after 4580215976Sjmallett DCLK init sequence resets the DDR 90 DLL. Should 4581215976Sjmallett happen at startup before any activity in DDR. QDLL_ENA 4582215976Sjmallett must not transition 1->0 outside of a DRESET sequence 4583215976Sjmallett (i.e. it must remain 1 until the next DRESET). 4584215976Sjmallett DRESET should be asserted before and for 10 usec 4585215976Sjmallett following the 0->1 transition on QDLL_ENA. */ 4586215976Sjmallett uint64_t dll90_vlu : 5; /**< Contains the open loop setting value for the DDR90 delay 4587215976Sjmallett line. */ 4588215976Sjmallett#else 4589215976Sjmallett uint64_t dll90_vlu : 5; 4590215976Sjmallett uint64_t dll90_ena : 1; 4591215976Sjmallett uint64_t dll90_byp : 1; 4592215976Sjmallett uint64_t dreset : 1; 4593215976Sjmallett uint64_t reserved_8_63 : 56; 4594215976Sjmallett#endif 4595215976Sjmallett } s; 4596215976Sjmallett struct cvmx_lmcx_dll_ctl_s cn52xx; 4597215976Sjmallett struct cvmx_lmcx_dll_ctl_s cn52xxp1; 4598215976Sjmallett struct cvmx_lmcx_dll_ctl_s cn56xx; 4599215976Sjmallett struct cvmx_lmcx_dll_ctl_s cn56xxp1; 4600215976Sjmallett}; 4601215976Sjmalletttypedef union cvmx_lmcx_dll_ctl cvmx_lmcx_dll_ctl_t; 4602215976Sjmallett 4603215976Sjmallett/** 4604215976Sjmallett * cvmx_lmc#_dll_ctl2 4605215976Sjmallett * 4606215976Sjmallett * LMC_DLL_CTL2 = LMC (Octeon) DLL control and DCLK reset 4607215976Sjmallett * 4608215976Sjmallett * 4609215976Sjmallett * Notes: 4610215976Sjmallett * DLL Bringup sequence: 4611215976Sjmallett * 1. If not done already, set LMC*_DLL_CTL2 = 0, except when LMC*_DLL_CTL2[DRESET] = 1. 4612215976Sjmallett * 2. Write 1 to LMC*_DLL_CTL2[DLL_BRINGUP] 4613215976Sjmallett * 3. 
Wait for 10 CK cycles, then write 1 to LMC*_DLL_CTL2[QUAD_DLL_ENA]. It may not be feasible to count 10 CK cycles, but the 4614215976Sjmallett * idea is to configure the delay line into DLL mode by asserting DLL_BRING_UP earlier than [QUAD_DLL_ENA], even if it is one 4615215976Sjmallett * cycle early. LMC*_DLL_CTL2[QUAD_DLL_ENA] must not change after this point without restarting the LMC and/or DRESET initialization 4616215976Sjmallett * sequence. 4617215976Sjmallett * 4. Read L2D_BST0 and wait for the result. (L2D_BST0 is subject to change depending on how it called in o63. It is still ok to go 4618215976Sjmallett * without step 4, since step 5 has enough time) 4619215976Sjmallett * 5. Wait 10 us. 4620215976Sjmallett * 6. Write 0 to LMC*_DLL_CTL2[DLL_BRINGUP]. LMC*_DLL_CTL2[DLL_BRINGUP] must not change after this point without restarting the LMC 4621215976Sjmallett * and/or DRESET initialization sequence. 4622215976Sjmallett * 7. Read L2D_BST0 and wait for the result. (same as step 4, but the idea here is the wait some time before going to step 8, even it 4623215976Sjmallett * is one cycle is fine) 4624215976Sjmallett * 8. Write 0 to LMC*_DLL_CTL2[DRESET]. LMC*_DLL_CTL2[DRESET] must not change after this point without restarting the LMC and/or 4625215976Sjmallett * DRESET initialization sequence. 4626215976Sjmallett */ 4627232812Sjmallettunion cvmx_lmcx_dll_ctl2 { 4628215976Sjmallett uint64_t u64; 4629232812Sjmallett struct cvmx_lmcx_dll_ctl2_s { 4630232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4631232812Sjmallett uint64_t reserved_16_63 : 48; 4632232812Sjmallett uint64_t intf_en : 1; /**< Interface Enable */ 4633232812Sjmallett uint64_t dll_bringup : 1; /**< DLL Bringup */ 4634232812Sjmallett uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the 4635232812Sjmallett Dclk domain is (DRESET || ECLK_RESET). 
*/ 4636232812Sjmallett uint64_t quad_dll_ena : 1; /**< DLL Enable */ 4637232812Sjmallett uint64_t byp_sel : 4; /**< Bypass select 4638232812Sjmallett 0000 : no byte 4639232812Sjmallett 0001 : byte 0 4640232812Sjmallett - ... 4641232812Sjmallett 1001 : byte 8 4642232812Sjmallett 1010 : all bytes 4643232812Sjmallett 1011-1111 : Reserved */ 4644232812Sjmallett uint64_t byp_setting : 8; /**< Bypass setting 4645232812Sjmallett DDR3-1600: 00100010 4646232812Sjmallett DDR3-1333: 00110010 4647232812Sjmallett DDR3-1066: 01001011 4648232812Sjmallett DDR3-800 : 01110101 4649232812Sjmallett DDR3-667 : 10010110 4650232812Sjmallett DDR3-600 : 10101100 */ 4651232812Sjmallett#else 4652232812Sjmallett uint64_t byp_setting : 8; 4653232812Sjmallett uint64_t byp_sel : 4; 4654232812Sjmallett uint64_t quad_dll_ena : 1; 4655232812Sjmallett uint64_t dreset : 1; 4656232812Sjmallett uint64_t dll_bringup : 1; 4657232812Sjmallett uint64_t intf_en : 1; 4658232812Sjmallett uint64_t reserved_16_63 : 48; 4659232812Sjmallett#endif 4660232812Sjmallett } s; 4661232812Sjmallett struct cvmx_lmcx_dll_ctl2_s cn61xx; 4662232812Sjmallett struct cvmx_lmcx_dll_ctl2_cn63xx { 4663232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4664215976Sjmallett uint64_t reserved_15_63 : 49; 4665215976Sjmallett uint64_t dll_bringup : 1; /**< DLL Bringup */ 4666215976Sjmallett uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the 4667215976Sjmallett Dclk domain is (DRESET || ECLK_RESET). */ 4668215976Sjmallett uint64_t quad_dll_ena : 1; /**< DLL Enable */ 4669215976Sjmallett uint64_t byp_sel : 4; /**< Bypass select 4670215976Sjmallett 0000 : no byte 4671215976Sjmallett 0001 : byte 0 4672215976Sjmallett - ... 
4673215976Sjmallett 1001 : byte 8 4674215976Sjmallett 1010 : all bytes 4675215976Sjmallett 1011-1111 : Reserved */ 4676215976Sjmallett uint64_t byp_setting : 8; /**< Bypass setting 4677215976Sjmallett DDR3-1600: 00100010 4678215976Sjmallett DDR3-1333: 00110010 4679215976Sjmallett DDR3-1066: 01001011 4680215976Sjmallett DDR3-800 : 01110101 4681215976Sjmallett DDR3-667 : 10010110 4682215976Sjmallett DDR3-600 : 10101100 */ 4683215976Sjmallett#else 4684215976Sjmallett uint64_t byp_setting : 8; 4685215976Sjmallett uint64_t byp_sel : 4; 4686215976Sjmallett uint64_t quad_dll_ena : 1; 4687215976Sjmallett uint64_t dreset : 1; 4688215976Sjmallett uint64_t dll_bringup : 1; 4689215976Sjmallett uint64_t reserved_15_63 : 49; 4690215976Sjmallett#endif 4691232812Sjmallett } cn63xx; 4692232812Sjmallett struct cvmx_lmcx_dll_ctl2_cn63xx cn63xxp1; 4693232812Sjmallett struct cvmx_lmcx_dll_ctl2_cn63xx cn66xx; 4694232812Sjmallett struct cvmx_lmcx_dll_ctl2_s cn68xx; 4695232812Sjmallett struct cvmx_lmcx_dll_ctl2_s cn68xxp1; 4696232812Sjmallett struct cvmx_lmcx_dll_ctl2_s cnf71xx; 4697215976Sjmallett}; 4698215976Sjmalletttypedef union cvmx_lmcx_dll_ctl2 cvmx_lmcx_dll_ctl2_t; 4699215976Sjmallett 4700215976Sjmallett/** 4701215976Sjmallett * cvmx_lmc#_dll_ctl3 4702215976Sjmallett * 4703215976Sjmallett * LMC_DLL_CTL3 = LMC DLL control and DCLK reset 4704215976Sjmallett * 4705215976Sjmallett */ 4706232812Sjmallettunion cvmx_lmcx_dll_ctl3 { 4707215976Sjmallett uint64_t u64; 4708232812Sjmallett struct cvmx_lmcx_dll_ctl3_s { 4709232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4710232812Sjmallett uint64_t reserved_41_63 : 23; 4711232812Sjmallett uint64_t dclk90_fwd : 1; /**< Forward setting 4712232812Sjmallett 0 : disable 4713232812Sjmallett 1 : forward (generates a 1 cycle pulse to forward setting) 4714232812Sjmallett This register is oneshot and clears itself each time 4715232812Sjmallett it is set */ 4716232812Sjmallett uint64_t ddr_90_dly_byp : 1; /**< Bypass DDR90_DLY in Clock Tree */ 
4717232812Sjmallett uint64_t dclk90_recal_dis : 1; /**< Disable periodic recalibration of DDR90 Delay Line in */ 4718232812Sjmallett uint64_t dclk90_byp_sel : 1; /**< Bypass Setting Select for DDR90 Delay Line */ 4719232812Sjmallett uint64_t dclk90_byp_setting : 8; /**< Bypass Setting for DDR90 Delay Line */ 4720232812Sjmallett uint64_t dll_fast : 1; /**< DLL lock 4721232812Sjmallett 0 = DLL locked */ 4722232812Sjmallett uint64_t dll90_setting : 8; /**< Encoded DLL settings. Works in conjuction with 4723232812Sjmallett DLL90_BYTE_SEL */ 4724232812Sjmallett uint64_t fine_tune_mode : 1; /**< DLL Fine Tune Mode 4725232812Sjmallett 0 = disabled 4726232812Sjmallett 1 = enable. 4727232812Sjmallett When enabled, calibrate internal PHY DLL every 4728232812Sjmallett LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */ 4729232812Sjmallett uint64_t dll_mode : 1; /**< DLL Mode */ 4730232812Sjmallett uint64_t dll90_byte_sel : 4; /**< Observe DLL settings for selected byte 4731232812Sjmallett 0001 : byte 0 4732232812Sjmallett - ... 4733232812Sjmallett 1001 : byte 8 4734232812Sjmallett 0000,1010-1111 : Reserved */ 4735232812Sjmallett uint64_t offset_ena : 1; /**< Offset enable 4736232812Sjmallett 0 = disable 4737232812Sjmallett 1 = enable */ 4738232812Sjmallett uint64_t load_offset : 1; /**< Load offset 4739232812Sjmallett 0 : disable 4740232812Sjmallett 1 : load (generates a 1 cycle pulse to the PHY) 4741232812Sjmallett This register is oneshot and clears itself each time 4742232812Sjmallett it is set */ 4743232812Sjmallett uint64_t mode_sel : 2; /**< Mode select 4744232812Sjmallett 00 : reset 4745232812Sjmallett 01 : write 4746232812Sjmallett 10 : read 4747232812Sjmallett 11 : write & read */ 4748232812Sjmallett uint64_t byte_sel : 4; /**< Byte select 4749232812Sjmallett 0000 : no byte 4750232812Sjmallett 0001 : byte 0 4751232812Sjmallett - ... 
4752232812Sjmallett 1001 : byte 8 4753232812Sjmallett 1010 : all bytes 4754232812Sjmallett 1011-1111 : Reserved */ 4755232812Sjmallett uint64_t offset : 6; /**< Write/read offset setting 4756232812Sjmallett [4:0] : offset 4757232812Sjmallett [5] : 0 = increment, 1 = decrement 4758232812Sjmallett Not a 2's complement value */ 4759232812Sjmallett#else 4760232812Sjmallett uint64_t offset : 6; 4761232812Sjmallett uint64_t byte_sel : 4; 4762232812Sjmallett uint64_t mode_sel : 2; 4763232812Sjmallett uint64_t load_offset : 1; 4764232812Sjmallett uint64_t offset_ena : 1; 4765232812Sjmallett uint64_t dll90_byte_sel : 4; 4766232812Sjmallett uint64_t dll_mode : 1; 4767232812Sjmallett uint64_t fine_tune_mode : 1; 4768232812Sjmallett uint64_t dll90_setting : 8; 4769232812Sjmallett uint64_t dll_fast : 1; 4770232812Sjmallett uint64_t dclk90_byp_setting : 8; 4771232812Sjmallett uint64_t dclk90_byp_sel : 1; 4772232812Sjmallett uint64_t dclk90_recal_dis : 1; 4773232812Sjmallett uint64_t ddr_90_dly_byp : 1; 4774232812Sjmallett uint64_t dclk90_fwd : 1; 4775232812Sjmallett uint64_t reserved_41_63 : 23; 4776232812Sjmallett#endif 4777232812Sjmallett } s; 4778232812Sjmallett struct cvmx_lmcx_dll_ctl3_s cn61xx; 4779232812Sjmallett struct cvmx_lmcx_dll_ctl3_cn63xx { 4780232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4781215976Sjmallett uint64_t reserved_29_63 : 35; 4782215976Sjmallett uint64_t dll_fast : 1; /**< DLL lock 4783215976Sjmallett 0 = DLL locked */ 4784215976Sjmallett uint64_t dll90_setting : 8; /**< Encoded DLL settings. Works in conjuction with 4785215976Sjmallett DLL90_BYTE_SEL */ 4786215976Sjmallett uint64_t fine_tune_mode : 1; /**< DLL Fine Tune Mode 4787215976Sjmallett 0 = disabled 4788215976Sjmallett 1 = enable. 4789215976Sjmallett When enabled, calibrate internal PHY DLL every 4790215976Sjmallett LMC*_CONFIG[REF_ZQCS_INT] CK cycles. 
*/ 4791215976Sjmallett uint64_t dll_mode : 1; /**< DLL Mode */ 4792215976Sjmallett uint64_t dll90_byte_sel : 4; /**< Observe DLL settings for selected byte 4793215976Sjmallett 0001 : byte 0 4794215976Sjmallett - ... 4795215976Sjmallett 1001 : byte 8 4796215976Sjmallett 0000,1010-1111 : Reserved */ 4797215976Sjmallett uint64_t offset_ena : 1; /**< Offset enable 4798215976Sjmallett 0 = disable 4799215976Sjmallett 1 = enable */ 4800215976Sjmallett uint64_t load_offset : 1; /**< Load offset 4801215976Sjmallett 0 : disable 4802215976Sjmallett 1 : load (generates a 1 cycle pulse to the PHY) 4803215976Sjmallett This register is oneshot and clears itself each time 4804215976Sjmallett it is set */ 4805215976Sjmallett uint64_t mode_sel : 2; /**< Mode select 4806215976Sjmallett 00 : reset 4807215976Sjmallett 01 : write 4808215976Sjmallett 10 : read 4809215976Sjmallett 11 : write & read */ 4810215976Sjmallett uint64_t byte_sel : 4; /**< Byte select 4811215976Sjmallett 0000 : no byte 4812215976Sjmallett 0001 : byte 0 4813215976Sjmallett - ... 
4814215976Sjmallett 1001 : byte 8 4815215976Sjmallett 1010 : all bytes 4816215976Sjmallett 1011-1111 : Reserved */ 4817215976Sjmallett uint64_t offset : 6; /**< Write/read offset setting 4818215976Sjmallett [4:0] : offset 4819215976Sjmallett [5] : 0 = increment, 1 = decrement 4820215976Sjmallett Not a 2's complement value */ 4821215976Sjmallett#else 4822215976Sjmallett uint64_t offset : 6; 4823215976Sjmallett uint64_t byte_sel : 4; 4824215976Sjmallett uint64_t mode_sel : 2; 4825215976Sjmallett uint64_t load_offset : 1; 4826215976Sjmallett uint64_t offset_ena : 1; 4827215976Sjmallett uint64_t dll90_byte_sel : 4; 4828215976Sjmallett uint64_t dll_mode : 1; 4829215976Sjmallett uint64_t fine_tune_mode : 1; 4830215976Sjmallett uint64_t dll90_setting : 8; 4831215976Sjmallett uint64_t dll_fast : 1; 4832215976Sjmallett uint64_t reserved_29_63 : 35; 4833215976Sjmallett#endif 4834232812Sjmallett } cn63xx; 4835232812Sjmallett struct cvmx_lmcx_dll_ctl3_cn63xx cn63xxp1; 4836232812Sjmallett struct cvmx_lmcx_dll_ctl3_cn63xx cn66xx; 4837232812Sjmallett struct cvmx_lmcx_dll_ctl3_s cn68xx; 4838232812Sjmallett struct cvmx_lmcx_dll_ctl3_s cn68xxp1; 4839232812Sjmallett struct cvmx_lmcx_dll_ctl3_s cnf71xx; 4840215976Sjmallett}; 4841215976Sjmalletttypedef union cvmx_lmcx_dll_ctl3 cvmx_lmcx_dll_ctl3_t; 4842215976Sjmallett 4843215976Sjmallett/** 4844215976Sjmallett * cvmx_lmc#_dual_memcfg 4845215976Sjmallett * 4846215976Sjmallett * LMC_DUAL_MEMCFG = LMC Dual Memory Configuration Register 4847215976Sjmallett * 4848215976Sjmallett * This register controls certain parameters of Dual Memory Configuration 4849215976Sjmallett * 4850215976Sjmallett * Notes: 4851215976Sjmallett * This register enables the design to have two, separate memory configurations, selected dynamically 4852215976Sjmallett * by the reference address. Note however, that both configurations share 4853215976Sjmallett * LMC*_CONTROL[XOR_BANK], LMC*_CONFIG[PBANK_LSB], LMC*_CONFIG[RANK_ENA], and all timing parameters. 
4854215976Sjmallett * In this description, "config0" refers to the normal memory configuration that is defined by the 4855215976Sjmallett * LMC*_CONFIG[ROW_LSB] parameters and "config1" refers to the dual (or second) 4856215976Sjmallett * memory configuration that is defined by this register. 4857215976Sjmallett * 4858215976Sjmallett * Enable mask to chip select mapping is shown below: 4859215976Sjmallett * CS_MASK[3] -> DIMM1_CS_<1> 4860215976Sjmallett * CS_MASK[2] -> DIMM1_CS_<0> 4861215976Sjmallett * 4862215976Sjmallett * CS_MASK[1] -> DIMM0_CS_<1> 4863215976Sjmallett * CS_MASK[0] -> DIMM0_CS_<0> 4864215976Sjmallett * 4865215976Sjmallett * DIMM n uses the pair of chip selects DIMMn_CS_<1:0>. 4866215976Sjmallett * 4867215976Sjmallett * Programming restrictions for CS_MASK: 4868215976Sjmallett * when LMC*_CONFIG[RANK_ENA] == 0, CS_MASK[2n + 1] = CS_MASK[2n] 4869215976Sjmallett */ 4870232812Sjmallettunion cvmx_lmcx_dual_memcfg { 4871215976Sjmallett uint64_t u64; 4872232812Sjmallett struct cvmx_lmcx_dual_memcfg_s { 4873232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4874215976Sjmallett uint64_t reserved_20_63 : 44; 4875215976Sjmallett uint64_t bank8 : 1; /**< See LMC_DDR2_CTL[BANK8] */ 4876215976Sjmallett uint64_t row_lsb : 3; /**< See LMC*_CONFIG[ROW_LSB] */ 4877215976Sjmallett uint64_t reserved_8_15 : 8; 4878215976Sjmallett uint64_t cs_mask : 8; /**< Chip select mask. 4879215976Sjmallett This mask corresponds to the 8 chip selects for a memory 4880215976Sjmallett configuration. Each reference address will assert one of 4881215976Sjmallett the chip selects. If that chip select has its 4882215976Sjmallett corresponding CS_MASK bit set, then the "config1" 4883215976Sjmallett parameters are used, otherwise the "config0" parameters 4884215976Sjmallett are used. See additional notes below. 
4885232812Sjmallett [7:4] *UNUSED IN 6xxx* */ 4886215976Sjmallett#else 4887215976Sjmallett uint64_t cs_mask : 8; 4888215976Sjmallett uint64_t reserved_8_15 : 8; 4889215976Sjmallett uint64_t row_lsb : 3; 4890215976Sjmallett uint64_t bank8 : 1; 4891215976Sjmallett uint64_t reserved_20_63 : 44; 4892215976Sjmallett#endif 4893215976Sjmallett } s; 4894215976Sjmallett struct cvmx_lmcx_dual_memcfg_s cn50xx; 4895215976Sjmallett struct cvmx_lmcx_dual_memcfg_s cn52xx; 4896215976Sjmallett struct cvmx_lmcx_dual_memcfg_s cn52xxp1; 4897215976Sjmallett struct cvmx_lmcx_dual_memcfg_s cn56xx; 4898215976Sjmallett struct cvmx_lmcx_dual_memcfg_s cn56xxp1; 4899215976Sjmallett struct cvmx_lmcx_dual_memcfg_s cn58xx; 4900215976Sjmallett struct cvmx_lmcx_dual_memcfg_s cn58xxp1; 4901232812Sjmallett struct cvmx_lmcx_dual_memcfg_cn61xx { 4902232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4903215976Sjmallett uint64_t reserved_19_63 : 45; 4904215976Sjmallett uint64_t row_lsb : 3; /**< See LMC*_CONFIG[ROW_LSB] */ 4905215976Sjmallett uint64_t reserved_8_15 : 8; 4906215976Sjmallett uint64_t cs_mask : 8; /**< Chip select mask. 4907215976Sjmallett This mask corresponds to the 8 chip selects for a memory 4908215976Sjmallett configuration. Each reference address will assert one of 4909215976Sjmallett the chip selects. If that chip select has its 4910215976Sjmallett corresponding CS_MASK bit set, then the "config1" 4911215976Sjmallett parameters are used, otherwise the "config0" parameters 4912215976Sjmallett are used. See additional notes below. 
4913232812Sjmallett [7:4] *UNUSED IN 6xxx* */ 4914215976Sjmallett#else 4915215976Sjmallett uint64_t cs_mask : 8; 4916215976Sjmallett uint64_t reserved_8_15 : 8; 4917215976Sjmallett uint64_t row_lsb : 3; 4918215976Sjmallett uint64_t reserved_19_63 : 45; 4919215976Sjmallett#endif 4920232812Sjmallett } cn61xx; 4921232812Sjmallett struct cvmx_lmcx_dual_memcfg_cn61xx cn63xx; 4922232812Sjmallett struct cvmx_lmcx_dual_memcfg_cn61xx cn63xxp1; 4923232812Sjmallett struct cvmx_lmcx_dual_memcfg_cn61xx cn66xx; 4924232812Sjmallett struct cvmx_lmcx_dual_memcfg_cn61xx cn68xx; 4925232812Sjmallett struct cvmx_lmcx_dual_memcfg_cn61xx cn68xxp1; 4926232812Sjmallett struct cvmx_lmcx_dual_memcfg_cn61xx cnf71xx; 4927215976Sjmallett}; 4928215976Sjmalletttypedef union cvmx_lmcx_dual_memcfg cvmx_lmcx_dual_memcfg_t; 4929215976Sjmallett 4930215976Sjmallett/** 4931215976Sjmallett * cvmx_lmc#_ecc_synd 4932215976Sjmallett * 4933215976Sjmallett * LMC_ECC_SYND = MRD ECC Syndromes 4934215976Sjmallett * 4935215976Sjmallett */ 4936232812Sjmallettunion cvmx_lmcx_ecc_synd { 4937215976Sjmallett uint64_t u64; 4938232812Sjmallett struct cvmx_lmcx_ecc_synd_s { 4939232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 4940215976Sjmallett uint64_t reserved_32_63 : 32; 4941215976Sjmallett uint64_t mrdsyn3 : 8; /**< MRD ECC Syndrome Quad3 4942215976Sjmallett MRDSYN3 corresponds to DQ[63:0]_c1_p1 4943232812Sjmallett In 32b mode, ecc is calculated on 4 cycle worth of data 4944232812Sjmallett MRDSYN3 corresponds to [DQ[31:0]_c3_p1, DQ[31:0]_c3_p0] 4945215976Sjmallett where _cC_pP denotes cycle C and phase P */ 4946215976Sjmallett uint64_t mrdsyn2 : 8; /**< MRD ECC Syndrome Quad2 4947215976Sjmallett MRDSYN2 corresponds to DQ[63:0]_c1_p0 4948232812Sjmallett In 32b mode, ecc is calculated on 4 cycle worth of data 4949232812Sjmallett MRDSYN2 corresponds to [DQ[31:0]_c2_p1, DQ[31:0]_c2_p0] 4950215976Sjmallett where _cC_pP denotes cycle C and phase P */ 4951215976Sjmallett uint64_t mrdsyn1 : 8; /**< MRD ECC Syndrome Quad1 
4952215976Sjmallett MRDSYN1 corresponds to DQ[63:0]_c0_p1 4953232812Sjmallett In 32b mode, ecc is calculated on 4 cycle worth of data 4954232812Sjmallett MRDSYN1 corresponds to [DQ[31:0]_c1_p1, DQ[31:0]_c1_p0] 4955215976Sjmallett where _cC_pP denotes cycle C and phase P */ 4956215976Sjmallett uint64_t mrdsyn0 : 8; /**< MRD ECC Syndrome Quad0 4957215976Sjmallett MRDSYN0 corresponds to DQ[63:0]_c0_p0 4958232812Sjmallett In 32b mode, ecc is calculated on 4 cycle worth of data 4959232812Sjmallett MRDSYN0 corresponds to [DQ[31:0]_c0_p1, DQ[31:0]_c0_p0] 4960215976Sjmallett where _cC_pP denotes cycle C and phase P */ 4961215976Sjmallett#else 4962215976Sjmallett uint64_t mrdsyn0 : 8; 4963215976Sjmallett uint64_t mrdsyn1 : 8; 4964215976Sjmallett uint64_t mrdsyn2 : 8; 4965215976Sjmallett uint64_t mrdsyn3 : 8; 4966215976Sjmallett uint64_t reserved_32_63 : 32; 4967215976Sjmallett#endif 4968215976Sjmallett } s; 4969215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn30xx; 4970215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn31xx; 4971215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn38xx; 4972215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn38xxp2; 4973215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn50xx; 4974215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn52xx; 4975215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn52xxp1; 4976215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn56xx; 4977215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn56xxp1; 4978215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn58xx; 4979215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn58xxp1; 4980232812Sjmallett struct cvmx_lmcx_ecc_synd_s cn61xx; 4981215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn63xx; 4982215976Sjmallett struct cvmx_lmcx_ecc_synd_s cn63xxp1; 4983232812Sjmallett struct cvmx_lmcx_ecc_synd_s cn66xx; 4984232812Sjmallett struct cvmx_lmcx_ecc_synd_s cn68xx; 4985232812Sjmallett struct cvmx_lmcx_ecc_synd_s cn68xxp1; 4986232812Sjmallett struct cvmx_lmcx_ecc_synd_s cnf71xx; 4987215976Sjmallett}; 4988215976Sjmalletttypedef union 
typedef union cvmx_lmcx_ecc_synd cvmx_lmcx_ecc_synd_t;

/**
 * cvmx_lmc#_fadr
 *
 * LMC_FADR = LMC Failing Address Register (SEC/DED/NXM)
 *
 * This register only captures the first transaction with ecc/nxm errors. A DED/NXM error can
 * over-write this register with its failing addresses if the first error was a SEC. If you write
 * LMC*_CONFIG->SEC_ERR/DED_ERR/NXM_ERR then it will clear the error bits and capture the
 * next failing address.
 *
 * If FDIMM is 2 that means the error is in the higher bits DIMM.
 *
 * Notes:
 * LMC*_FADR captures the failing pre-scrambled address location (split into dimm, bunk, bank,
 * etc). If scrambling is off, then LMC*_FADR will also capture the failing physical location
 * in the DRAM parts.
 *
 * LMC*_SCRAMBLED_FADR captures the actual failing address location in the physical DRAM parts:
 *  a. if scrambling is on, LMC*_SCRAMBLE_FADR contains the failing physical location in the
 *     DRAM parts (split into dimm, bunk, bank, etc)
 *  b. if scrambling is off, the pre-scramble and post-scramble addresses are the same, and so
 *     the contents of LMC*_SCRAMBLED_FADR match the contents of LMC*_FADR
 */
union cvmx_lmcx_fadr {
	uint64_t u64;
	struct cvmx_lmcx_fadr_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_0_63                : 64;
#else
	uint64_t reserved_0_63                : 64;
#endif
	} s;
	struct cvmx_lmcx_fadr_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t fdimm                        : 2;  /**< Failing DIMM# */
	uint64_t fbunk                        : 1;  /**< Failing Rank */
	uint64_t fbank                        : 3;  /**< Failing Bank[2:0] */
	uint64_t frow                         : 14; /**< Failing Row Address[13:0] */
	uint64_t fcol                         : 12; /**< Failing Column Start Address[11:0]
                                                         Represents the failing read's starting
                                                         column address (and not the exact column
                                                         address in which the SEC/DED was
                                                         detected) */
#else
	uint64_t fcol                         : 12;
	uint64_t frow                         : 14;
	uint64_t fbank                        : 3;
	uint64_t fbunk                        : 1;
	uint64_t fdimm                        : 2;
	uint64_t reserved_32_63               : 32;
#endif
	} cn30xx;
	struct cvmx_lmcx_fadr_cn30xx cn31xx;
	struct cvmx_lmcx_fadr_cn30xx cn38xx;
	struct cvmx_lmcx_fadr_cn30xx cn38xxp2;
	struct cvmx_lmcx_fadr_cn30xx cn50xx;
	struct cvmx_lmcx_fadr_cn30xx cn52xx;
	struct cvmx_lmcx_fadr_cn30xx cn52xxp1;
	struct cvmx_lmcx_fadr_cn30xx cn56xx;
	struct cvmx_lmcx_fadr_cn30xx cn56xxp1;
	struct cvmx_lmcx_fadr_cn30xx cn58xx;
	struct cvmx_lmcx_fadr_cn30xx cn58xxp1;
	struct cvmx_lmcx_fadr_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_36_63               : 28;
	uint64_t fdimm                        : 2;  /**< Failing DIMM# */
	uint64_t fbunk                        : 1;  /**< Failing Rank */
	uint64_t fbank                        : 3;  /**< Failing Bank[2:0] */
	uint64_t frow                         : 16; /**< Failing Row Address[15:0] */
	uint64_t fcol                         : 14; /**< Failing Column Address[13:0]
                                                         Technically, represents the address of
                                                         the 128b data that had an ecc error,
                                                         i.e. fcol[0] is always 0. Can be used in
                                                         conjunction with LMC*_CONFIG[DED_ERR] to
                                                         isolate the 64b chunk of data in error */
#else
	uint64_t fcol                         : 14;
	uint64_t frow                         : 16;
	uint64_t fbank                        : 3;
	uint64_t fbunk                        : 1;
	uint64_t fdimm                        : 2;
	uint64_t reserved_36_63               : 28;
#endif
	} cn61xx;
	struct cvmx_lmcx_fadr_cn61xx cn63xx;
	struct cvmx_lmcx_fadr_cn61xx cn63xxp1;
	struct cvmx_lmcx_fadr_cn61xx cn66xx;
	struct cvmx_lmcx_fadr_cn61xx cn68xx;
	struct cvmx_lmcx_fadr_cn61xx cn68xxp1;
	struct cvmx_lmcx_fadr_cn61xx cnf71xx;
};
typedef union cvmx_lmcx_fadr cvmx_lmcx_fadr_t;
/**
 * cvmx_lmc#_ifb_cnt
 *
 * LMC_IFB_CNT = Performance Counters
 *
 */
union cvmx_lmcx_ifb_cnt {
	uint64_t u64;
	struct cvmx_lmcx_ifb_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t ifbcnt                       : 64; /**< Performance Counter
                                                         64-bit counter that increments every
                                                         CK cycle there is something in the
                                                         in-flight buffer. */
#else
	uint64_t ifbcnt                       : 64;
#endif
	} s;
	struct cvmx_lmcx_ifb_cnt_s cn61xx;
	struct cvmx_lmcx_ifb_cnt_s cn63xx;
	struct cvmx_lmcx_ifb_cnt_s cn63xxp1;
	struct cvmx_lmcx_ifb_cnt_s cn66xx;
	struct cvmx_lmcx_ifb_cnt_s cn68xx;
	struct cvmx_lmcx_ifb_cnt_s cn68xxp1;
	struct cvmx_lmcx_ifb_cnt_s cnf71xx;
};
typedef union cvmx_lmcx_ifb_cnt cvmx_lmcx_ifb_cnt_t;

/**
 * cvmx_lmc#_ifb_cnt_hi
 *
 * LMC_IFB_CNT_HI = Performance Counters
 *
 */
union cvmx_lmcx_ifb_cnt_hi {
	uint64_t u64;
	struct cvmx_lmcx_ifb_cnt_hi_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t ifbcnt_hi                    : 32; /**< Performance Counter to measure Bus
                                                         Utilization. Upper 32-bits of 64-bit
                                                         counter that increments every cycle
                                                         there is something in the in-flight
                                                         buffer. */
#else
	uint64_t ifbcnt_hi                    : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_lmcx_ifb_cnt_hi_s cn30xx;
	struct cvmx_lmcx_ifb_cnt_hi_s cn31xx;
	struct cvmx_lmcx_ifb_cnt_hi_s cn38xx;
	struct cvmx_lmcx_ifb_cnt_hi_s cn38xxp2;
	struct cvmx_lmcx_ifb_cnt_hi_s cn50xx;
	struct cvmx_lmcx_ifb_cnt_hi_s cn52xx;
	struct cvmx_lmcx_ifb_cnt_hi_s cn52xxp1;
	struct cvmx_lmcx_ifb_cnt_hi_s cn56xx;
	struct cvmx_lmcx_ifb_cnt_hi_s cn56xxp1;
	struct cvmx_lmcx_ifb_cnt_hi_s cn58xx;
	struct cvmx_lmcx_ifb_cnt_hi_s cn58xxp1;
};
typedef union cvmx_lmcx_ifb_cnt_hi cvmx_lmcx_ifb_cnt_hi_t;
/**
 * cvmx_lmc#_ifb_cnt_lo
 *
 * LMC_IFB_CNT_LO = Performance Counters
 *
 */
union cvmx_lmcx_ifb_cnt_lo {
	uint64_t u64;
	struct cvmx_lmcx_ifb_cnt_lo_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t ifbcnt_lo                    : 32; /**< Performance Counter
                                                         Low 32-bits of 64-bit counter that
                                                         increments every cycle there is
                                                         something in the in-flight buffer. */
#else
	uint64_t ifbcnt_lo                    : 32;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_lmcx_ifb_cnt_lo_s cn30xx;
	struct cvmx_lmcx_ifb_cnt_lo_s cn31xx;
	struct cvmx_lmcx_ifb_cnt_lo_s cn38xx;
	struct cvmx_lmcx_ifb_cnt_lo_s cn38xxp2;
	struct cvmx_lmcx_ifb_cnt_lo_s cn50xx;
	struct cvmx_lmcx_ifb_cnt_lo_s cn52xx;
	struct cvmx_lmcx_ifb_cnt_lo_s cn52xxp1;
	struct cvmx_lmcx_ifb_cnt_lo_s cn56xx;
	struct cvmx_lmcx_ifb_cnt_lo_s cn56xxp1;
	struct cvmx_lmcx_ifb_cnt_lo_s cn58xx;
	struct cvmx_lmcx_ifb_cnt_lo_s cn58xxp1;
};
typedef union cvmx_lmcx_ifb_cnt_lo cvmx_lmcx_ifb_cnt_lo_t;

/**
 * cvmx_lmc#_int
 *
 * LMC_INT = LMC Interrupt Register
 *
 */
union cvmx_lmcx_int {
	uint64_t u64;
	struct cvmx_lmcx_int_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_9_63                : 55;
	uint64_t ded_err                      : 4;  /**< Double Error detected (DED) of Rd Data
                                                         [0] corresponds to DQ[63:0]_c0_p0
                                                         [1] corresponds to DQ[63:0]_c0_p1
                                                         [2] corresponds to DQ[63:0]_c1_p0
                                                         [3] corresponds to DQ[63:0]_c1_p1
                                                         In 32b mode, ecc is calculated on 4
                                                         cycle worth of data:
                                                         [0] = [DQ[31:0]_c0_p1, DQ[31:0]_c0_p0]
                                                         [1] = [DQ[31:0]_c1_p1, DQ[31:0]_c1_p0]
                                                         [2] = [DQ[31:0]_c2_p1, DQ[31:0]_c2_p0]
                                                         [3] = [DQ[31:0]_c3_p1, DQ[31:0]_c3_p0]
                                                         where _cC_pP denotes cycle C and phase P
                                                         Write of 1 will clear the corresponding
                                                         error bit */
	uint64_t sec_err                      : 4;  /**< Single Error (corrected) of Rd Data
                                                         Bit-to-lane mapping is identical to
                                                         DED_ERR above.
                                                         Write of 1 will clear the corresponding
                                                         error bit */
	uint64_t nxm_wr_err                   : 1;  /**< Write to non-existent memory
                                                         Write of 1 will clear the corresponding
                                                         error bit */
#else
	uint64_t nxm_wr_err                   : 1;
	uint64_t sec_err                      : 4;
	uint64_t ded_err                      : 4;
	uint64_t reserved_9_63                : 55;
#endif
	} s;
	struct cvmx_lmcx_int_s cn61xx;
	struct cvmx_lmcx_int_s cn63xx;
	struct cvmx_lmcx_int_s cn63xxp1;
	struct cvmx_lmcx_int_s cn66xx;
	struct cvmx_lmcx_int_s cn68xx;
	struct cvmx_lmcx_int_s cn68xxp1;
	struct cvmx_lmcx_int_s cnf71xx;
};
typedef union cvmx_lmcx_int cvmx_lmcx_int_t;
/**
 * cvmx_lmc#_int_en
 *
 * LMC_INT_EN = LMC Interrupt Enable Register
 *
 */
union cvmx_lmcx_int_en {
	uint64_t u64;
	struct cvmx_lmcx_int_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_3_63                : 61;
	uint64_t intr_ded_ena                 : 1;  /**< ECC Double Error Detect(DED) Interrupt
                                                         Enable bit. When set, the memory
                                                         controller raises a processor interrupt
                                                         on detecting an uncorrectable Dbl Bit
                                                         ECC error. */
	uint64_t intr_sec_ena                 : 1;  /**< ECC Single Error Correct(SEC) Interrupt
                                                         Enable bit. When set, the memory
                                                         controller raises a processor interrupt
                                                         on detecting a correctable Single Bit
                                                         ECC error. */
	uint64_t intr_nxm_wr_ena              : 1;  /**< Non Write Error Interrupt Enable bit.
                                                         When set, the memory controller raises
                                                         a processor interrupt on detecting a
                                                         non-existent memory write */
#else
	uint64_t intr_nxm_wr_ena              : 1;
	uint64_t intr_sec_ena                 : 1;
	uint64_t intr_ded_ena                 : 1;
	uint64_t reserved_3_63                : 61;
#endif
	} s;
	struct cvmx_lmcx_int_en_s cn61xx;
	struct cvmx_lmcx_int_en_s cn63xx;
	struct cvmx_lmcx_int_en_s cn63xxp1;
	struct cvmx_lmcx_int_en_s cn66xx;
	struct cvmx_lmcx_int_en_s cn68xx;
	struct cvmx_lmcx_int_en_s cn68xxp1;
	struct cvmx_lmcx_int_en_s cnf71xx;
};
typedef union cvmx_lmcx_int_en cvmx_lmcx_int_en_t;

/**
 * cvmx_lmc#_mem_cfg0
 *
 * Specify the RSL base addresses for the block
 *
 * LMC_MEM_CFG0 = LMC Memory Configuration Register0
 *
 * This register controls certain parameters of Memory Configuration
 */
union cvmx_lmcx_mem_cfg0 {
	uint64_t u64;
	struct cvmx_lmcx_mem_cfg0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
                                                         and LMC_OPS_CNT_*, LMC_IFB_CNT_*, and
                                                         LMC_DCLK_CNT_* CSR's. SW should write
                                                         this to a one, then re-write it to a
                                                         zero to cause the reset. */
	uint64_t silo_qc                      : 1;  /**< Adds a Quarter Cycle granularity to
                                                         generate dqs pulse generation for silo.
                                                         Combination of Silo_HC and Silo_QC gives
                                                         the ability to position the read enable
                                                         with quarter cycle resolution, applied
                                                         on all the bytes uniformly. */
	uint64_t bunk_ena                     : 1;  /**< Bunk Enable aka RANK ena (for use with
                                                         dual-rank DIMMs). For dual-rank DIMMs,
                                                         the bunk_ena bit will enable the drive
                                                         of the CS_N[1:0] pins based on the
                                                         (pbank_lsb-1) address bit.
                                                         Write 0 for SINGLE ranked DIMM's. */
	uint64_t ded_err                      : 4;  /**< Double Error detected (DED) of Rd Data.
                                                         In 128b mode, ecc is calculated on 1
                                                         cycle worth of data:
                                                         [25] DQ[63:0] Phase0, [26] DQ[127:64]
                                                         Phase0, [27] DQ[63:0] Phase1,
                                                         [28] DQ[127:64] Phase1.
                                                         In 64b mode, ecc is calculated on 2
                                                         cycle worth of data:
                                                         [25] Phase0/cycle0, [26] Phase0/cycle1,
                                                         [27] Phase1/cycle0, [28] Phase1/cycle1.
                                                         Write of 1 will clear the corresponding
                                                         error bit */
	uint64_t sec_err                      : 4;  /**< Single Error (corrected) of Rd Data.
                                                         Bits [24:21]; lane mapping follows the
                                                         same phase/cycle scheme as DED_ERR.
                                                         Write of 1 will clear the corresponding
                                                         error bit */
	uint64_t intr_ded_ena                 : 1;  /**< ECC Double Error Detect(DED) Interrupt
                                                         Enable bit. When set, the memory
                                                         controller raises a processor interrupt
                                                         on detecting an uncorrectable Dbl Bit
                                                         ECC error. */
	uint64_t intr_sec_ena                 : 1;  /**< ECC Single Error Correct(SEC) Interrupt
                                                         Enable bit. When set, the memory
                                                         controller raises a processor interrupt
                                                         on detecting a correctable Single Bit
                                                         ECC error. */
	uint64_t tcl                          : 4;  /**< This register is not used */
	uint64_t ref_int                      : 6;  /**< Refresh interval represented in #of 512
                                                         dclk increments. Program this to
                                                         RND-DN(tREFI/clkPeriod/512)
                                                         - 000000: RESERVED
                                                         - 000001: 1 * 512 = 512 dclks
                                                         - ...
                                                         - 111111: 63 * 512 = 32256 dclks */
	uint64_t pbank_lsb                    : 4;  /**< Physical Bank address select.
                                                         PBank_LSB = Row_LSB bit + #rowbits
                                                         + #rankbits.
                                                         0000: pbank[1:0] = mem_adr[28:27] /
                                                         rank = mem_adr[26] (if bunk_ena), each
                                                         subsequent encoding shifts one bit up,
                                                         through 0111: pbank[1:0] = [2'b0] /
                                                         rank = mem_adr[33].
                                                         1000-1111: RESERVED */
	uint64_t row_lsb                      : 3;  /**< Encoding used to determine which memory
                                                         address bit position represents the low
                                                         order DDR ROW address, as a function of
                                                         bank count, datapath width, rank count
                                                         and column/row bits of the memory part.
                                                         - 000: row_lsb = mem_adr[14]
                                                         - 001: row_lsb = mem_adr[15]
                                                         - 010: row_lsb = mem_adr[16]
                                                         - 011: row_lsb = mem_adr[17]
                                                         - 100: row_lsb = mem_adr[18]
                                                         - 101-111: row_lsb = RESERVED */
	uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b
                                                         ECC check/correct logic. Should be 1
                                                         when used with DIMMs with ECC; 0
                                                         otherwise. When on, DQ[71:64] and
                                                         DQ[143:137] carry the ECC code for the
                                                         lower/upper 64 bits of data; when off
                                                         they are driven to 0. Refer to SEC_ERR,
                                                         DED_ERR, LMC_FADR, and LMC_ECC_SYND
                                                         registers for diagnostics information
                                                         when there is an error. */
	uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory
                                                         initialization sequence. */
#else
	uint64_t init_start                   : 1;
	uint64_t ecc_ena                      : 1;
	uint64_t row_lsb                      : 3;
	uint64_t pbank_lsb                    : 4;
	uint64_t ref_int                      : 6;
	uint64_t tcl                          : 4;
	uint64_t intr_sec_ena                 : 1;
	uint64_t intr_ded_ena                 : 1;
	uint64_t sec_err                      : 4;
	uint64_t ded_err                      : 4;
	uint64_t bunk_ena                     : 1;
	uint64_t silo_qc                      : 1;
	uint64_t reset                        : 1;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_lmcx_mem_cfg0_s cn30xx;
	struct cvmx_lmcx_mem_cfg0_s cn31xx;
	struct cvmx_lmcx_mem_cfg0_s cn38xx;
	struct cvmx_lmcx_mem_cfg0_s cn38xxp2;
	struct cvmx_lmcx_mem_cfg0_s cn50xx;
	struct cvmx_lmcx_mem_cfg0_s cn52xx;
	struct cvmx_lmcx_mem_cfg0_s cn52xxp1;
	struct cvmx_lmcx_mem_cfg0_s cn56xx;
	struct cvmx_lmcx_mem_cfg0_s cn56xxp1;
	struct cvmx_lmcx_mem_cfg0_s cn58xx;
	struct cvmx_lmcx_mem_cfg0_s cn58xxp1;
};
typedef union cvmx_lmcx_mem_cfg0 cvmx_lmcx_mem_cfg0_t;
/**
 * cvmx_lmc#_mem_cfg1
 *
 * LMC_MEM_CFG1 = LMC Memory Configuration Register1
 *
 * This register controls the External Memory Configuration Timing Parameters. Please refer to
 * the appropriate DDR part spec from your memory vendor for the various values in this CSR.
 * The details of each of these timing parameters can be found in the JEDEC spec or the vendor
 * spec of the memory parts.
 */
union cvmx_lmcx_mem_cfg1 {
	uint64_t u64;
	struct cvmx_lmcx_mem_cfg1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t comp_bypass                  : 1;  /**< Compensation bypass. */
	uint64_t trrd                         : 3;  /**< tRRD cycles: ACT-ACT timing parameter
                                                         for different banks (in tCYC == 1dclk).
                                                         TYP=15ns; for DDR2, TYP=7.5ns.
                                                         000: RESERVED, 001-111: 1-7 tCYC */
	uint64_t caslat                       : 3;  /**< CAS Latency Encoding which is loaded
                                                         into each DDR SDRAM device (MRS[6:4])
                                                         upon power-up (INIT_START=1), in tCYC.
                                                         010-110: 2.0-6.0 tCYC; 000, 001, 111:
                                                         RESERVED. TSKW, SILO_HC and SILO_QC can
                                                         account for 1/4 cycle granularity in
                                                         board/etch delays. */
	uint64_t tmrd                         : 3;  /**< tMRD Cycles (in dclk tCYC). For DDR2,
                                                         TYP 2*tCYC. 001-100: 1-4; others
                                                         RESERVED */
	uint64_t trfc                         : 5;  /**< Indicates tRFC constraints. Set TRFC =
                                                         RNDUP[tRFC(ns)/4*tcyc(ns)], tRFC from
                                                         the DDR2 spec, tcyc(ns) the DDR clock
                                                         frequency (not data rate), e.g. 2Gb
                                                         DDR2-667: tRFC=195ns -> 0x11.
                                                         00000-00001: RESERVED; 00010: 0-8
                                                         cycles ... 11111: 121-124 cycles */
	uint64_t trp                          : 4;  /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)]
                                                         (in tCYC == 1dclk). TYP=15ns.
                                                         0001-1001: 1-9; others RESERVED.
                                                         With 8-bank parts
                                                         (LMC_DDR2_CTL->BANK8 is 1), load tRP
                                                         cycles + 1 into this register. */
	uint64_t twtr                         : 4;  /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)].
                                                         Last Wr Data to Rd Command time
                                                         (in tCYC == 1dclk). TYP=15ns.
                                                         0001-0111: 1-7; others RESERVED */
	uint64_t trcd                         : 4;  /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)]
                                                         (in tCYC == 1dclk). TYP=15ns.
                                                         0001-1001: valid (2 is the smallest
                                                         value allowed); 1010-1111 RESERVED.
                                                         In 2T mode, make this register TRCD-1,
                                                         not going below 2. */
	uint64_t tras                         : 5;  /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)]
                                                         (in tCYC == 1dclk).
                                                         00000-00001: RESERVED;
                                                         00010-11111: 2-31 */
#else
	uint64_t tras                         : 5;
	uint64_t trcd                         : 4;
	uint64_t twtr                         : 4;
	uint64_t trp                          : 4;
	uint64_t trfc                         : 5;
	uint64_t tmrd                         : 3;
	uint64_t caslat                       : 3;
	uint64_t trrd                         : 3;
	uint64_t comp_bypass                  : 1;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_lmcx_mem_cfg1_s cn30xx;
	struct cvmx_lmcx_mem_cfg1_s cn31xx;
	struct cvmx_lmcx_mem_cfg1_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_31_63               : 33;
	uint64_t trrd                         : 3;  /**< tRRD cycles: ACT-ACT timing parameter
                                                         for different banks (in tCYC == 1dclk).
                                                         TYP=15ns; for DDR2, TYP=7.5ns.
                                                         001-101: 1-5 tCYC; others RESERVED */
	uint64_t caslat                       : 3;  /**< CAS Latency Encoding loaded into each
                                                         DDR SDRAM device (MRS[6:4]) upon
                                                         power-up (INIT_START=1), in tCYC.
                                                         010-101: 2.0-5.0 tCYC; 110: 6.0 tCYC
                                                         (DDR2) or 2.5 tCYC (DDR1); others
                                                         RESERVED */
	uint64_t tmrd                         : 3;  /**< tMRD Cycles (in dclk tCYC). For DDR2,
                                                         TYP 2*tCYC. 001-100: 1-4; others
                                                         RESERVED */
	uint64_t trfc                         : 5;  /**< Indicates tRFC constraints. Set TRFC =
                                                         RNDUP[tRFC(ns)/4*tcyc(ns)]; same
                                                         encoding as the _s layout. */
	uint64_t trp                          : 4;  /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)].
                                                         0001-0111: 1-7; others RESERVED.
                                                         With 8-bank parts
                                                         (LMC_DDR2_CTL->BANK8 is 1), load tRP
                                                         cycles + 1 into this register. */
	uint64_t twtr                         : 4;  /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)].
                                                         Last Wr Data to Rd Command time.
                                                         0001-0111: 1-7; others RESERVED */
	uint64_t trcd                         : 4;  /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)].
                                                         0001-0111: valid (2 is the smallest
                                                         value allowed); 1110-1111 RESERVED.
                                                         In 2T mode, make this register TRCD-1,
                                                         not going below 2. */
	uint64_t tras                         : 5;  /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)].
                                                         For DDR-I mode TYP=45ns.
                                                         00010-10100: 2-20; others RESERVED */
#else
	uint64_t tras                         : 5;
	uint64_t trcd                         : 4;
	uint64_t twtr                         : 4;
	uint64_t trp                          : 4;
	uint64_t trfc                         : 5;
	uint64_t tmrd                         : 3;
	uint64_t caslat                       : 3;
	uint64_t trrd                         : 3;
	uint64_t reserved_31_63               : 33;
#endif
	} cn38xx;
	struct cvmx_lmcx_mem_cfg1_cn38xx cn38xxp2;
	struct cvmx_lmcx_mem_cfg1_s cn50xx;
	struct cvmx_lmcx_mem_cfg1_cn38xx cn52xx;
	struct cvmx_lmcx_mem_cfg1_cn38xx cn52xxp1;
	struct cvmx_lmcx_mem_cfg1_cn38xx cn56xx;
	struct cvmx_lmcx_mem_cfg1_cn38xx cn56xxp1;
	struct cvmx_lmcx_mem_cfg1_cn38xx cn58xx;
	struct cvmx_lmcx_mem_cfg1_cn38xx cn58xxp1;
};
typedef union cvmx_lmcx_mem_cfg1 cvmx_lmcx_mem_cfg1_t;

/**
 * cvmx_lmc#_modereg_params0
 *
 * Notes:
 * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
 * LMC writes each field to the corresponding mode-register bits in the selected
 * DDR3 parts during power-up/init and, where noted, during write-leveling,
 * read-leveling and (if LMC*_CONFIG[SREF_WITH_DLL] is set) self-refresh
 * entry/exit instruction sequences.  See
 * LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
 * LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].  Unless stated otherwise, each value
 * must match the corresponding mode-register field in all DDR3 parts attached
 * to all ranks during normal operation.
 */
union cvmx_lmcx_modereg_params0 {
	uint64_t u64;
	struct cvmx_lmcx_modereg_params0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_25_63 : 39;
	uint64_t ppd : 1; /**< MR0[PPD]: DLL control for precharge powerdown.
	                       0 = slow exit (DLL off), 1 = fast exit (DLL on). */
	uint64_t wrp : 3; /**< MR0[WR]: write recovery for auto precharge.
	                       Program >= RNDUP[tWR(ns)/tCYC(ns)].
	                       000 = 5, 001 = 5, 010 = 6, 011 = 7,
	                       100 = 8, 101 = 10, 110 = 12, 111 = 14 */
	uint64_t dllr : 1; /**< MR0[DLL]: DLL reset.
	                        Must be 0 in all parts during normal operation. */
	uint64_t tm : 1; /**< MR0[TM]: test mode.
	                      Must be 0 in all parts during normal operation. */
	uint64_t rbt : 1; /**< MR0[RBT]: read burst type; 1 = interleaved (fixed).
	                       Must be 1 in all parts during normal operation. */
	uint64_t cl : 4; /**< MR0[CAS Latency / CL]:
	                      0010 = 5,  0100 = 6,  0110 = 7,  1000 = 8,
	                      1010 = 9,  1100 = 10, 1110 = 11, 0001 = 12,
	                      0011 = 13, 0101 = 14, 0111 = 15, 1001 = 16,
	                      0000, 1011, 1101, 1111 = reserved */
	uint64_t bl : 2; /**< MR0[BL]: burst length; 0 = 8 (fixed).
	                      Must be 0 in all parts during normal operation. */
	uint64_t qoff : 1; /**< MR1[Qoff]: 0 = output buffer enabled, 1 = disabled.
	                        Must be 0 in all parts during normal operation. */
	uint64_t tdqs : 1; /**< MR1[TDQS]: TDQS enable; 0 = disable. */
	uint64_t wlev : 1; /**< MR1[Level]: write-leveling enable; always written
	                        as 0 by these sequences (write-leveling can only be
	                        initiated via the write-leveling instruction
	                        sequence). */
	uint64_t al : 2; /**< MR1[AL]: additive latency.
	                      00 = 0, 01 = CL-1, 10 = CL-2, 11 = reserved.
	                      See also LMC*_CONTROL[POCAS]. */
	uint64_t dll : 1; /**< MR1[DLL]: 0 = enable, 1 = disable.
	                       In dll-off mode, CL/CWL must be programmed to 6/6
	                       per the DDR3 specifications. */
	uint64_t mpr : 1; /**< MR3[MPR]: must be 0 in all parts during normal
	                       operation (LMC writes MR3[MPR]=1 at the start of the
	                       read-leveling instruction sequence; read-leveling
	                       should only be initiated via that sequence). */
	uint64_t mprloc : 2; /**< MR3[MPRLoc]: MPR location; must be 0 in all
	                          parts during normal operation (LMC writes 0 at
	                          the start of read-leveling). */
	uint64_t cwl : 3; /**< MR2[CWL]: CAS write latency.
	                       000 = 5, 001 = 6,  010 = 7,  011 = 8,
	                       100 = 9, 101 = 10, 110 = 11, 111 = 12 */
#else
	uint64_t cwl : 3;
	uint64_t mprloc : 2;
	uint64_t mpr : 1;
	uint64_t dll : 1;
	uint64_t al : 2;
	uint64_t wlev : 1;
	uint64_t tdqs : 1;
	uint64_t qoff : 1;
	uint64_t bl : 2;
	uint64_t cl : 4;
	uint64_t rbt : 1;
	uint64_t tm : 1;
	uint64_t dllr : 1;
	uint64_t wrp : 3;
	uint64_t ppd : 1;
	uint64_t reserved_25_63 : 39;
#endif
	} s;
	struct cvmx_lmcx_modereg_params0_s cn61xx;
	struct cvmx_lmcx_modereg_params0_s cn63xx;
	struct cvmx_lmcx_modereg_params0_s cn63xxp1;
	struct cvmx_lmcx_modereg_params0_s cn66xx;
	struct cvmx_lmcx_modereg_params0_s cn68xx;
	struct cvmx_lmcx_modereg_params0_s cn68xxp1;
	struct cvmx_lmcx_modereg_params0_s cnf71xx;
};
typedef union cvmx_lmcx_modereg_params0 cvmx_lmcx_modereg_params0_t;

/**
 * cvmx_lmc#_modereg_params1
 *
 * Notes:
 * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
 *
 * Each field below is a per-rank copy of a DDR3 MR1/MR2 field.  The suffix
 * selects the rank: _11 = rank 3 (DIMM1_CS1), _10 = rank 2 (DIMM1_CS0),
 * _01 = rank 1 (DIMM0_CS1), _00 = rank 0 (DIMM0_CS0).  LMC writes each value
 * to the corresponding mode register in the DDR3 parts of that rank when
 * selected during power-up/init, write-leveling, and, if
 * LMC*_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction
 * sequences.  See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK,INIT_STATUS] and
 * LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
 *
 * Per JEDEC DDR3 specifications, if RTT_Nom is used during writes, only
 * MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed; otherwise
 * values 4 (RQZ/12) and 5 (RQZ/8) are also allowed.
 */
union cvmx_lmcx_modereg_params1 {
	uint64_t u64;
	struct cvmx_lmcx_modereg_params1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63 : 16;
	uint64_t rtt_nom_11 : 3; /**< MR1[Rtt_Nom], rank 3 */
	uint64_t dic_11 : 2; /**< MR1[D.I.C.] output driver impedance, rank 3 */
	uint64_t rtt_wr_11 : 2; /**< MR2[Rtt_WR], rank 3 */
	uint64_t srt_11 : 1; /**< MR2[SRT] self-refresh temperature range, rank 3 */
	uint64_t asr_11 : 1; /**< MR2[ASR] auto self-refresh, rank 3 */
	uint64_t pasr_11 : 3; /**< MR2[PASR] partial array self-refresh, rank 3 */
	uint64_t rtt_nom_10 : 3; /**< MR1[Rtt_Nom], rank 2 */
	uint64_t dic_10 : 2; /**< MR1[D.I.C.] output driver impedance, rank 2 */
	uint64_t rtt_wr_10 : 2; /**< MR2[Rtt_WR], rank 2 */
	uint64_t srt_10 : 1; /**< MR2[SRT] self-refresh temperature range, rank 2 */
	uint64_t asr_10 : 1; /**< MR2[ASR] auto self-refresh, rank 2 */
	uint64_t pasr_10 : 3; /**< MR2[PASR] partial array self-refresh, rank 2 */
	uint64_t rtt_nom_01 : 3; /**< MR1[Rtt_Nom], rank 1 */
	uint64_t dic_01 : 2; /**< MR1[D.I.C.] output driver impedance, rank 1 */
	uint64_t rtt_wr_01 : 2; /**< MR2[Rtt_WR], rank 1 */
	uint64_t srt_01 : 1; /**< MR2[SRT] self-refresh temperature range, rank 1 */
	uint64_t asr_01 : 1; /**< MR2[ASR] auto self-refresh, rank 1 */
	uint64_t pasr_01 : 3; /**< MR2[PASR] partial array self-refresh, rank 1 */
	uint64_t rtt_nom_00 : 3; /**< MR1[Rtt_Nom], rank 0 */
	uint64_t dic_00 : 2; /**< MR1[D.I.C.] output driver impedance, rank 0 */
	uint64_t rtt_wr_00 : 2; /**< MR2[Rtt_WR], rank 0 */
	uint64_t srt_00 : 1; /**< MR2[SRT] self-refresh temperature range, rank 0 */
	uint64_t asr_00 : 1; /**< MR2[ASR] auto self-refresh, rank 0 */
	uint64_t pasr_00 : 3; /**< MR2[PASR] partial array self-refresh, rank 0 */
#else
	uint64_t pasr_00 : 3;
	uint64_t asr_00 : 1;
	uint64_t srt_00 : 1;
	uint64_t rtt_wr_00 : 2;
	uint64_t dic_00 : 2;
	uint64_t rtt_nom_00 : 3;
	uint64_t pasr_01 : 3;
	uint64_t asr_01 : 1;
	uint64_t srt_01 : 1;
	uint64_t rtt_wr_01 : 2;
	uint64_t dic_01 : 2;
	uint64_t rtt_nom_01 : 3;
	uint64_t pasr_10 : 3;
	uint64_t asr_10 : 1;
	uint64_t srt_10 : 1;
	uint64_t rtt_wr_10 : 2;
	uint64_t dic_10 : 2;
	uint64_t rtt_nom_10 : 3;
	uint64_t pasr_11 : 3;
	uint64_t asr_11 : 1;
	uint64_t srt_11 : 1;
	uint64_t rtt_wr_11 : 2;
	uint64_t dic_11 : 2;
	uint64_t rtt_nom_11 : 3;
	uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_lmcx_modereg_params1_s cn61xx;
	struct cvmx_lmcx_modereg_params1_s cn63xx;
	struct cvmx_lmcx_modereg_params1_s cn63xxp1;
	struct cvmx_lmcx_modereg_params1_s cn66xx;
	struct cvmx_lmcx_modereg_params1_s cn68xx;
	struct cvmx_lmcx_modereg_params1_s cn68xxp1;
	struct cvmx_lmcx_modereg_params1_s cnf71xx;
};
typedef union cvmx_lmcx_modereg_params1 cvmx_lmcx_modereg_params1_t;

/**
 * cvmx_lmc#_nxm
 *
 * LMC_NXM = LMC non-existent memory
 *
 *
 * Notes:
 * Decoding for mem_msb/rank
 * - 0000: mem_msb = mem_adr[25]
 * - 0001: mem_msb = mem_adr[26]
 * - 0010: mem_msb = mem_adr[27]
 * - 0011: mem_msb = mem_adr[28]
 * - 0100: mem_msb = mem_adr[29]
 * - 0101: mem_msb = mem_adr[30]
 * - 0110: mem_msb = mem_adr[31]
 * - 0111: mem_msb = mem_adr[32]
 * - 1000: mem_msb = mem_adr[33]
 * - 1001: mem_msb = mem_adr[34]
 *   1010-1111 = Reserved
 * For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
 * DDR3 parts, the column address width = 10, so with
 * 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] and
 * mem_msb = 4
 *
 * Note also that addresses greater the max defined space (pbank_msb) are also treated
 * as NXM accesses
 */
union cvmx_lmcx_nxm {
	uint64_t u64;
	struct cvmx_lmcx_nxm_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_40_63 : 24;
	uint64_t mem_msb_d3_r1 : 4; /**< Max row MSB for DIMM3, RANK1 / DIMM3 in
	                                 single-ranked.  *UNUSED IN 6xxx* */
	uint64_t mem_msb_d3_r0 : 4; /**< Max row MSB for DIMM3, RANK0.
	                                 *UNUSED IN 6xxx* */
	uint64_t mem_msb_d2_r1 : 4; /**< Max row MSB for DIMM2, RANK1 / DIMM2 in
	                                 single-ranked.  *UNUSED IN 6xxx* */
	uint64_t mem_msb_d2_r0 : 4; /**< Max row MSB for DIMM2, RANK0.
	                                 *UNUSED IN 6xxx* */
	uint64_t mem_msb_d1_r1 : 4; /**< Max row MSB for DIMM1, RANK1 / DIMM1 in
	                                 single-ranked */
	uint64_t mem_msb_d1_r0 : 4; /**< Max row MSB for DIMM1, RANK0 */
	uint64_t mem_msb_d0_r1 : 4; /**< Max row MSB for DIMM0, RANK1 / DIMM0 in
	                                 single-ranked */
	uint64_t mem_msb_d0_r0 : 4; /**< Max row MSB for DIMM0, RANK0 */
	uint64_t cs_mask : 8; /**< Chip select mask for the 8 chip selects of a
	                           memory configuration.  If
	                           LMC*_CONFIG[RANK_ENA]==0 this mask must be set
	                           in pairs because each reference address asserts
	                           a pair of chip selects.  If the chip select(s)
	                           have a corresponding CS_MASK bit set, the
	                           reference is to non-existent memory (NXM).  LMC
	                           aliases a NXM read to use the lowest legal chip
	                           select(s) and returns 0's.  LMC normally
	                           discards NXM writes, but also aliases them when
	                           LMC*_CONTROL[NXM_WRITE_EN]=1.
	                           CS_MASK<7:4> MBZ in 6xxx */
#else
	uint64_t cs_mask : 8;
	uint64_t mem_msb_d0_r0 : 4;
	uint64_t mem_msb_d0_r1 : 4;
	uint64_t mem_msb_d1_r0 : 4;
	uint64_t mem_msb_d1_r1 : 4;
	uint64_t mem_msb_d2_r0 : 4;
	uint64_t mem_msb_d2_r1 : 4;
	uint64_t mem_msb_d3_r0 : 4;
	uint64_t mem_msb_d3_r1 : 4;
	uint64_t reserved_40_63 : 24;
#endif
	} s;
	struct cvmx_lmcx_nxm_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63 : 56;
	uint64_t cs_mask : 8; /**< Chip select mask for the 8 chip selects of a
	                           memory configuration.  If
	                           LMC_MEM_CFG0[BUNK_ENA]==0 this mask must be set
	                           in pairs because each reference address asserts
	                           a pair of chip selects.  If the chip select(s)
	                           have a corresponding CS_MASK bit set, the
	                           reference is to non-existent memory; LMC aliases
	                           the reference to use the lowest legal chip
	                           select(s) in that case. */
#else
	uint64_t cs_mask : 8;
	uint64_t reserved_8_63 : 56;
#endif
	} cn52xx;
	struct cvmx_lmcx_nxm_cn52xx cn56xx;
	struct cvmx_lmcx_nxm_cn52xx cn58xx;
	struct cvmx_lmcx_nxm_s cn61xx;
	struct cvmx_lmcx_nxm_s cn63xx;
	struct cvmx_lmcx_nxm_s cn63xxp1;
	struct cvmx_lmcx_nxm_s cn66xx;
	struct cvmx_lmcx_nxm_s cn68xx;
	struct cvmx_lmcx_nxm_s cn68xxp1;
	struct cvmx_lmcx_nxm_s cnf71xx;
};
typedef union cvmx_lmcx_nxm cvmx_lmcx_nxm_t;

/**
 * cvmx_lmc#_ops_cnt
 *
 * LMC_OPS_CNT = Performance Counters
 *
 */
union cvmx_lmcx_ops_cnt {
	uint64_t u64;
	struct cvmx_lmcx_ops_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t opscnt : 64; /**< Performance counter: 64-bit counter that
	                           increments when the DDR3 data bus is in use.
	                           DRAM bus utilization =
	                           LMC*_OPS_CNT/LMC*_DCLK_CNT */
#else
	uint64_t opscnt : 64;
#endif
	} s;
	struct cvmx_lmcx_ops_cnt_s cn61xx;
	struct cvmx_lmcx_ops_cnt_s cn63xx;
	struct cvmx_lmcx_ops_cnt_s cn63xxp1;
	struct cvmx_lmcx_ops_cnt_s cn66xx;
	struct cvmx_lmcx_ops_cnt_s cn68xx;
	struct cvmx_lmcx_ops_cnt_s cn68xxp1;
	struct cvmx_lmcx_ops_cnt_s cnf71xx;
};
typedef union cvmx_lmcx_ops_cnt cvmx_lmcx_ops_cnt_t;

/**
 * cvmx_lmc#_ops_cnt_hi
 *
 * LMC_OPS_CNT_HI = Performance Counters
 *
 */
union cvmx_lmcx_ops_cnt_hi {
	uint64_t u64;
	struct cvmx_lmcx_ops_cnt_hi_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t opscnt_hi : 32; /**< Upper 32 bits of the 64-bit bus-utilization
	                              counter.  DRAM bus utilization =
	                              LMC_OPS_CNT_* / LMC_DCLK_CNT_* */
#else
	uint64_t opscnt_hi : 32;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_lmcx_ops_cnt_hi_s cn30xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn31xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn38xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn38xxp2;
	struct cvmx_lmcx_ops_cnt_hi_s cn50xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn52xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn52xxp1;
	struct cvmx_lmcx_ops_cnt_hi_s cn56xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn56xxp1;
	struct cvmx_lmcx_ops_cnt_hi_s cn58xx;
	struct cvmx_lmcx_ops_cnt_hi_s cn58xxp1;
};
typedef union cvmx_lmcx_ops_cnt_hi cvmx_lmcx_ops_cnt_hi_t;

/**
 * cvmx_lmc#_ops_cnt_lo
 *
 * LMC_OPS_CNT_LO = Performance Counters
 *
 * Lower half of the split 64-bit ops counter (see LMC_OPS_CNT_HI above).
 */
union cvmx_lmcx_ops_cnt_lo {
    uint64_t u64; /* raw register value */
    struct cvmx_lmcx_ops_cnt_lo_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_32_63 : 32;
    uint64_t opscnt_lo : 32; /**< Performance Counter
                                  Low 32-bits of 64-bit counter
                                  DRAM bus utilization = LMC_OPS_CNT_* /LMC_DCLK_CNT_* */
#else
    uint64_t opscnt_lo : 32;
    uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_lmcx_ops_cnt_lo_s cn30xx;
    struct cvmx_lmcx_ops_cnt_lo_s cn31xx;
    struct cvmx_lmcx_ops_cnt_lo_s cn38xx;
    struct cvmx_lmcx_ops_cnt_lo_s cn38xxp2;
    struct cvmx_lmcx_ops_cnt_lo_s cn50xx;
    struct cvmx_lmcx_ops_cnt_lo_s cn52xx;
    struct cvmx_lmcx_ops_cnt_lo_s cn52xxp1;
    struct cvmx_lmcx_ops_cnt_lo_s cn56xx;
    struct cvmx_lmcx_ops_cnt_lo_s cn56xxp1;
    struct cvmx_lmcx_ops_cnt_lo_s cn58xx;
    struct cvmx_lmcx_ops_cnt_lo_s cn58xxp1;
};
typedef union cvmx_lmcx_ops_cnt_lo cvmx_lmcx_ops_cnt_lo_t;

/**
 * cvmx_lmc#_phy_ctl
 *
 * LMC_PHY_CTL = LMC PHY Control
 *
 */
union cvmx_lmcx_phy_ctl {
    uint64_t u64; /* raw register value */
    struct cvmx_lmcx_phy_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_15_63 : 49;
    uint64_t rx_always_on : 1; /**< Disable dynamic DDR3 IO Rx power gating */
    uint64_t lv_mode : 1; /**< Low Voltage Mode (1.35V) */
    uint64_t ck_tune1 : 1; /**< Clock Tune */
    uint64_t ck_dlyout1 : 4; /**< Clock delay out setting */
    uint64_t ck_tune0 : 1; /**< Clock Tune */
    uint64_t ck_dlyout0 : 4; /**< Clock delay out setting */
    uint64_t loopback : 1; /**< Loopback enable */
    uint64_t loopback_pos : 1; /**< Loopback pos mode */
    uint64_t ts_stagger : 1; /**< TS Staggermode
                                  This mode configures output drivers with 2-stage drive
                                  strength to avoid undershoot issues on the bus when strong
                                  drivers are suddenly turned on. When this mode is asserted,
                                  Octeon will configure output drivers to be weak drivers
                                  (60 ohm output impedance) at the first CK cycle, and
                                  change drivers to the designated drive strengths specified
                                  in $LMC(0)_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
                                  at the following cycle */
#else
    uint64_t ts_stagger : 1;
    uint64_t loopback_pos : 1;
    uint64_t loopback : 1;
    uint64_t ck_dlyout0 : 4;
    uint64_t ck_tune0 : 1;
    uint64_t ck_dlyout1 : 4;
    uint64_t ck_tune1 : 1;
    uint64_t lv_mode : 1;
    uint64_t rx_always_on : 1;
    uint64_t reserved_15_63 : 49;
#endif
    } s;
    struct cvmx_lmcx_phy_ctl_s cn61xx;
    struct cvmx_lmcx_phy_ctl_s cn63xx;
    /* cn63xx pass 1 lacks the RX_ALWAYS_ON bit; bit 14 and up are reserved. */
    struct cvmx_lmcx_phy_ctl_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_14_63 : 50;
    uint64_t lv_mode : 1; /**< Low Voltage Mode (1.35V) */
    uint64_t ck_tune1 : 1; /**< Clock Tune */
    uint64_t ck_dlyout1 : 4; /**< Clock delay out setting */
    uint64_t ck_tune0 : 1; /**< Clock Tune */
    uint64_t ck_dlyout0 : 4; /**< Clock delay out setting */
    uint64_t loopback : 1; /**< Loopback enable */
    uint64_t loopback_pos : 1; /**< Loopback pos mode */
    uint64_t ts_stagger : 1; /**< TS Staggermode
                                  This mode configures output drivers with 2-stage drive
                                  strength to avoid undershoot issues on the bus when strong
                                  drivers are suddenly turned on. When this mode is asserted,
                                  Octeon will configure output drivers to be weak drivers
                                  (60 ohm output impedance) at the first CK cycle, and
                                  change drivers to the designated drive strengths specified
                                  in $LMC(0)_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
                                  at the following cycle */
#else
    uint64_t ts_stagger : 1;
    uint64_t loopback_pos : 1;
    uint64_t loopback : 1;
    uint64_t ck_dlyout0 : 4;
    uint64_t ck_tune0 : 1;
    uint64_t ck_dlyout1 : 4;
    uint64_t ck_tune1 : 1;
    uint64_t lv_mode : 1;
    uint64_t reserved_14_63 : 50;
#endif
    } cn63xxp1;
    struct cvmx_lmcx_phy_ctl_s cn66xx;
    struct cvmx_lmcx_phy_ctl_s cn68xx;
    struct cvmx_lmcx_phy_ctl_s cn68xxp1;
    struct cvmx_lmcx_phy_ctl_s cnf71xx;
};
typedef union cvmx_lmcx_phy_ctl cvmx_lmcx_phy_ctl_t;

/**
 * cvmx_lmc#_pll_bwctl
 *
 * LMC_PLL_BWCTL = DDR PLL Bandwidth Control Register
 *
 */
union cvmx_lmcx_pll_bwctl {
    uint64_t u64; /* raw register value */
    struct cvmx_lmcx_pll_bwctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_5_63 : 59;
    uint64_t bwupd : 1; /**< Load this Bandwidth Register value into the PLL */
    uint64_t bwctl : 4; /**< Bandwidth Control Register for DDR PLL */
#else
    uint64_t bwctl : 4;
    uint64_t bwupd : 1;
    uint64_t reserved_5_63 : 59;
#endif
    } s;
    struct cvmx_lmcx_pll_bwctl_s cn30xx;
    struct cvmx_lmcx_pll_bwctl_s cn31xx;
    struct cvmx_lmcx_pll_bwctl_s cn38xx;
    struct cvmx_lmcx_pll_bwctl_s cn38xxp2;
};
typedef union cvmx_lmcx_pll_bwctl cvmx_lmcx_pll_bwctl_t;

/**
 * cvmx_lmc#_pll_ctl
 *
 * LMC_PLL_CTL = LMC pll control
 *
 *
 * Notes:
 * This CSR is only relevant for LMC0. LMC1_PLL_CTL is not used.
 *
 * Exactly one of EN2, EN4, EN6, EN8, EN12, EN16 must be set.
 *
 * The resultant DDR_CK frequency is the DDR2_REF_CLK
 * frequency multiplied by:
 *
 *     (CLKF + 1) / ((CLKR + 1) * EN(2,4,6,8,12,16))
 *
 * The PLL frequency, which is:
 *
 *     (DDR2_REF_CLK freq) * ((CLKF + 1) / (CLKR + 1))
 *
 * must reside between 1.2 and 2.5 GHz. A faster PLL frequency is desirable if there is a choice.
 */
union cvmx_lmcx_pll_ctl {
    uint64_t u64; /* raw register value */
    struct cvmx_lmcx_pll_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_30_63 : 34;
    uint64_t bypass : 1; /**< PLL Bypass */
    uint64_t fasten_n : 1; /**< Should be set, especially when CLKF > ~80 */
    uint64_t div_reset : 1; /**< Analog pll divider reset
                                 De-assert at least 500*(CLKR+1) reference clock
                                 cycles following RESET_N de-assertion. */
    uint64_t reset_n : 1; /**< Analog pll reset
                               De-assert at least 5 usec after CLKF, CLKR,
                               and EN* are set up. */
    uint64_t clkf : 12; /**< Multiply reference by CLKF + 1
                             CLKF must be <= 128 */
    uint64_t clkr : 6; /**< Divide reference by CLKR + 1 */
    uint64_t reserved_6_7 : 2;
    uint64_t en16 : 1; /**< Divide output by 16 */
    uint64_t en12 : 1; /**< Divide output by 12 */
    uint64_t en8 : 1; /**< Divide output by 8 */
    uint64_t en6 : 1; /**< Divide output by 6 */
    uint64_t en4 : 1; /**< Divide output by 4 */
    uint64_t en2 : 1; /**< Divide output by 2 */
#else
    uint64_t en2 : 1;
    uint64_t en4 : 1;
    uint64_t en6 : 1;
    uint64_t en8 : 1;
    uint64_t en12 : 1;
    uint64_t en16 : 1;
    uint64_t reserved_6_7 : 2;
    uint64_t clkr : 6;
    uint64_t clkf : 12;
    uint64_t reset_n : 1;
    uint64_t div_reset : 1;
    uint64_t fasten_n : 1;
    uint64_t bypass : 1;
    uint64_t reserved_30_63 : 34;
#endif
    } s;
    /* cn50xx/cn56xx: no BYPASS bit, and CLKF may go up to 256. */
    struct cvmx_lmcx_pll_ctl_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_29_63 : 35;
    uint64_t fasten_n : 1; /**< Should be set, especially when CLKF > ~80 */
    uint64_t div_reset : 1; /**< Analog pll divider reset
                                 De-assert at least 500*(CLKR+1) reference clock
                                 cycles following RESET_N de-assertion. */
    uint64_t reset_n : 1; /**< Analog pll reset
                               De-assert at least 5 usec after CLKF, CLKR,
                               and EN* are set up. */
    uint64_t clkf : 12; /**< Multiply reference by CLKF + 1
                             CLKF must be <= 256 */
    uint64_t clkr : 6; /**< Divide reference by CLKR + 1 */
    uint64_t reserved_6_7 : 2;
    uint64_t en16 : 1; /**< Divide output by 16 */
    uint64_t en12 : 1; /**< Divide output by 12 */
    uint64_t en8 : 1; /**< Divide output by 8 */
    uint64_t en6 : 1; /**< Divide output by 6 */
    uint64_t en4 : 1; /**< Divide output by 4 */
    uint64_t en2 : 1; /**< Divide output by 2 */
#else
    uint64_t en2 : 1;
    uint64_t en4 : 1;
    uint64_t en6 : 1;
    uint64_t en8 : 1;
    uint64_t en12 : 1;
    uint64_t en16 : 1;
    uint64_t reserved_6_7 : 2;
    uint64_t clkr : 6;
    uint64_t clkf : 12;
    uint64_t reset_n : 1;
    uint64_t div_reset : 1;
    uint64_t fasten_n : 1;
    uint64_t reserved_29_63 : 35;
#endif
    } cn50xx;
    struct cvmx_lmcx_pll_ctl_s cn52xx;
    struct cvmx_lmcx_pll_ctl_s cn52xxp1;
    struct cvmx_lmcx_pll_ctl_cn50xx cn56xx;
    /* cn56xx pass 1 / cn58xx: no BYPASS or FASTEN_N bits. */
    struct cvmx_lmcx_pll_ctl_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_28_63 : 36;
    uint64_t div_reset : 1; /**< Analog pll divider reset
                                 De-assert at least 500*(CLKR+1) reference clock
                                 cycles following RESET_N de-assertion. */
    uint64_t reset_n : 1; /**< Analog pll reset
                               De-assert at least 5 usec after CLKF, CLKR,
                               and EN* are set up. */
    uint64_t clkf : 12; /**< Multiply reference by CLKF + 1
                             CLKF must be <= 128 */
    uint64_t clkr : 6; /**< Divide reference by CLKR + 1 */
    uint64_t reserved_6_7 : 2;
    uint64_t en16 : 1; /**< Divide output by 16 */
    uint64_t en12 : 1; /**< Divide output by 12 */
    uint64_t en8 : 1; /**< Divide output by 8 */
    uint64_t en6 : 1; /**< Divide output by 6 */
    uint64_t en4 : 1; /**< Divide output by 4 */
    uint64_t en2 : 1; /**< Divide output by 2 */
#else
    uint64_t en2 : 1;
    uint64_t en4 : 1;
    uint64_t en6 : 1;
    uint64_t en8 : 1;
    uint64_t en12 : 1;
    uint64_t en16 : 1;
    uint64_t reserved_6_7 : 2;
    uint64_t clkr : 6;
    uint64_t clkf : 12;
    uint64_t reset_n : 1;
    uint64_t div_reset : 1;
    uint64_t reserved_28_63 : 36;
#endif
    } cn56xxp1;
    struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xx;
    struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xxp1;
};
typedef union cvmx_lmcx_pll_ctl cvmx_lmcx_pll_ctl_t;

/**
 * cvmx_lmc#_pll_status
 *
 * LMC_PLL_STATUS = LMC pll status
 *
 */
union cvmx_lmcx_pll_status {
    uint64_t u64; /* raw register value */
    struct cvmx_lmcx_pll_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_32_63 : 32;
    uint64_t ddr__nctl : 5; /**< DDR nctl from compensation circuit */
    uint64_t ddr__pctl : 5; /**< DDR pctl from compensation circuit */
    uint64_t reserved_2_21 : 20;
    uint64_t rfslip : 1; /**< Reference clock slip */
    uint64_t fbslip : 1; /**< Feedback clock slip */
#else
    uint64_t fbslip : 1;
    uint64_t rfslip : 1;
    uint64_t reserved_2_21 : 20;
    uint64_t ddr__pctl : 5;
    uint64_t ddr__nctl : 5;
    uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_lmcx_pll_status_s cn50xx;
    struct cvmx_lmcx_pll_status_s cn52xx;
    struct cvmx_lmcx_pll_status_s cn52xxp1;
    struct cvmx_lmcx_pll_status_s cn56xx;
    struct cvmx_lmcx_pll_status_s cn56xxp1;
    struct cvmx_lmcx_pll_status_s cn58xx;
    /* cn58xx pass 1: only the slip bits; no compensation fields. */
    struct cvmx_lmcx_pll_status_cn58xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_2_63 : 62;
    uint64_t rfslip : 1; /**< Reference clock slip */
    uint64_t fbslip : 1; /**< Feedback clock slip */
#else
    uint64_t fbslip : 1;
    uint64_t rfslip : 1;
    uint64_t reserved_2_63 : 62;
#endif
    } cn58xxp1;
};
typedef union cvmx_lmcx_pll_status cvmx_lmcx_pll_status_t;

/**
 * cvmx_lmc#_read_level_ctl
 *
 * Notes:
 * The HW writes and reads the cache block selected by ROW, COL, BNK and the rank as part of a read-leveling sequence for a rank.
 * A cache block write is 16 72-bit words. PATTERN selects the write value.
 * For the first 8
 * words, the write value is the bit PATTERN<i> duplicated into a 72-bit vector. The write value of
 * the last 8 words is the inverse of the write value of the first 8 words.
 * See LMC*_READ_LEVEL_RANK*.
 */
union cvmx_lmcx_read_level_ctl {
    uint64_t u64; /* raw register value */
    struct cvmx_lmcx_read_level_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_44_63 : 20;
    uint64_t rankmask : 4; /**< Selects ranks to be leveled
                                to read-level rank i, set RANKMASK<i> */
    uint64_t pattern : 8; /**< All DQ driven to PATTERN[burst], 0 <= burst <= 7
                               All DQ driven to ~PATTERN[burst-8], 8 <= burst <= 15 */
    uint64_t row : 16; /**< Row address used to write/read data pattern */
    uint64_t col : 12; /**< Column address used to write/read data pattern */
    uint64_t reserved_3_3 : 1;
    uint64_t bnk : 3; /**< Bank address used to write/read data pattern */
#else
    uint64_t bnk : 3;
    uint64_t reserved_3_3 : 1;
    uint64_t col : 12;
    uint64_t row : 16;
    uint64_t pattern : 8;
    uint64_t rankmask : 4;
    uint64_t reserved_44_63 : 20;
#endif
    } s;
    struct cvmx_lmcx_read_level_ctl_s cn52xx;
    struct cvmx_lmcx_read_level_ctl_s cn52xxp1;
    struct cvmx_lmcx_read_level_ctl_s cn56xx;
    struct cvmx_lmcx_read_level_ctl_s cn56xxp1;
};
typedef union cvmx_lmcx_read_level_ctl cvmx_lmcx_read_level_ctl_t;

/**
 * cvmx_lmc#_read_level_dbg
 *
 * Notes:
 * A given read of LMC*_READ_LEVEL_DBG returns the read-leveling pass/fail results for all possible
 * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
 * LMC*_READ_LEVEL_DBG[BYTE] selects the particular byte.
 * To get these pass/fail results for another different rank, you must run the hardware read-leveling
 * again. For example, it is possible to get the BITMASK results for every byte of every rank
 * if you run read-leveling separately for each rank, probing LMC*_READ_LEVEL_DBG between each
 * read-leveling.
 */
union cvmx_lmcx_read_level_dbg {
    uint64_t u64; /* raw register value */
    struct cvmx_lmcx_read_level_dbg_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_32_63 : 32;
    uint64_t bitmask : 16; /**< Bitmask generated during deskew settings sweep
                                BITMASK[n]=0 means deskew setting n failed
                                BITMASK[n]=1 means deskew setting n passed
                                for 0 <= n <= 15 */
    uint64_t reserved_4_15 : 12;
    uint64_t byte : 4; /**< 0 <= BYTE <= 8 */
#else
    uint64_t byte : 4;
    uint64_t reserved_4_15 : 12;
    uint64_t bitmask : 16;
    uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_lmcx_read_level_dbg_s cn52xx;
    struct cvmx_lmcx_read_level_dbg_s cn52xxp1;
    struct cvmx_lmcx_read_level_dbg_s cn56xx;
    struct cvmx_lmcx_read_level_dbg_s cn56xxp1;
};
typedef union cvmx_lmcx_read_level_dbg cvmx_lmcx_read_level_dbg_t;

/**
 * cvmx_lmc#_read_level_rank#
 *
 * Notes:
 * This is four CSRs per LMC, one per each rank.
 * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.)
 * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
 * Deskew setting is measured in units of 1/4 DCLK, so the above BYTE* values can range over 4 DCLKs.
 * SW initiates a HW read-leveling sequence by programming LMC*_READ_LEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
 * See LMC*_READ_LEVEL_CTL.
 */
union cvmx_lmcx_read_level_rankx {
    uint64_t u64; /* raw register value */
    struct cvmx_lmcx_read_level_rankx_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_38_63 : 26;
    uint64_t status : 2; /**< Indicates status of the read-levelling and where
                              the BYTE* programmings in <35:0> came from:
                              0 = BYTE* values are their reset value
                              1 = BYTE* values were set via a CSR write to this register
                              2 = read-leveling sequence currently in progress (BYTE* values are unpredictable)
                              3 = BYTE* values came from a complete read-leveling sequence */
    uint64_t byte8 : 4; /**< Deskew setting */
    uint64_t byte7 : 4; /**< Deskew setting */
    uint64_t byte6 : 4; /**< Deskew setting */
    uint64_t byte5 : 4; /**< Deskew setting */
    uint64_t byte4 : 4; /**< Deskew setting */
    uint64_t byte3 : 4; /**< Deskew setting */
    uint64_t byte2 : 4; /**< Deskew setting */
    uint64_t byte1 : 4; /**< Deskew setting */
    uint64_t byte0 : 4; /**< Deskew setting */
#else
    uint64_t byte0 : 4;
    uint64_t byte1 : 4;
    uint64_t byte2 : 4;
    uint64_t byte3 : 4;
    uint64_t byte4 : 4;
    uint64_t byte5 : 4;
    uint64_t byte6 : 4;
    uint64_t byte7 : 4;
    uint64_t byte8 : 4;
    uint64_t status : 2;
    uint64_t reserved_38_63 : 26;
#endif
    } s;
    struct cvmx_lmcx_read_level_rankx_s cn52xx;
    struct cvmx_lmcx_read_level_rankx_s cn52xxp1;
    struct cvmx_lmcx_read_level_rankx_s cn56xx;
    struct cvmx_lmcx_read_level_rankx_s cn56xxp1;
};
typedef union cvmx_lmcx_read_level_rankx cvmx_lmcx_read_level_rankx_t;

/**
 * cvmx_lmc#_reset_ctl
 *
 * Specify the RSL base addresses for the block
 *
 *
 * Notes:
 * DDR3RST - DDR3 DRAM parts have a new RESET#
 * pin that wasn't present in DDR2 parts. The
 * DDR3RST CSR field controls the assertion of
 * the new 6xxx pin that attaches to RESET#.
 * When DDR3RST is set, 6xxx asserts RESET#.
 * When DDR3RST is clear, 6xxx de-asserts
 * RESET#.
 *
 * DDR3RST is set on a cold reset. Warm and
 * soft chip resets do not affect the DDR3RST
 * value.
 * Outside of cold reset, only software
 * CSR writes change the DDR3RST value.
 *
 * DDR3PWARM - Enables preserve mode during a warm
 * reset. When set, the DDR3 controller hardware
 * automatically puts the attached DDR3 DRAM parts
 * into self refresh (see LMC*CONFIG[SEQUENCE] below) at the beginning of a warm
 * reset sequence, provided that the DDR3 controller
 * is up. When clear, the DDR3 controller hardware
 * does not put the attached DDR3 DRAM parts into
 * self-refresh during a warm reset sequence.
 *
 * DDR3PWARM is cleared on a cold reset. Warm and
 * soft chip resets do not affect the DDR3PWARM
 * value. Outside of cold reset, only software
 * CSR writes change the DDR3PWARM value.
 *
 * Note that if a warm reset follows a soft reset,
 * DDR3PWARM has no effect, as the DDR3 controller
 * is no longer up after any cold/warm/soft
 * reset sequence.
 *
 * DDR3PSOFT - Enables preserve mode during a soft
 * reset. When set, the DDR3 controller hardware
 * automatically puts the attached DDR3 DRAM parts
 * into self refresh (see LMC*CONFIG[SEQUENCE] below) at the beginning of a soft
 * reset sequence, provided that the DDR3 controller
 * is up. When clear, the DDR3 controller hardware
 * does not put the attached DDR3 DRAM parts into
 * self-refresh during a soft reset sequence.
 *
 * DDR3PSOFT is cleared on a cold reset. Warm and
 * soft chip resets do not affect the DDR3PSOFT
 * value. Outside of cold reset, only software
 * CSR writes change the DDR3PSOFT value.
 *
 * DDR3PSV - May be useful for system software to
 * determine when the DDR3 contents have been
 * preserved.
 *
 * Cleared by hardware during a cold reset. Never
 * cleared by hardware during a warm/soft reset.
 * Set by hardware during a warm/soft reset if
 * the hardware automatically put the DDR3 DRAM
 * into self-refresh during the reset sequence.
 *
 * Can also be written by software (to any value).
 */
union cvmx_lmcx_reset_ctl {
    uint64_t u64; /* raw register value */
    struct cvmx_lmcx_reset_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_4_63 : 60;
    uint64_t ddr3psv : 1; /**< Memory Reset
                               1 = DDR contents preserved */
    uint64_t ddr3psoft : 1; /**< Memory Reset
                                 1 = Enable Preserve mode during soft reset */
    uint64_t ddr3pwarm : 1; /**< Memory Reset
                                 1 = Enable Preserve mode during warm reset */
    uint64_t ddr3rst : 1; /**< Memory Reset
                               0 = Reset asserted
                               1 = Reset de-asserted */
#else
    uint64_t ddr3rst : 1;
    uint64_t ddr3pwarm : 1;
    uint64_t ddr3psoft : 1;
    uint64_t ddr3psv : 1;
    uint64_t reserved_4_63 : 60;
#endif
    } s;
    struct cvmx_lmcx_reset_ctl_s cn61xx;
    struct cvmx_lmcx_reset_ctl_s cn63xx;
    struct cvmx_lmcx_reset_ctl_s cn63xxp1;
    struct cvmx_lmcx_reset_ctl_s cn66xx;
    struct cvmx_lmcx_reset_ctl_s cn68xx;
    struct cvmx_lmcx_reset_ctl_s cn68xxp1;
    struct cvmx_lmcx_reset_ctl_s cnf71xx;
};
typedef union cvmx_lmcx_reset_ctl cvmx_lmcx_reset_ctl_t;

/**
 * cvmx_lmc#_rlevel_ctl
 */
union cvmx_lmcx_rlevel_ctl {
    uint64_t u64; /* raw register value */
    struct cvmx_lmcx_rlevel_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_22_63 : 42;
    uint64_t delay_unload_3 : 1; /**< When set, unload the PHY silo one cycle later
                                      during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 3
                                      DELAY_UNLOAD_3 should normally be set, particularly at higher speeds. */
    uint64_t delay_unload_2 : 1; /**< When set, unload the PHY silo one cycle later
                                      during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 2
                                      DELAY_UNLOAD_2 should normally not be set. */
    uint64_t delay_unload_1 : 1; /**< When set, unload the PHY silo one cycle later
                                      during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 1
                                      DELAY_UNLOAD_1 should normally not be set. */
    uint64_t delay_unload_0 : 1; /**< When set, unload the PHY silo one cycle later
                                      during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 0
                                      DELAY_UNLOAD_0 should normally not be set. */
    uint64_t bitmask : 8; /**< Mask to select bit lanes on which read-leveling
                               feedback is returned when OR_DIS is set to 1 */
    uint64_t or_dis : 1; /**< Disable or'ing of bits in a byte lane when computing
                              the read-leveling bitmask
                              OR_DIS should normally not be set. */
    uint64_t offset_en : 1; /**< When set, LMC attempts to select the read-leveling
                                 setting that is LMC*RLEVEL_CTL[OFFSET] settings earlier than the
                                 last passing read-leveling setting in the largest
                                 contiguous sequence of passing settings.
                                 When clear, or if the setting selected by LMC*RLEVEL_CTL[OFFSET]
                                 did not pass, LMC selects the middle setting in the
                                 largest contiguous sequence of passing settings,
                                 rounding earlier when necessary. */
    uint64_t offset : 4; /**< The offset used when LMC*RLEVEL_CTL[OFFSET] is set */
    uint64_t byte : 4; /**< 0 <= BYTE <= 8
                            Byte index for which bitmask results are saved
                            in LMC*_RLEVEL_DBG */
#else
    uint64_t byte : 4;
    uint64_t offset : 4;
    uint64_t offset_en : 1;
    uint64_t or_dis : 1;
    uint64_t bitmask : 8;
    uint64_t delay_unload_0 : 1;
    uint64_t delay_unload_1 : 1;
    uint64_t delay_unload_2 : 1;
    uint64_t delay_unload_3 : 1;
    uint64_t reserved_22_63 : 42;
#endif
    } s;
    struct cvmx_lmcx_rlevel_ctl_s cn61xx;
    struct cvmx_lmcx_rlevel_ctl_s cn63xx;
    /* cn63xx pass 1: only OFFSET_EN/OFFSET/BYTE; bits 9 and up reserved. */
    struct cvmx_lmcx_rlevel_ctl_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_9_63 : 55;
    uint64_t offset_en : 1; /**< When set, LMC attempts to select the read-leveling
                                 setting that is LMC*RLEVEL_CTL[OFFSET] settings earlier than the
                                 last passing read-leveling setting in the largest
                                 contiguous sequence of passing settings.
                                 When clear, or if the setting selected by LMC*RLEVEL_CTL[OFFSET]
                                 did not pass, LMC selects the middle setting in the
                                 largest contiguous sequence of passing settings,
                                 rounding earlier when necessary. */
    uint64_t offset : 4; /**< The offset used when LMC*RLEVEL_CTL[OFFSET] is set */
    uint64_t byte : 4; /**< 0 <= BYTE <= 8
                            Byte index for which bitmask results are saved
                            in LMC*_RLEVEL_DBG */
#else
    uint64_t byte : 4;
    uint64_t offset : 4;
    uint64_t offset_en : 1;
    uint64_t reserved_9_63 : 55;
#endif
    } cn63xxp1;
    struct cvmx_lmcx_rlevel_ctl_s cn66xx;
    struct cvmx_lmcx_rlevel_ctl_s cn68xx;
    struct cvmx_lmcx_rlevel_ctl_s cn68xxp1;
    struct cvmx_lmcx_rlevel_ctl_s cnf71xx;
};
typedef union cvmx_lmcx_rlevel_ctl cvmx_lmcx_rlevel_ctl_t;

/**
 * cvmx_lmc#_rlevel_dbg
 *
 * Notes:
 * A given read of LMC*_RLEVEL_DBG returns the read-leveling pass/fail results for all possible
 * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
 * LMC*_RLEVEL_CTL[BYTE] selects the particular byte.
/**
 * cvmx_lmc#_rlevel_dbg
 *
 * Notes:
 * A given read of LMC*_RLEVEL_DBG returns the read-leveling pass/fail
 * results for all possible delay settings (i.e. the BITMASK) for only
 * one byte in the last rank that the HW read-leveled.
 * LMC*_RLEVEL_CTL[BYTE] selects the particular byte.
 *
 * To get these pass/fail results for another different rank, you must
 * run the hardware read-leveling again.  For example, it is possible
 * to get the BITMASK results for every byte of every rank if you run
 * read-leveling separately for each rank, probing LMC*_RLEVEL_DBG
 * between each read-leveling.
 */
union cvmx_lmcx_rlevel_dbg {
	uint64_t u64;
	struct cvmx_lmcx_rlevel_dbg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t bitmask                      : 64; /**< Bitmask generated during deskew settings sweep:
	                                                 BITMASK[n]=0 means deskew setting n failed,
	                                                 BITMASK[n]=1 means deskew setting n passed,
	                                                 for 0 <= n <= 63 */
#else
	uint64_t bitmask                      : 64;
#endif
	} s;
	struct cvmx_lmcx_rlevel_dbg_s         cn61xx;
	struct cvmx_lmcx_rlevel_dbg_s         cn63xx;
	struct cvmx_lmcx_rlevel_dbg_s         cn63xxp1;
	struct cvmx_lmcx_rlevel_dbg_s         cn66xx;
	struct cvmx_lmcx_rlevel_dbg_s         cn68xx;
	struct cvmx_lmcx_rlevel_dbg_s         cn68xxp1;
	struct cvmx_lmcx_rlevel_dbg_s         cnf71xx;
};
typedef union cvmx_lmcx_rlevel_dbg cvmx_lmcx_rlevel_dbg_t;
/**
 * cvmx_lmc#_rlevel_rank#
 *
 * Notes:
 * This is four CSRs per LMC, one per each rank.
 *
 * Deskew setting is measured in units of 1/4 CK, so the BYTE* values
 * below can range over 16 CKs.
 *
 * Each CSR is written by HW during a read-leveling sequence for the
 * rank (HW sets STATUS==3 after HW read-leveling completes for the
 * rank).  If HW is unable to find a match per
 * LMC*_RLEVEL_CTL[OFFSET_ENA] and LMC*_RLEVEL_CTL[OFFSET], then HW
 * will set LMC*_RLEVEL_RANKi[BYTE*<5:0>] to 0.
 *
 * Each CSR may also be written by SW, but not while a read-leveling
 * sequence is in progress (HW sets STATUS==1 after a CSR write).
 *
 * SW initiates a HW read-leveling sequence by programming
 * LMC*_RLEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
 * See LMC*_RLEVEL_CTL.
 *
 * LMC*_RLEVEL_RANKi values for ranks i without attached DRAM should
 * be set such that they do not increase the range of possible BYTE
 * values for any byte lane.  The easiest way to do this is to set
 * LMC*_RLEVEL_RANKi = LMC*_RLEVEL_RANKj, where j is some rank with
 * attached DRAM whose LMC*_RLEVEL_RANKj is already fully initialized.
 */
union cvmx_lmcx_rlevel_rankx {
	uint64_t u64;
	struct cvmx_lmcx_rlevel_rankx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_56_63               : 8;
	uint64_t status                       : 2;  /**< Indicates status of the read-leveling and where the
	                                                 BYTE* programmings came from:
	                                                 0 = BYTE* values are their reset value
	                                                 1 = BYTE* values were set via a CSR write
	                                                 2 = read-leveling sequence currently in progress
	                                                     (BYTE* values are unpredictable)
	                                                 3 = BYTE* values came from a complete
	                                                     read-leveling sequence */
	uint64_t byte8                        : 6;  /**< Deskew setting.  When ECC DRAM is not present (i.e.
	                                                 when DRAM is not attached to chip signals DDR_CBS_0_*
	                                                 and DDR_CB[7:0]), SW should write BYTE8 to a value
	                                                 that does not increase the range of possible BYTE*
	                                                 values; easiest is BYTE8 = final BYTE0 value. */
	uint64_t byte7                        : 6;  /**< Deskew setting */
	uint64_t byte6                        : 6;  /**< Deskew setting */
	uint64_t byte5                        : 6;  /**< Deskew setting */
	uint64_t byte4                        : 6;  /**< Deskew setting */
	uint64_t byte3                        : 6;  /**< Deskew setting */
	uint64_t byte2                        : 6;  /**< Deskew setting */
	uint64_t byte1                        : 6;  /**< Deskew setting */
	uint64_t byte0                        : 6;  /**< Deskew setting */
#else
	uint64_t byte0                        : 6;
	uint64_t byte1                        : 6;
	uint64_t byte2                        : 6;
	uint64_t byte3                        : 6;
	uint64_t byte4                        : 6;
	uint64_t byte5                        : 6;
	uint64_t byte6                        : 6;
	uint64_t byte7                        : 6;
	uint64_t byte8                        : 6;
	uint64_t status                       : 2;
	uint64_t reserved_56_63               : 8;
#endif
	} s;
	struct cvmx_lmcx_rlevel_rankx_s       cn61xx;
	struct cvmx_lmcx_rlevel_rankx_s       cn63xx;
	struct cvmx_lmcx_rlevel_rankx_s       cn63xxp1;
	struct cvmx_lmcx_rlevel_rankx_s       cn66xx;
	struct cvmx_lmcx_rlevel_rankx_s       cn68xx;
	struct cvmx_lmcx_rlevel_rankx_s       cn68xxp1;
	struct cvmx_lmcx_rlevel_rankx_s       cnf71xx;
};
typedef union cvmx_lmcx_rlevel_rankx cvmx_lmcx_rlevel_rankx_t;

/**
 * cvmx_lmc#_rodt_comp_ctl
 *
 * LMC_RODT_COMP_CTL = LMC Compensation control
 */
union cvmx_lmcx_rodt_comp_ctl {
	uint64_t u64;
	struct cvmx_lmcx_rodt_comp_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_17_63               : 47;
	uint64_t enable                       : 1;  /**< 0=not enabled, 1=enable */
	uint64_t reserved_12_15               : 4;
	uint64_t nctl                         : 4;  /**< Compensation control bits */
	uint64_t reserved_5_7                 : 3;
	uint64_t pctl                         : 5;  /**< Compensation control bits */
#else
	uint64_t pctl                         : 5;
	uint64_t reserved_5_7                 : 3;
	uint64_t nctl                         : 4;
	uint64_t reserved_12_15               : 4;
	uint64_t enable                       : 1;
	uint64_t reserved_17_63               : 47;
#endif
	} s;
	struct cvmx_lmcx_rodt_comp_ctl_s      cn50xx;
	struct cvmx_lmcx_rodt_comp_ctl_s      cn52xx;
	struct cvmx_lmcx_rodt_comp_ctl_s      cn52xxp1;
	struct cvmx_lmcx_rodt_comp_ctl_s      cn56xx;
	struct cvmx_lmcx_rodt_comp_ctl_s      cn56xxp1;
	struct cvmx_lmcx_rodt_comp_ctl_s      cn58xx;
	struct cvmx_lmcx_rodt_comp_ctl_s      cn58xxp1;
};
typedef union cvmx_lmcx_rodt_comp_ctl cvmx_lmcx_rodt_comp_ctl_t;
/**
 * cvmx_lmc#_rodt_ctl
 *
 * LMC_RODT_CTL = Obsolete LMC Read OnDieTermination control.
 * See the description in LMC_WODT_CTL1.  On Reads, Octeon only
 * supports turning on ODT's in the lower 2 DIMM's with the masks
 * as below.
 *
 * Notes:
 * When a given RANK in position N is selected, the RODT _HI and _LO
 * masks for that position are used.  Mask[3:0] is used for RODT
 * control of the RANKs in positions 3, 2, 1, and 0, respectively.
 * In 64b mode, DIMMs are assumed to be ordered in the following order:
 *  position 3: [unused,         DIMM1_RANK1_LO]
 *  position 2: [unused,         DIMM1_RANK0_LO]
 *  position 1: [unused,         DIMM0_RANK1_LO]
 *  position 0: [unused,         DIMM0_RANK0_LO]
 * In 128b mode, DIMMs are assumed to be ordered in the following order:
 *  position 3: [DIMM3_RANK1_HI, DIMM1_RANK1_LO]
 *  position 2: [DIMM3_RANK0_HI, DIMM1_RANK0_LO]
 *  position 1: [DIMM2_RANK1_HI, DIMM0_RANK1_LO]
 *  position 0: [DIMM2_RANK0_HI, DIMM0_RANK0_LO]
 */
union cvmx_lmcx_rodt_ctl {
	uint64_t u64;
	struct cvmx_lmcx_rodt_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63               : 32;
	uint64_t rodt_hi3                     : 4;  /**< Read ODT mask for position 3, data[127:64] */
	uint64_t rodt_hi2                     : 4;  /**< Read ODT mask for position 2, data[127:64] */
	uint64_t rodt_hi1                     : 4;  /**< Read ODT mask for position 1, data[127:64] */
	uint64_t rodt_hi0                     : 4;  /**< Read ODT mask for position 0, data[127:64] */
	uint64_t rodt_lo3                     : 4;  /**< Read ODT mask for position 3, data[ 63: 0] */
	uint64_t rodt_lo2                     : 4;  /**< Read ODT mask for position 2, data[ 63: 0] */
	uint64_t rodt_lo1                     : 4;  /**< Read ODT mask for position 1, data[ 63: 0] */
	uint64_t rodt_lo0                     : 4;  /**< Read ODT mask for position 0, data[ 63: 0] */
#else
	uint64_t rodt_lo0                     : 4;
	uint64_t rodt_lo1                     : 4;
	uint64_t rodt_lo2                     : 4;
	uint64_t rodt_lo3                     : 4;
	uint64_t rodt_hi0                     : 4;
	uint64_t rodt_hi1                     : 4;
	uint64_t rodt_hi2                     : 4;
	uint64_t rodt_hi3                     : 4;
	uint64_t reserved_32_63               : 32;
#endif
	} s;
	struct cvmx_lmcx_rodt_ctl_s           cn30xx;
	struct cvmx_lmcx_rodt_ctl_s           cn31xx;
	struct cvmx_lmcx_rodt_ctl_s           cn38xx;
	struct cvmx_lmcx_rodt_ctl_s           cn38xxp2;
	struct cvmx_lmcx_rodt_ctl_s           cn50xx;
	struct cvmx_lmcx_rodt_ctl_s           cn52xx;
	struct cvmx_lmcx_rodt_ctl_s           cn52xxp1;
	struct cvmx_lmcx_rodt_ctl_s           cn56xx;
	struct cvmx_lmcx_rodt_ctl_s           cn56xxp1;
	struct cvmx_lmcx_rodt_ctl_s           cn58xx;
	struct cvmx_lmcx_rodt_ctl_s           cn58xxp1;
};
typedef union cvmx_lmcx_rodt_ctl cvmx_lmcx_rodt_ctl_t;
/**
 * cvmx_lmc#_rodt_mask
 *
 * LMC_RODT_MASK = LMC Read OnDieTermination mask.
 * System designers may desire to terminate DQ/DQS lines for higher
 * frequency DDR operations, especially on a multi-rank system.  DDR3
 * DQ/DQS I/O's have a built-in termination resistor that can be
 * turned on or off by the controller, after meeting tAOND and tAOF
 * timing requirements.  Each rank has its own ODT pin that fans out
 * to all the memory parts in that DIMM.  System designers may prefer
 * different combinations of ODT ON's for reads into different ranks.
 * Octeon supports full programmability by way of the mask register
 * below.  Each rank position has its own 8-bit programmable field.
 * When the controller does a read to that rank, it sets the 4 ODT
 * pins to the MASK pins below.  For example, when doing a read from
 * Rank0, a system designer may desire to terminate the lines with the
 * resistor on DIMM0/Rank1; the mask RODT_D0_R0 would then be
 * [00000010].  Octeon drives the appropriate mask values on the ODT
 * pins by default; if this feature is not required, write 0 in this
 * register.  Note that, per the DDR3 specifications, the ODT pin for
 * the rank that is being read should always be 0.
 *
 * Notes:
 * When a given RANK is selected, the RODT mask for that RANK is used.
 * The resulting RODT mask is driven to the DIMMs as follows:
 *              RANK_ENA=1     RANK_ENA=0
 *  Mask[3] ->  DIMM1_ODT_1    MBZ
 *  Mask[2] ->  DIMM1_ODT_0    DIMM1_ODT_0
 *  Mask[1] ->  DIMM0_ODT_1    MBZ
 *  Mask[0] ->  DIMM0_ODT_0    DIMM0_ODT_0
 *
 * LMC always reads entire cache blocks and always reads them via two
 * consecutive read CAS operations to the same rank+bank+row spaced
 * exactly 4 CK's apart.  When a RODT mask bit is set, LMC asserts the
 * OCTEON ODT output pin(s) starting (CL - CWL) CK's after the first
 * read CAS operation; OCTEON then normally continues to assert the
 * ODT output pin(s) for 9+LMC*_CONTROL[RODT_BPRCH] more CK's - for a
 * total of 10+LMC*_CONTROL[RODT_BPRCH] CK's for the entire cache
 * block read - through the second read CAS operation of the cache
 * block, satisfying the 6 CK DDR3 ODTH8 requirements.  But it is
 * possible for OCTEON to issue two cache block reads separated by as
 * few as RtR = 8 or 9 (10 if LMC*_CONTROL[RODT_BPRCH]=1) CK's.  In
 * that case, OCTEON asserts the ODT output pin(s) for the RODT mask
 * of the first cache block read for RtR CK's, then asserts the ODT
 * output pin(s) for the RODT mask of the second cache block read for
 * 10+LMC*_CONTROL[RODT_BPRCH] CK's (or less if a third cache block
 * read follows within 8 or 9 (or 10) CK's of this second cache block
 * read).  Note that it may be necessary to force LMC to space
 * back-to-back cache block reads to different ranks apart by at least
 * 10+LMC*_CONTROL[RODT_BPRCH] CK's to prevent DDR3 ODTH8 violations.
 */
union cvmx_lmcx_rodt_mask {
	uint64_t u64;
	struct cvmx_lmcx_rodt_mask_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t rodt_d3_r1                   : 8;  /**< Read ODT mask DIMM3, RANK1/DIMM3 in SingleRanked
	                                                 *UNUSED IN 6xxx, and MBZ* */
	uint64_t rodt_d3_r0                   : 8;  /**< Read ODT mask DIMM3, RANK0
	                                                 *UNUSED IN 6xxx, and MBZ* */
	uint64_t rodt_d2_r1                   : 8;  /**< Read ODT mask DIMM2, RANK1/DIMM2 in SingleRanked
	                                                 *UNUSED IN 6xxx, and MBZ* */
	uint64_t rodt_d2_r0                   : 8;  /**< Read ODT mask DIMM2, RANK0
	                                                 *UNUSED IN 6xxx, and MBZ* */
	uint64_t rodt_d1_r1                   : 8;  /**< Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked.
	                                                 If (RANK_ENA) then RODT_D1_R1[3] must be 0,
	                                                 else RODT_D1_R1[3:0] is not used and MBZ.
	                                                 *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
	uint64_t rodt_d1_r0                   : 8;  /**< Read ODT mask DIMM1, RANK0.
	                                                 If (RANK_ENA) then RODT_D1_RO[2] must be 0,
	                                                 else RODT_D1_RO[3:2,1] must be 0.
	                                                 *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
	uint64_t rodt_d0_r1                   : 8;  /**< Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked.
	                                                 If (RANK_ENA) then RODT_D0_R1[1] must be 0,
	                                                 else RODT_D0_R1[3:0] is not used and MBZ.
	                                                 *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
	uint64_t rodt_d0_r0                   : 8;  /**< Read ODT mask DIMM0, RANK0.
	                                                 If (RANK_ENA) then RODT_D0_RO[0] must be 0,
	                                                 else RODT_D0_RO[1:0,3] must be 0.
	                                                 *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
#else
	uint64_t rodt_d0_r0                   : 8;
	uint64_t rodt_d0_r1                   : 8;
	uint64_t rodt_d1_r0                   : 8;
	uint64_t rodt_d1_r1                   : 8;
	uint64_t rodt_d2_r0                   : 8;
	uint64_t rodt_d2_r1                   : 8;
	uint64_t rodt_d3_r0                   : 8;
	uint64_t rodt_d3_r1                   : 8;
#endif
	} s;
	struct cvmx_lmcx_rodt_mask_s          cn61xx;
	struct cvmx_lmcx_rodt_mask_s          cn63xx;
	struct cvmx_lmcx_rodt_mask_s          cn63xxp1;
	struct cvmx_lmcx_rodt_mask_s          cn66xx;
	struct cvmx_lmcx_rodt_mask_s          cn68xx;
	struct cvmx_lmcx_rodt_mask_s          cn68xxp1;
	struct cvmx_lmcx_rodt_mask_s          cnf71xx;
};
typedef union cvmx_lmcx_rodt_mask cvmx_lmcx_rodt_mask_t;

/**
 * cvmx_lmc#_scramble_cfg0
 *
 * LMC_SCRAMBLE_CFG0 = LMC Scramble Config0
 */
union cvmx_lmcx_scramble_cfg0 {
	uint64_t u64;
	struct cvmx_lmcx_scramble_cfg0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t key                          : 64; /**< Scramble Key for Data */
#else
	uint64_t key                          : 64;
#endif
	} s;
	struct cvmx_lmcx_scramble_cfg0_s      cn61xx;
	struct cvmx_lmcx_scramble_cfg0_s      cn66xx;
	struct cvmx_lmcx_scramble_cfg0_s      cnf71xx;
};
typedef union cvmx_lmcx_scramble_cfg0 cvmx_lmcx_scramble_cfg0_t;
/**
 * cvmx_lmc#_scramble_cfg1
 *
 * LMC_SCRAMBLE_CFG1 = LMC Scramble Config1
 *
 * Notes:
 * Address scrambling usually maps addresses into the same rank.
 * Exceptions are when LMC_NXM[CS_MASK] requires aliasing that uses
 * the lowest, legal chip select(s).
 */
union cvmx_lmcx_scramble_cfg1 {
	uint64_t u64;
	struct cvmx_lmcx_scramble_cfg1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t key                          : 64; /**< Scramble Key for Addresses */
#else
	uint64_t key                          : 64;
#endif
	} s;
	struct cvmx_lmcx_scramble_cfg1_s      cn61xx;
	struct cvmx_lmcx_scramble_cfg1_s      cn66xx;
	struct cvmx_lmcx_scramble_cfg1_s      cnf71xx;
};
typedef union cvmx_lmcx_scramble_cfg1 cvmx_lmcx_scramble_cfg1_t;
/**
 * cvmx_lmc#_scrambled_fadr
 *
 * LMC_SCRAMBLED_FADR = LMC Scrambled Failing Address Register
 * (SEC/DED/NXM)
 *
 * This register only captures the first transaction with ecc/nxm
 * errors.  A DED/NXM error can over-write this register with its
 * failing addresses if the first error was a SEC.  If you write
 * LMC*_CONFIG->SEC_ERR/DED_ERR/NXM_ERR then it will clear the error
 * bits and capture the next failing address.
 *
 * If FDIMM is 2 that means the error is in the higher bits DIMM.
 *
 * Notes:
 * LMC*_FADR captures the failing pre-scrambled address location
 * (split into dimm, bunk, bank, etc).  If scrambling is off, then
 * LMC*_FADR will also capture the failing physical location in the
 * DRAM parts.
 *
 * LMC*_SCRAMBLED_FADR captures the actual failing address location in
 * the physical DRAM parts, i.e.:
 * a. if scrambling is on, LMC*_SCRAMBLE_FADR contains the failing
 *    physical location in the DRAM parts (split into dimm, bunk,
 *    bank, etc)
 * b. if scrambling is off, the pre-scramble and post-scramble
 *    addresses are the same, and so the contents of
 *    LMC*_SCRAMBLED_FADR match the contents of LMC*_FADR
 */
union cvmx_lmcx_scrambled_fadr {
	uint64_t u64;
	struct cvmx_lmcx_scrambled_fadr_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_36_63               : 28;
	uint64_t fdimm                        : 2;  /**< Failing DIMM# */
	uint64_t fbunk                        : 1;  /**< Failing Rank */
	uint64_t fbank                        : 3;  /**< Failing Bank[2:0] */
	uint64_t frow                         : 16; /**< Failing Row Address[15:0] */
	uint64_t fcol                         : 14; /**< Failing Column Address[13:0].  Technically,
	                                                 represents the address of the 128b data that had an
	                                                 ecc error, i.e. fcol[0] is always 0.  Can be used in
	                                                 conjunction with LMC*_CONFIG[DED_ERR] to isolate the
	                                                 64b chunk of data in error. */
#else
	uint64_t fcol                         : 14;
	uint64_t frow                         : 16;
	uint64_t fbank                        : 3;
	uint64_t fbunk                        : 1;
	uint64_t fdimm                        : 2;
	uint64_t reserved_36_63               : 28;
#endif
	} s;
	struct cvmx_lmcx_scrambled_fadr_s     cn61xx;
	struct cvmx_lmcx_scrambled_fadr_s     cn66xx;
	struct cvmx_lmcx_scrambled_fadr_s     cnf71xx;
};
typedef union cvmx_lmcx_scrambled_fadr cvmx_lmcx_scrambled_fadr_t;
/**
 * cvmx_lmc#_slot_ctl0
 *
 * LMC_SLOT_CTL0 = LMC Slot Control0.
 * This register is an assortment of various control fields needed by
 * the memory controller.
 *
 * Notes:
 * If SW has not previously written to this register (since the last
 * DRESET), HW updates the fields in this register to the minimum
 * allowed value when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn,
 * LMC*_CONTROL and LMC*_MODEREG_PARAMS0 CSR's change.  Ideally, only
 * read this register after LMC has been initialized and
 * LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn have valid data.
 *
 * The interpretation of the fields in this CSR depends on
 * LMC*_CONFIG[DDR2T]:
 *  - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK
 *    cycles between when the DRAM part registers CAS commands of the
 *    1st and 2nd types from different cache blocks.
 *  - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK
 *    cycles between when the DRAM part registers CAS commands of the
 *    1st and 2nd types from different cache blocks.  FieldValue = 0
 *    is always illegal in this case.
 *
 * The hardware-calculated minimums are:
 *  min R2R_INIT = 1 - LMC*_CONFIG[DDR2T]
 *  min R2W_INIT = 5 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew)
 *                   - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
 *  min W2R_INIT = 2 - LMC*_CONFIG[DDR2T] + LMC*_TIMING_PARAMS1[TWTR] + WL
 *  min W2W_INIT = 1 - LMC*_CONFIG[DDR2T]
 * where
 *  RL = CL + AL (LMC*_MODEREG_PARAMS0[CL] selects CL,
 *       LMC*_MODEREG_PARAMS0[AL] selects AL)
 *  WL = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
 *  MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1
 *       (max across all ranks i (0..3) and bytes j (0..8))
 *  MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX]
 *       (min across all ranks i (0..3) and bytes j (0..8))
 *
 * R2W_INIT has 1 CK cycle built in for OCTEON-internal ODT
 * settling/channel turnaround time.
 */
union cvmx_lmcx_slot_ctl0 {
	uint64_t u64;
	struct cvmx_lmcx_slot_ctl0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_24_63               : 40;
	uint64_t w2w_init                     : 6;  /**< Write-to-write spacing control for back to back
	                                                 write followed by write cache block accesses to the
	                                                 same rank and DIMM */
	uint64_t w2r_init                     : 6;  /**< Write-to-read spacing control for back to back
	                                                 write followed by read cache block accesses to the
	                                                 same rank and DIMM */
	uint64_t r2w_init                     : 6;  /**< Read-to-write spacing control for back to back
	                                                 read followed by write cache block accesses to the
	                                                 same rank and DIMM */
	uint64_t r2r_init                     : 6;  /**< Read-to-read spacing control for back to back
	                                                 read followed by read cache block accesses to the
	                                                 same rank and DIMM */
#else
	uint64_t r2r_init                     : 6;
	uint64_t r2w_init                     : 6;
	uint64_t w2r_init                     : 6;
	uint64_t w2w_init                     : 6;
	uint64_t reserved_24_63               : 40;
#endif
	} s;
	struct cvmx_lmcx_slot_ctl0_s          cn61xx;
	struct cvmx_lmcx_slot_ctl0_s          cn63xx;
	struct cvmx_lmcx_slot_ctl0_s          cn63xxp1;
	struct cvmx_lmcx_slot_ctl0_s          cn66xx;
	struct cvmx_lmcx_slot_ctl0_s          cn68xx;
	struct cvmx_lmcx_slot_ctl0_s          cn68xxp1;
	struct cvmx_lmcx_slot_ctl0_s          cnf71xx;
};
typedef union cvmx_lmcx_slot_ctl0 cvmx_lmcx_slot_ctl0_t;
/**
 * cvmx_lmc#_slot_ctl1
 *
 * LMC_SLOT_CTL1 = LMC Slot Control1
 * This register is an assortment of various control fields needed by the memory controller
 *
 * Notes:
 * If SW has not previously written to this register (since the last DRESET),
 * HW updates the fields in this register to the minimum allowed value
 * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
 * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
 * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
 * have valid data.
 *
 * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
 * - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
 *   between when the DRAM part registers CAS commands of the 1st and 2nd types
 *   from different cache blocks.
 * - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
 *   between when the DRAM part registers CAS commands of the 1st and 2nd types
 *   from different cache blocks. FieldValue = 0 is always illegal in this
 *   case.
 *
 * The hardware-calculated minimums are:
 *
 * min R2R_XRANK_INIT = 2 - LMC*_CONFIG[DDR2T] + MaxRdSkew - MinRdSkew + LMC*_CONTROL[RODT_BPRCH]
 * min R2W_XRANK_INIT = 5 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
 * min W2R_XRANK_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxWrSkew + LMC*_CONTROL[FPRCH2]
 * min W2W_XRANK_INIT = 4 - LMC*_CONFIG[DDR2T] + MaxWrSkew - MinWrSkew
 *
 * where
 *
 * RL = CL + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
 * WL = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
 * MinRdSkew = min(LMC*_RLEVEL_RANKi[BYTEj]/4) (min is across all ranks i (0..3) and bytes j (0..8))
 * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1 (max is across all ranks i (0..3) and bytes j (0..8))
 * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] (min is across all ranks i (0..3) and bytes j (0..8))
 * MaxWrSkew = max(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] + 1 (max is across all ranks i (0..3) and bytes j (0..8))
 *
 * R2W_XRANK_INIT has 1 extra CK cycle built in for OCTEON-internal ODT settling/channel turnaround time.
 *
 * W2R_XRANK_INIT has 1 extra CK cycle built in for channel turnaround time.
 */
union cvmx_lmcx_slot_ctl1 {
	uint64_t u64;
	struct cvmx_lmcx_slot_ctl1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_24_63 : 40;
	uint64_t w2w_xrank_init : 6; /**< Write-to-write spacing control
                                      for back to back write followed by write cache block
                                      accesses across ranks of the same DIMM */
	uint64_t w2r_xrank_init : 6; /**< Write-to-read spacing control
                                      for back to back write followed by read cache block
                                      accesses across ranks of the same DIMM */
	uint64_t r2w_xrank_init : 6; /**< Read-to-write spacing control
                                      for back to back read followed by write cache block
                                      accesses across ranks of the same DIMM */
	uint64_t r2r_xrank_init : 6; /**< Read-to-read spacing control
                                      for back to back read followed by read cache block
                                      accesses across ranks of the same DIMM */
#else
	uint64_t r2r_xrank_init : 6;
	uint64_t r2w_xrank_init : 6;
	uint64_t w2r_xrank_init : 6;
	uint64_t w2w_xrank_init : 6;
	uint64_t reserved_24_63 : 40;
#endif
	} s;
	struct cvmx_lmcx_slot_ctl1_s cn61xx;
	struct cvmx_lmcx_slot_ctl1_s cn63xx;
	struct cvmx_lmcx_slot_ctl1_s cn63xxp1;
	struct cvmx_lmcx_slot_ctl1_s cn66xx;
	struct cvmx_lmcx_slot_ctl1_s cn68xx;
	struct cvmx_lmcx_slot_ctl1_s cn68xxp1;
	struct cvmx_lmcx_slot_ctl1_s cnf71xx;
};
typedef union cvmx_lmcx_slot_ctl1 cvmx_lmcx_slot_ctl1_t;
/**
 * cvmx_lmc#_slot_ctl2
 *
 * LMC_SLOT_CTL2 = LMC Slot Control2
 * This register is an assortment of various control fields needed by the memory controller
 *
 * Notes:
 * If SW has not previously written to this register (since the last DRESET),
 * HW updates the fields in this register to the minimum allowed value
 * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
 * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
 * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
 * have valid data.
 *
 * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
 * - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
 *   between when the DRAM part registers CAS commands of the 1st and 2nd types
 *   from different cache blocks.
 * - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
 *   between when the DRAM part registers CAS commands of the 1st and 2nd types
 *   from different cache blocks. FieldValue = 0 is always illegal in this
 *   case.
 *
 * The hardware-calculated minimums are:
 *
 * min R2R_XDIMM_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxRdSkew - MinRdSkew + LMC*_CONTROL[RODT_BPRCH]
 * min R2W_XDIMM_INIT = 6 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
 * min W2R_XDIMM_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxWrSkew + LMC*_CONTROL[FPRCH2]
 * min W2W_XDIMM_INIT = 5 - LMC*_CONFIG[DDR2T] + MaxWrSkew - MinWrSkew
 *
 * where
 *
 * RL = CL + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
 * WL = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
 * MinRdSkew = min(LMC*_RLEVEL_RANKi[BYTEj]/4) (min is across all ranks i (0..3) and bytes j (0..8))
 * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1 (max is across all ranks i (0..3) and bytes j (0..8))
 * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] (min is across all ranks i (0..3) and bytes j (0..8))
 * MaxWrSkew = max(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] + 1 (max is across all ranks i (0..3) and bytes j (0..8))
 *
 * R2W_XDIMM_INIT has 2 extra CK cycles built in for OCTEON-internal ODT settling/channel turnaround time.
 *
 * R2R_XDIMM_INIT, W2R_XDIMM_INIT, W2W_XDIMM_INIT have 1 extra CK cycle built in for channel turnaround time.
 */
union cvmx_lmcx_slot_ctl2 {
	uint64_t u64;
	struct cvmx_lmcx_slot_ctl2_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_24_63 : 40;
	uint64_t w2w_xdimm_init : 6; /**< Write-to-write spacing control
                                      for back to back write followed by write cache block
                                      accesses across DIMMs */
	uint64_t w2r_xdimm_init : 6; /**< Write-to-read spacing control
                                      for back to back write followed by read cache block
                                      accesses across DIMMs */
	uint64_t r2w_xdimm_init : 6; /**< Read-to-write spacing control
                                      for back to back read followed by write cache block
                                      accesses across DIMMs */
	uint64_t r2r_xdimm_init : 6; /**< Read-to-read spacing control
                                      for back to back read followed by read cache block
                                      accesses across DIMMs */
#else
	uint64_t r2r_xdimm_init : 6;
	uint64_t r2w_xdimm_init : 6;
	uint64_t w2r_xdimm_init : 6;
	uint64_t w2w_xdimm_init : 6;
	uint64_t reserved_24_63 : 40;
#endif
	} s;
	struct cvmx_lmcx_slot_ctl2_s cn61xx;
	struct cvmx_lmcx_slot_ctl2_s cn63xx;
	struct cvmx_lmcx_slot_ctl2_s cn63xxp1;
	struct cvmx_lmcx_slot_ctl2_s cn66xx;
	struct cvmx_lmcx_slot_ctl2_s cn68xx;
	struct cvmx_lmcx_slot_ctl2_s cn68xxp1;
	struct cvmx_lmcx_slot_ctl2_s cnf71xx;
};
typedef union cvmx_lmcx_slot_ctl2 cvmx_lmcx_slot_ctl2_t;

/**
 * cvmx_lmc#_timing_params0
 */
union cvmx_lmcx_timing_params0 {
	uint64_t u64;
	struct cvmx_lmcx_timing_params0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_47_63 : 17;
	uint64_t trp_ext : 1; /**< Indicates tRP constraints.
                               Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                               + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
                               where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                               is the DDR clock frequency (not data rate).
                               TYP tRP=10-15ns
                               TYP tRTP=max(4nCK, 7.5ns) */
	uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
                              Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
                              where tCKSRE is from the DDR3 spec, and tCYC(ns)
                              is the DDR clock frequency (not data rate).
                              TYP=max(5nCK, 10ns) */
	uint64_t trp : 4; /**< Indicates tRP constraints.
                           Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                           + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
                           where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                           is the DDR clock frequency (not data rate).
                           TYP tRP=10-15ns
                           TYP tRTP=max(4nCK, 7.5ns) */
	uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
                               Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
                               where tZQINIT is from the DDR3 spec, and tCYC(ns)
                               is the DDR clock frequency (not data rate).
                               TYP=2 (equivalent to 512) */
	uint64_t tdllk : 4; /**< Indicates tDLLK constraints.
                             Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
                             where tDLLK is from the DDR3 spec, and tCYC(ns)
                             is the DDR clock frequency (not data rate).
                             TYP=2 (equivalent to 512)
                             This parameter is used in self-refresh exit
                             and assumed to be greater than tRFC */
	uint64_t tmod : 4; /**< Indicates tMOD constraints.
                            Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
                            where tMOD is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=max(12nCK, 15ns) */
	uint64_t tmrd : 4; /**< Indicates tMRD constraints.
                            Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
                            where tMRD is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=4nCK */
	uint64_t txpr : 4; /**< Indicates tXPR constraints.
                            Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
                            where tXPR is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=max(5nCK, tRFC+10ns) */
	uint64_t tcke : 4; /**< Indicates tCKE constraints.
                            Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
                            where tCKE is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
	uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
                             Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
                             where tZQCS is from the DDR3 spec, and tCYC(ns)
                             is the DDR clock frequency (not data rate).
                             TYP=4 (equivalent to 64) */
	uint64_t tckeon : 10; /**< Reserved. Should be written to zero. */
#else
	uint64_t tckeon : 10;
	uint64_t tzqcs : 4;
	uint64_t tcke : 4;
	uint64_t txpr : 4;
	uint64_t tmrd : 4;
	uint64_t tmod : 4;
	uint64_t tdllk : 4;
	uint64_t tzqinit : 4;
	uint64_t trp : 4;
	uint64_t tcksre : 4;
	uint64_t trp_ext : 1;
	uint64_t reserved_47_63 : 17;
#endif
	} s;
	/* Same layout as the generic struct except bits <9:0> are reserved
	   (no TCKEON field on these models). */
	struct cvmx_lmcx_timing_params0_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_47_63 : 17;
	uint64_t trp_ext : 1; /**< Indicates tRP constraints.
                               Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                               + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
                               where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                               is the DDR clock frequency (not data rate).
                               TYP tRP=10-15ns
                               TYP tRTP=max(4nCK, 7.5ns) */
	uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
                              Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
                              where tCKSRE is from the DDR3 spec, and tCYC(ns)
                              is the DDR clock frequency (not data rate).
                              TYP=max(5nCK, 10ns) */
	uint64_t trp : 4; /**< Indicates tRP constraints.
                           Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                           + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
                           where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                           is the DDR clock frequency (not data rate).
                           TYP tRP=10-15ns
                           TYP tRTP=max(4nCK, 7.5ns) */
	uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
                               Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
                               where tZQINIT is from the DDR3 spec, and tCYC(ns)
                               is the DDR clock frequency (not data rate).
                               TYP=2 (equivalent to 512) */
	uint64_t tdllk : 4; /**< Indicates tDLLK constraints.
                             Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
                             where tDLLK is from the DDR3 spec, and tCYC(ns)
                             is the DDR clock frequency (not data rate).
                             TYP=2 (equivalent to 512)
                             This parameter is used in self-refresh exit
                             and assumed to be greater than tRFC */
	uint64_t tmod : 4; /**< Indicates tMOD constraints.
                            Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
                            where tMOD is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=max(12nCK, 15ns) */
	uint64_t tmrd : 4; /**< Indicates tMRD constraints.
                            Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
                            where tMRD is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=4nCK */
	uint64_t txpr : 4; /**< Indicates tXPR constraints.
                            Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
                            where tXPR is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=max(5nCK, tRFC+10ns) */
	uint64_t tcke : 4; /**< Indicates tCKE constraints.
                            Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
                            where tCKE is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
	uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
                             Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
                             where tZQCS is from the DDR3 spec, and tCYC(ns)
                             is the DDR clock frequency (not data rate).
                             TYP=4 (equivalent to 64) */
	uint64_t reserved_0_9 : 10;
#else
	uint64_t reserved_0_9 : 10;
	uint64_t tzqcs : 4;
	uint64_t tcke : 4;
	uint64_t txpr : 4;
	uint64_t tmrd : 4;
	uint64_t tmod : 4;
	uint64_t tdllk : 4;
	uint64_t tzqinit : 4;
	uint64_t trp : 4;
	uint64_t tcksre : 4;
	uint64_t trp_ext : 1;
	uint64_t reserved_47_63 : 17;
#endif
	} cn61xx;
	struct cvmx_lmcx_timing_params0_cn61xx cn63xx;
	/* cn63xx pass 1 lacks the TRP_EXT bit (TRP is 4 bits only). */
	struct cvmx_lmcx_timing_params0_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_46_63 : 18;
	uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
                              Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
                              where tCKSRE is from the DDR3 spec, and tCYC(ns)
                              is the DDR clock frequency (not data rate).
                              TYP=max(5nCK, 10ns) */
	uint64_t trp : 4; /**< Indicates tRP constraints.
                           Set TRP (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                           + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
                           where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                           is the DDR clock frequency (not data rate).
                           TYP tRP=10-15ns
                           TYP tRTP=max(4nCK, 7.5ns) */
	uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
                               Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
                               where tZQINIT is from the DDR3 spec, and tCYC(ns)
                               is the DDR clock frequency (not data rate).
                               TYP=2 (equivalent to 512) */
	uint64_t tdllk : 4; /**< Indicates tDLLK constraints.
                             Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
                             where tDLLK is from the DDR3 spec, and tCYC(ns)
                             is the DDR clock frequency (not data rate).
                             TYP=2 (equivalent to 512)
                             This parameter is used in self-refresh exit
                             and assumed to be greater than tRFC */
	uint64_t tmod : 4; /**< Indicates tMOD constraints.
                            Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
                            where tMOD is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=max(12nCK, 15ns) */
	uint64_t tmrd : 4; /**< Indicates tMRD constraints.
                            Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
                            where tMRD is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=4nCK */
	uint64_t txpr : 4; /**< Indicates tXPR constraints.
                            Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
                            where tXPR is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=max(5nCK, tRFC+10ns) */
	uint64_t tcke : 4; /**< Indicates tCKE constraints.
                            Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
                            where tCKE is from the DDR3 spec, and tCYC(ns)
                            is the DDR clock frequency (not data rate).
                            TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
	uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
                             Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
                             where tZQCS is from the DDR3 spec, and tCYC(ns)
                             is the DDR clock frequency (not data rate).
                             TYP=4 (equivalent to 64) */
	uint64_t tckeon : 10; /**< Reserved. Should be written to zero. */
#else
	uint64_t tckeon : 10;
	uint64_t tzqcs : 4;
	uint64_t tcke : 4;
	uint64_t txpr : 4;
	uint64_t tmrd : 4;
	uint64_t tmod : 4;
	uint64_t tdllk : 4;
	uint64_t tzqinit : 4;
	uint64_t trp : 4;
	uint64_t tcksre : 4;
	uint64_t reserved_46_63 : 18;
#endif
	} cn63xxp1;
	struct cvmx_lmcx_timing_params0_cn61xx cn66xx;
	struct cvmx_lmcx_timing_params0_cn61xx cn68xx;
	struct cvmx_lmcx_timing_params0_cn61xx cn68xxp1;
	struct cvmx_lmcx_timing_params0_cn61xx cnf71xx;
};
typedef union cvmx_lmcx_timing_params0 cvmx_lmcx_timing_params0_t;
cvmx_lmc#_timing_params1 7721215976Sjmallett */ 7722232812Sjmallettunion cvmx_lmcx_timing_params1 { 7723215976Sjmallett uint64_t u64; 7724232812Sjmallett struct cvmx_lmcx_timing_params1_s { 7725232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 7726215976Sjmallett uint64_t reserved_47_63 : 17; 7727215976Sjmallett uint64_t tras_ext : 1; /**< Indicates tRAS constraints. 7728215976Sjmallett Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1, 7729215976Sjmallett where tRAS is from the DDR3 spec, and tCYC(ns) 7730215976Sjmallett is the DDR clock frequency (not data rate). 7731215976Sjmallett TYP=35ns-9*tREFI 7732215976Sjmallett - 000000: RESERVED 7733215976Sjmallett - 000001: 2 tCYC 7734215976Sjmallett - 000010: 3 tCYC 7735215976Sjmallett - ... 7736215976Sjmallett - 111111: 64 tCYC */ 7737215976Sjmallett uint64_t txpdll : 5; /**< Indicates tXPDLL constraints. 7738215976Sjmallett Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1, 7739215976Sjmallett where tXPDLL is from the DDR3 spec, and tCYC(ns) 7740215976Sjmallett is the DDR clock frequency (not data rate). 7741215976Sjmallett TYP=max(10nCK, 24ns) */ 7742215976Sjmallett uint64_t tfaw : 5; /**< Indicates tFAW constraints. 7743215976Sjmallett Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))], 7744215976Sjmallett where tFAW is from the DDR3 spec, and tCYC(ns) 7745215976Sjmallett is the DDR clock frequency (not data rate). 7746215976Sjmallett TYP=30-40ns */ 7747215976Sjmallett uint64_t twldqsen : 4; /**< Indicates tWLDQSEN constraints. 7748215976Sjmallett Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))], 7749215976Sjmallett where tWLDQSEN is from the DDR3 spec, and tCYC(ns) 7750215976Sjmallett is the DDR clock frequency (not data rate). 7751215976Sjmallett TYP=max(25nCK) */ 7752215976Sjmallett uint64_t twlmrd : 4; /**< Indicates tWLMRD constraints. 
7753215976Sjmallett Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))], 7754215976Sjmallett where tWLMRD is from the DDR3 spec, and tCYC(ns) 7755215976Sjmallett is the DDR clock frequency (not data rate). 7756215976Sjmallett TYP=max(40nCK) */ 7757215976Sjmallett uint64_t txp : 3; /**< Indicates tXP constraints. 7758215976Sjmallett Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1, 7759215976Sjmallett where tXP is from the DDR3 spec, and tCYC(ns) 7760215976Sjmallett is the DDR clock frequency (not data rate). 7761215976Sjmallett TYP=max(3nCK, 7.5ns) */ 7762215976Sjmallett uint64_t trrd : 3; /**< Indicates tRRD constraints. 7763215976Sjmallett Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2, 7764215976Sjmallett where tRRD is from the DDR3 spec, and tCYC(ns) 7765215976Sjmallett is the DDR clock frequency (not data rate). 7766215976Sjmallett TYP=max(4nCK, 10ns) 7767215976Sjmallett - 000: RESERVED 7768215976Sjmallett - 001: 3 tCYC 7769215976Sjmallett - ... 7770215976Sjmallett - 110: 8 tCYC 7771215976Sjmallett - 111: 9 tCYC */ 7772215976Sjmallett uint64_t trfc : 5; /**< Indicates tRFC constraints. 7773215976Sjmallett Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))], 7774215976Sjmallett where tRFC is from the DDR3 spec, and tCYC(ns) 7775215976Sjmallett is the DDR clock frequency (not data rate). 7776215976Sjmallett TYP=90-350ns 7777215976Sjmallett - 00000: RESERVED 7778215976Sjmallett - 00001: 8 tCYC 7779215976Sjmallett - 00010: 16 tCYC 7780215976Sjmallett - 00011: 24 tCYC 7781215976Sjmallett - 00100: 32 tCYC 7782215976Sjmallett - ... 7783215976Sjmallett - 11110: 240 tCYC 7784215976Sjmallett - 11111: 248 tCYC */ 7785215976Sjmallett uint64_t twtr : 4; /**< Indicates tWTR constraints. 7786215976Sjmallett Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1, 7787215976Sjmallett where tWTR is from the DDR3 spec, and tCYC(ns) 7788215976Sjmallett is the DDR clock frequency (not data rate). 
7789215976Sjmallett TYP=max(4nCK, 7.5ns) 7790215976Sjmallett - 0000: RESERVED 7791215976Sjmallett - 0001: 2 7792215976Sjmallett - ... 7793215976Sjmallett - 0111: 8 7794215976Sjmallett - 1000-1111: RESERVED */ 7795215976Sjmallett uint64_t trcd : 4; /**< Indicates tRCD constraints. 7796215976Sjmallett Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)], 7797215976Sjmallett where tRCD is from the DDR3 spec, and tCYC(ns) 7798215976Sjmallett is the DDR clock frequency (not data rate). 7799215976Sjmallett TYP=10-15ns 7800215976Sjmallett - 0000: RESERVED 7801215976Sjmallett - 0001: 2 (2 is the smallest value allowed) 7802215976Sjmallett - 0002: 2 7803215976Sjmallett - ... 7804232812Sjmallett - 1110: 14 7805232812Sjmallett - 1111: RESERVED 7806215976Sjmallett In 2T mode, make this register TRCD-1, not going 7807215976Sjmallett below 2. */ 7808215976Sjmallett uint64_t tras : 5; /**< Indicates tRAS constraints. 7809232812Sjmallett Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1, 7810215976Sjmallett where tRAS is from the DDR3 spec, and tCYC(ns) 7811215976Sjmallett is the DDR clock frequency (not data rate). 7812215976Sjmallett TYP=35ns-9*tREFI 7813232812Sjmallett - 000000: RESERVED 7814232812Sjmallett - 000001: 2 tCYC 7815232812Sjmallett - 000010: 3 tCYC 7816215976Sjmallett - ... 7817232812Sjmallett - 111111: 64 tCYC */ 7818215976Sjmallett uint64_t tmprr : 4; /**< Indicates tMPRR constraints. 7819215976Sjmallett Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1, 7820215976Sjmallett where tMPRR is from the DDR3 spec, and tCYC(ns) 7821215976Sjmallett is the DDR clock frequency (not data rate). 
7822215976Sjmallett TYP=1nCK */ 7823215976Sjmallett#else 7824215976Sjmallett uint64_t tmprr : 4; 7825215976Sjmallett uint64_t tras : 5; 7826215976Sjmallett uint64_t trcd : 4; 7827215976Sjmallett uint64_t twtr : 4; 7828215976Sjmallett uint64_t trfc : 5; 7829215976Sjmallett uint64_t trrd : 3; 7830215976Sjmallett uint64_t txp : 3; 7831215976Sjmallett uint64_t twlmrd : 4; 7832215976Sjmallett uint64_t twldqsen : 4; 7833215976Sjmallett uint64_t tfaw : 5; 7834215976Sjmallett uint64_t txpdll : 5; 7835215976Sjmallett uint64_t tras_ext : 1; 7836215976Sjmallett uint64_t reserved_47_63 : 17; 7837215976Sjmallett#endif 7838215976Sjmallett } s; 7839232812Sjmallett struct cvmx_lmcx_timing_params1_s cn61xx; 7840215976Sjmallett struct cvmx_lmcx_timing_params1_s cn63xx; 7841232812Sjmallett struct cvmx_lmcx_timing_params1_cn63xxp1 { 7842232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 7843215976Sjmallett uint64_t reserved_46_63 : 18; 7844215976Sjmallett uint64_t txpdll : 5; /**< Indicates tXPDLL constraints. 7845215976Sjmallett Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1, 7846215976Sjmallett where tXPDLL is from the DDR3 spec, and tCYC(ns) 7847215976Sjmallett is the DDR clock frequency (not data rate). 7848215976Sjmallett TYP=max(10nCK, 24ns) */ 7849215976Sjmallett uint64_t tfaw : 5; /**< Indicates tFAW constraints. 7850215976Sjmallett Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))], 7851215976Sjmallett where tFAW is from the DDR3 spec, and tCYC(ns) 7852215976Sjmallett is the DDR clock frequency (not data rate). 7853215976Sjmallett TYP=30-40ns */ 7854215976Sjmallett uint64_t twldqsen : 4; /**< Indicates tWLDQSEN constraints. 7855215976Sjmallett Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))], 7856215976Sjmallett where tWLDQSEN is from the DDR3 spec, and tCYC(ns) 7857215976Sjmallett is the DDR clock frequency (not data rate). 7858215976Sjmallett TYP=max(25nCK) */ 7859215976Sjmallett uint64_t twlmrd : 4; /**< Indicates tWLMRD constraints. 
7860215976Sjmallett Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))], 7861215976Sjmallett where tWLMRD is from the DDR3 spec, and tCYC(ns) 7862215976Sjmallett is the DDR clock frequency (not data rate). 7863215976Sjmallett TYP=max(40nCK) */ 7864215976Sjmallett uint64_t txp : 3; /**< Indicates tXP constraints. 7865215976Sjmallett Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1, 7866215976Sjmallett where tXP is from the DDR3 spec, and tCYC(ns) 7867215976Sjmallett is the DDR clock frequency (not data rate). 7868215976Sjmallett TYP=max(3nCK, 7.5ns) */ 7869215976Sjmallett uint64_t trrd : 3; /**< Indicates tRRD constraints. 7870215976Sjmallett Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2, 7871215976Sjmallett where tRRD is from the DDR3 spec, and tCYC(ns) 7872215976Sjmallett is the DDR clock frequency (not data rate). 7873215976Sjmallett TYP=max(4nCK, 10ns) 7874215976Sjmallett - 000: RESERVED 7875215976Sjmallett - 001: 3 tCYC 7876215976Sjmallett - ... 7877215976Sjmallett - 110: 8 tCYC 7878215976Sjmallett - 111: 9 tCYC */ 7879215976Sjmallett uint64_t trfc : 5; /**< Indicates tRFC constraints. 7880215976Sjmallett Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))], 7881215976Sjmallett where tRFC is from the DDR3 spec, and tCYC(ns) 7882215976Sjmallett is the DDR clock frequency (not data rate). 7883215976Sjmallett TYP=90-350ns 7884215976Sjmallett - 00000: RESERVED 7885215976Sjmallett - 00001: 8 tCYC 7886215976Sjmallett - 00010: 16 tCYC 7887215976Sjmallett - 00011: 24 tCYC 7888215976Sjmallett - 00100: 32 tCYC 7889215976Sjmallett - ... 7890215976Sjmallett - 11110: 240 tCYC 7891215976Sjmallett - 11111: 248 tCYC */ 7892215976Sjmallett uint64_t twtr : 4; /**< Indicates tWTR constraints. 7893215976Sjmallett Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1, 7894215976Sjmallett where tWTR is from the DDR3 spec, and tCYC(ns) 7895215976Sjmallett is the DDR clock frequency (not data rate). 
7896215976Sjmallett TYP=max(4nCK, 7.5ns) 7897215976Sjmallett - 0000: RESERVED 7898215976Sjmallett - 0001: 2 7899215976Sjmallett - ... 7900215976Sjmallett - 0111: 8 7901215976Sjmallett - 1000-1111: RESERVED */ 7902215976Sjmallett uint64_t trcd : 4; /**< Indicates tRCD constraints. 7903215976Sjmallett Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)], 7904215976Sjmallett where tRCD is from the DDR3 spec, and tCYC(ns) 7905215976Sjmallett is the DDR clock frequency (not data rate). 7906215976Sjmallett TYP=10-15ns 7907215976Sjmallett - 0000: RESERVED 7908215976Sjmallett - 0001: 2 (2 is the smallest value allowed) 7909215976Sjmallett - 0002: 2 7910215976Sjmallett - ... 7911215976Sjmallett - 1001: 9 7912215976Sjmallett - 1010-1111: RESERVED 7913215976Sjmallett In 2T mode, make this register TRCD-1, not going 7914215976Sjmallett below 2. */ 7915215976Sjmallett uint64_t tras : 5; /**< Indicates tRAS constraints. 7916215976Sjmallett Set TRAS (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1, 7917215976Sjmallett where tRAS is from the DDR3 spec, and tCYC(ns) 7918215976Sjmallett is the DDR clock frequency (not data rate). 7919215976Sjmallett TYP=35ns-9*tREFI 7920215976Sjmallett - 00000: RESERVED 7921215976Sjmallett - 00001: 2 tCYC 7922215976Sjmallett - 00010: 3 tCYC 7923215976Sjmallett - ... 7924215976Sjmallett - 11111: 32 tCYC */ 7925215976Sjmallett uint64_t tmprr : 4; /**< Indicates tMPRR constraints. 7926215976Sjmallett Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1, 7927215976Sjmallett where tMPRR is from the DDR3 spec, and tCYC(ns) 7928215976Sjmallett is the DDR clock frequency (not data rate). 
7929215976Sjmallett TYP=1nCK */ 7930215976Sjmallett#else 7931215976Sjmallett uint64_t tmprr : 4; 7932215976Sjmallett uint64_t tras : 5; 7933215976Sjmallett uint64_t trcd : 4; 7934215976Sjmallett uint64_t twtr : 4; 7935215976Sjmallett uint64_t trfc : 5; 7936215976Sjmallett uint64_t trrd : 3; 7937215976Sjmallett uint64_t txp : 3; 7938215976Sjmallett uint64_t twlmrd : 4; 7939215976Sjmallett uint64_t twldqsen : 4; 7940215976Sjmallett uint64_t tfaw : 5; 7941215976Sjmallett uint64_t txpdll : 5; 7942215976Sjmallett uint64_t reserved_46_63 : 18; 7943215976Sjmallett#endif 7944215976Sjmallett } cn63xxp1; 7945232812Sjmallett struct cvmx_lmcx_timing_params1_s cn66xx; 7946232812Sjmallett struct cvmx_lmcx_timing_params1_s cn68xx; 7947232812Sjmallett struct cvmx_lmcx_timing_params1_s cn68xxp1; 7948232812Sjmallett struct cvmx_lmcx_timing_params1_s cnf71xx; 7949215976Sjmallett}; 7950215976Sjmalletttypedef union cvmx_lmcx_timing_params1 cvmx_lmcx_timing_params1_t; 7951215976Sjmallett 7952215976Sjmallett/** 7953215976Sjmallett * cvmx_lmc#_tro_ctl 7954215976Sjmallett * 7955215976Sjmallett * LMC_TRO_CTL = LMC Temperature Ring Osc Control 7956215976Sjmallett * This register is an assortment of various control fields needed to control the temperature ring oscillator 7957215976Sjmallett * 7958215976Sjmallett * Notes: 7959215976Sjmallett * To bring up the temperature ring oscillator, write TRESET to 0, and follow by initializing RCLK_CNT to desired 7960215976Sjmallett * value 7961215976Sjmallett */ 7962232812Sjmallettunion cvmx_lmcx_tro_ctl { 7963215976Sjmallett uint64_t u64; 7964232812Sjmallett struct cvmx_lmcx_tro_ctl_s { 7965232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 7966215976Sjmallett uint64_t reserved_33_63 : 31; 7967215976Sjmallett uint64_t rclk_cnt : 32; /**< rclk counter */ 7968215976Sjmallett uint64_t treset : 1; /**< Reset ring oscillator */ 7969215976Sjmallett#else 7970215976Sjmallett uint64_t treset : 1; 7971215976Sjmallett uint64_t rclk_cnt : 32; 7972215976Sjmallett uint64_t 
reserved_33_63 : 31; 7973215976Sjmallett#endif 7974215976Sjmallett } s; 7975232812Sjmallett struct cvmx_lmcx_tro_ctl_s cn61xx; 7976215976Sjmallett struct cvmx_lmcx_tro_ctl_s cn63xx; 7977215976Sjmallett struct cvmx_lmcx_tro_ctl_s cn63xxp1; 7978232812Sjmallett struct cvmx_lmcx_tro_ctl_s cn66xx; 7979232812Sjmallett struct cvmx_lmcx_tro_ctl_s cn68xx; 7980232812Sjmallett struct cvmx_lmcx_tro_ctl_s cn68xxp1; 7981232812Sjmallett struct cvmx_lmcx_tro_ctl_s cnf71xx; 7982215976Sjmallett}; 7983215976Sjmalletttypedef union cvmx_lmcx_tro_ctl cvmx_lmcx_tro_ctl_t; 7984215976Sjmallett 7985215976Sjmallett/** 7986215976Sjmallett * cvmx_lmc#_tro_stat 7987215976Sjmallett * 7988215976Sjmallett * LMC_TRO_STAT = LMC Temperature Ring Osc Status 7989215976Sjmallett * This register is an assortment of various control fields needed to control the temperature ring oscillator 7990215976Sjmallett */ 7991232812Sjmallettunion cvmx_lmcx_tro_stat { 7992215976Sjmallett uint64_t u64; 7993232812Sjmallett struct cvmx_lmcx_tro_stat_s { 7994232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 7995215976Sjmallett uint64_t reserved_32_63 : 32; 7996215976Sjmallett uint64_t ring_cnt : 32; /**< ring counter */ 7997215976Sjmallett#else 7998215976Sjmallett uint64_t ring_cnt : 32; 7999215976Sjmallett uint64_t reserved_32_63 : 32; 8000215976Sjmallett#endif 8001215976Sjmallett } s; 8002232812Sjmallett struct cvmx_lmcx_tro_stat_s cn61xx; 8003215976Sjmallett struct cvmx_lmcx_tro_stat_s cn63xx; 8004215976Sjmallett struct cvmx_lmcx_tro_stat_s cn63xxp1; 8005232812Sjmallett struct cvmx_lmcx_tro_stat_s cn66xx; 8006232812Sjmallett struct cvmx_lmcx_tro_stat_s cn68xx; 8007232812Sjmallett struct cvmx_lmcx_tro_stat_s cn68xxp1; 8008232812Sjmallett struct cvmx_lmcx_tro_stat_s cnf71xx; 8009215976Sjmallett}; 8010215976Sjmalletttypedef union cvmx_lmcx_tro_stat cvmx_lmcx_tro_stat_t; 8011215976Sjmallett 8012215976Sjmallett/** 8013215976Sjmallett * cvmx_lmc#_wlevel_ctl 8014215976Sjmallett */ 8015232812Sjmallettunion cvmx_lmcx_wlevel_ctl { 
8016215976Sjmallett uint64_t u64; 8017232812Sjmallett struct cvmx_lmcx_wlevel_ctl_s { 8018232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 8019215976Sjmallett uint64_t reserved_22_63 : 42; 8020215976Sjmallett uint64_t rtt_nom : 3; /**< RTT_NOM 8021215976Sjmallett LMC writes a decoded value to MR1[Rtt_Nom] of the rank during 8022215976Sjmallett write leveling. Per JEDEC DDR3 specifications, 8023215976Sjmallett only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) 8024215976Sjmallett are allowed during write leveling with output buffer enabled. 8025215976Sjmallett 000 : LMC writes 001 (RZQ/4) to MR1[Rtt_Nom] 8026215976Sjmallett 001 : LMC writes 010 (RZQ/2) to MR1[Rtt_Nom] 8027215976Sjmallett 010 : LMC writes 011 (RZQ/6) to MR1[Rtt_Nom] 8028215976Sjmallett 011 : LMC writes 100 (RZQ/12) to MR1[Rtt_Nom] 8029215976Sjmallett 100 : LMC writes 101 (RZQ/8) to MR1[Rtt_Nom] 8030215976Sjmallett 101 : LMC writes 110 (Rsvd) to MR1[Rtt_Nom] 8031215976Sjmallett 110 : LMC writes 111 (Rsvd) to MR1[Rtt_Nom] 8032215976Sjmallett 111 : LMC writes 000 (Disabled) to MR1[Rtt_Nom] */ 8033215976Sjmallett uint64_t bitmask : 8; /**< Mask to select bit lanes on which write-leveling 8034215976Sjmallett feedback is returned when OR_DIS is set to 1 */ 8035215976Sjmallett uint64_t or_dis : 1; /**< Disable or'ing of bits in a byte lane when computing 8036215976Sjmallett the write-leveling bitmask */ 8037215976Sjmallett uint64_t sset : 1; /**< Run write-leveling on the current setting only. 
*/ 8038215976Sjmallett uint64_t lanemask : 9; /**< One-hot mask to select byte lane to be leveled by 8039215976Sjmallett the write-leveling sequence 8040215976Sjmallett Used with x16 parts where the upper and lower byte 8041215976Sjmallett lanes need to be leveled independently */ 8042215976Sjmallett#else 8043215976Sjmallett uint64_t lanemask : 9; 8044215976Sjmallett uint64_t sset : 1; 8045215976Sjmallett uint64_t or_dis : 1; 8046215976Sjmallett uint64_t bitmask : 8; 8047215976Sjmallett uint64_t rtt_nom : 3; 8048215976Sjmallett uint64_t reserved_22_63 : 42; 8049215976Sjmallett#endif 8050215976Sjmallett } s; 8051232812Sjmallett struct cvmx_lmcx_wlevel_ctl_s cn61xx; 8052215976Sjmallett struct cvmx_lmcx_wlevel_ctl_s cn63xx; 8053232812Sjmallett struct cvmx_lmcx_wlevel_ctl_cn63xxp1 { 8054232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 8055215976Sjmallett uint64_t reserved_10_63 : 54; 8056215976Sjmallett uint64_t sset : 1; /**< Run write-leveling on the current setting only. */ 8057215976Sjmallett uint64_t lanemask : 9; /**< One-hot mask to select byte lane to be leveled by 8058215976Sjmallett the write-leveling sequence 8059215976Sjmallett Used with x16 parts where the upper and lower byte 8060215976Sjmallett lanes need to be leveled independently */ 8061215976Sjmallett#else 8062215976Sjmallett uint64_t lanemask : 9; 8063215976Sjmallett uint64_t sset : 1; 8064215976Sjmallett uint64_t reserved_10_63 : 54; 8065215976Sjmallett#endif 8066215976Sjmallett } cn63xxp1; 8067232812Sjmallett struct cvmx_lmcx_wlevel_ctl_s cn66xx; 8068232812Sjmallett struct cvmx_lmcx_wlevel_ctl_s cn68xx; 8069232812Sjmallett struct cvmx_lmcx_wlevel_ctl_s cn68xxp1; 8070232812Sjmallett struct cvmx_lmcx_wlevel_ctl_s cnf71xx; 8071215976Sjmallett}; 8072215976Sjmalletttypedef union cvmx_lmcx_wlevel_ctl cvmx_lmcx_wlevel_ctl_t; 8073215976Sjmallett 8074215976Sjmallett/** 8075215976Sjmallett * cvmx_lmc#_wlevel_dbg 8076215976Sjmallett * 8077215976Sjmallett * Notes: 8078215976Sjmallett * A given write of 
LMC*_WLEVEL_DBG returns the write-leveling pass/fail results for all possible 8079215976Sjmallett * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW write-leveled. 8080215976Sjmallett * LMC*_WLEVEL_DBG[BYTE] selects the particular byte. 8081215976Sjmallett * To get these pass/fail results for another different rank, you must run the hardware write-leveling 8082215976Sjmallett * again. For example, it is possible to get the BITMASK results for every byte of every rank 8083215976Sjmallett * if you run write-leveling separately for each rank, probing LMC*_WLEVEL_DBG between each 8084215976Sjmallett * write-leveling. 8085215976Sjmallett */ 8086232812Sjmallettunion cvmx_lmcx_wlevel_dbg { 8087215976Sjmallett uint64_t u64; 8088232812Sjmallett struct cvmx_lmcx_wlevel_dbg_s { 8089232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 8090215976Sjmallett uint64_t reserved_12_63 : 52; 8091215976Sjmallett uint64_t bitmask : 8; /**< Bitmask generated during deskew settings sweep 8092215976Sjmallett if LMCX_WLEVEL_CTL[SSET]=0 8093215976Sjmallett BITMASK[n]=0 means deskew setting n failed 8094215976Sjmallett BITMASK[n]=1 means deskew setting n passed 8095215976Sjmallett for 0 <= n <= 7 8096215976Sjmallett BITMASK contains the first 8 results of the total 16 8097215976Sjmallett collected by LMC during the write-leveling sequence 8098215976Sjmallett else if LMCX_WLEVEL_CTL[SSET]=1 8099215976Sjmallett BITMASK[0]=0 means curr deskew setting failed 8100215976Sjmallett BITMASK[0]=1 means curr deskew setting passed */ 8101215976Sjmallett uint64_t byte : 4; /**< 0 <= BYTE <= 8 */ 8102215976Sjmallett#else 8103215976Sjmallett uint64_t byte : 4; 8104215976Sjmallett uint64_t bitmask : 8; 8105215976Sjmallett uint64_t reserved_12_63 : 52; 8106215976Sjmallett#endif 8107215976Sjmallett } s; 8108232812Sjmallett struct cvmx_lmcx_wlevel_dbg_s cn61xx; 8109215976Sjmallett struct cvmx_lmcx_wlevel_dbg_s cn63xx; 8110215976Sjmallett struct cvmx_lmcx_wlevel_dbg_s cn63xxp1; 
8111232812Sjmallett struct cvmx_lmcx_wlevel_dbg_s cn66xx; 8112232812Sjmallett struct cvmx_lmcx_wlevel_dbg_s cn68xx; 8113232812Sjmallett struct cvmx_lmcx_wlevel_dbg_s cn68xxp1; 8114232812Sjmallett struct cvmx_lmcx_wlevel_dbg_s cnf71xx; 8115215976Sjmallett}; 8116215976Sjmalletttypedef union cvmx_lmcx_wlevel_dbg cvmx_lmcx_wlevel_dbg_t; 8117215976Sjmallett 8118215976Sjmallett/** 8119215976Sjmallett * cvmx_lmc#_wlevel_rank# 8120215976Sjmallett * 8121215976Sjmallett * Notes: 8122215976Sjmallett * This is four CSRs per LMC, one per each rank. 8123215976Sjmallett * 8124215976Sjmallett * Deskew setting is measured in units of 1/8 CK, so the above BYTE* values can range over 4 CKs. 8125215976Sjmallett * 8126215976Sjmallett * Assuming LMC*_WLEVEL_CTL[SSET]=0, the BYTE*<2:0> values are not used during write-leveling, and 8127215976Sjmallett * they are over-written by the hardware as part of the write-leveling sequence. (HW sets STATUS==3 8128215976Sjmallett * after HW write-leveling completes for the rank). SW needs to set BYTE*<4:3> bits. 8129215976Sjmallett * 8130215976Sjmallett * Each CSR may also be written by SW, but not while a write-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.) 8131215976Sjmallett * 8132215976Sjmallett * SW initiates a HW write-leveling sequence by programming LMC*_WLEVEL_CTL and writing RANKMASK and INIT_START=1 with SEQUENCE=6 in LMC*_CONFIG. 8133215976Sjmallett * LMC will then step through and accumulate write leveling results for 8 unique delay settings (twice), starting at a delay of 8134215976Sjmallett * LMC*_WLEVEL_RANKn[BYTE*<4:3>]*8 CK increasing by 1/8 CK each setting. HW will then set LMC*_WLEVEL_RANKi[BYTE*<2:0>] to indicate the 8135215976Sjmallett * first write leveling result of '1' that followed a reslt of '0' during the sequence by searching for a '1100' pattern in the generated 8136215976Sjmallett * bitmask, except that LMC will always write LMC*_WLEVEL_RANKi[BYTE*<0>]=0. 
If HW is unable to find a match for a '1100' pattern, then HW will 8137215976Sjmallett * set LMC*_WLEVEL_RANKi[BYTE*<2:0>] to 4. 8138215976Sjmallett * See LMC*_WLEVEL_CTL. 8139215976Sjmallett * 8140215976Sjmallett * LMC*_WLEVEL_RANKi values for ranks i without attached DRAM should be set such that 8141215976Sjmallett * they do not increase the range of possible BYTE values for any byte 8142215976Sjmallett * lane. The easiest way to do this is to set 8143215976Sjmallett * LMC*_WLEVEL_RANKi = LMC*_WLEVEL_RANKj, 8144215976Sjmallett * where j is some rank with attached DRAM whose LMC*_WLEVEL_RANKj is already fully initialized. 8145215976Sjmallett */ 8146232812Sjmallettunion cvmx_lmcx_wlevel_rankx { 8147215976Sjmallett uint64_t u64; 8148232812Sjmallett struct cvmx_lmcx_wlevel_rankx_s { 8149232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 8150215976Sjmallett uint64_t reserved_47_63 : 17; 8151215976Sjmallett uint64_t status : 2; /**< Indicates status of the write-leveling and where 8152215976Sjmallett the BYTE* programmings in <44:0> came from: 8153215976Sjmallett 0 = BYTE* values are their reset value 8154215976Sjmallett 1 = BYTE* values were set via a CSR write to this register 8155215976Sjmallett 2 = write-leveling sequence currently in progress (BYTE* values are unpredictable) 8156215976Sjmallett 3 = BYTE* values came from a complete write-leveling sequence, irrespective of 8157215976Sjmallett which lanes are masked via LMC*WLEVEL_CTL[LANEMASK] */ 8158215976Sjmallett uint64_t byte8 : 5; /**< Deskew setting 8159215976Sjmallett Bit 0 of BYTE8 must be zero during normal operation. 8160215976Sjmallett When ECC DRAM is not present (i.e. when DRAM is not 8161215976Sjmallett attached to chip signals DDR_CBS_0_* and DDR_CB[7:0]), 8162215976Sjmallett SW should write BYTE8 with a value that does 8163215976Sjmallett not increase the range of possible BYTE* values. 
The 8164215976Sjmallett easiest way to do this is to set 8165215976Sjmallett LMC*_WLEVEL_RANK*[BYTE8] = LMC*_WLEVEL_RANK*[BYTE0] 8166215976Sjmallett when there is no ECC DRAM, using the final BYTE0 value. */ 8167215976Sjmallett uint64_t byte7 : 5; /**< Deskew setting 8168215976Sjmallett Bit 0 of BYTE7 must be zero during normal operation */ 8169215976Sjmallett uint64_t byte6 : 5; /**< Deskew setting 8170215976Sjmallett Bit 0 of BYTE6 must be zero during normal operation */ 8171215976Sjmallett uint64_t byte5 : 5; /**< Deskew setting 8172215976Sjmallett Bit 0 of BYTE5 must be zero during normal operation */ 8173215976Sjmallett uint64_t byte4 : 5; /**< Deskew setting 8174215976Sjmallett Bit 0 of BYTE4 must be zero during normal operation */ 8175215976Sjmallett uint64_t byte3 : 5; /**< Deskew setting 8176215976Sjmallett Bit 0 of BYTE3 must be zero during normal operation */ 8177215976Sjmallett uint64_t byte2 : 5; /**< Deskew setting 8178215976Sjmallett Bit 0 of BYTE2 must be zero during normal operation */ 8179215976Sjmallett uint64_t byte1 : 5; /**< Deskew setting 8180215976Sjmallett Bit 0 of BYTE1 must be zero during normal operation */ 8181215976Sjmallett uint64_t byte0 : 5; /**< Deskew setting 8182215976Sjmallett Bit 0 of BYTE0 must be zero during normal operation */ 8183215976Sjmallett#else 8184215976Sjmallett uint64_t byte0 : 5; 8185215976Sjmallett uint64_t byte1 : 5; 8186215976Sjmallett uint64_t byte2 : 5; 8187215976Sjmallett uint64_t byte3 : 5; 8188215976Sjmallett uint64_t byte4 : 5; 8189215976Sjmallett uint64_t byte5 : 5; 8190215976Sjmallett uint64_t byte6 : 5; 8191215976Sjmallett uint64_t byte7 : 5; 8192215976Sjmallett uint64_t byte8 : 5; 8193215976Sjmallett uint64_t status : 2; 8194215976Sjmallett uint64_t reserved_47_63 : 17; 8195215976Sjmallett#endif 8196215976Sjmallett } s; 8197232812Sjmallett struct cvmx_lmcx_wlevel_rankx_s cn61xx; 8198215976Sjmallett struct cvmx_lmcx_wlevel_rankx_s cn63xx; 8199215976Sjmallett struct cvmx_lmcx_wlevel_rankx_s cn63xxp1; 
8200232812Sjmallett struct cvmx_lmcx_wlevel_rankx_s cn66xx; 8201232812Sjmallett struct cvmx_lmcx_wlevel_rankx_s cn68xx; 8202232812Sjmallett struct cvmx_lmcx_wlevel_rankx_s cn68xxp1; 8203232812Sjmallett struct cvmx_lmcx_wlevel_rankx_s cnf71xx; 8204215976Sjmallett}; 8205215976Sjmalletttypedef union cvmx_lmcx_wlevel_rankx cvmx_lmcx_wlevel_rankx_t; 8206215976Sjmallett 8207215976Sjmallett/** 8208215976Sjmallett * cvmx_lmc#_wodt_ctl0 8209215976Sjmallett * 8210215976Sjmallett * LMC_WODT_CTL0 = LMC Write OnDieTermination control 8211215976Sjmallett * See the description in LMC_WODT_CTL1. 8212215976Sjmallett * 8213215976Sjmallett * Notes: 8214215976Sjmallett * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write ODT mask. See LMC_WODT_CTL1. 8215215976Sjmallett * 8216215976Sjmallett */ 8217232812Sjmallettunion cvmx_lmcx_wodt_ctl0 { 8218215976Sjmallett uint64_t u64; 8219232812Sjmallett struct cvmx_lmcx_wodt_ctl0_s { 8220232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 8221215976Sjmallett uint64_t reserved_0_63 : 64; 8222215976Sjmallett#else 8223215976Sjmallett uint64_t reserved_0_63 : 64; 8224215976Sjmallett#endif 8225215976Sjmallett } s; 8226232812Sjmallett struct cvmx_lmcx_wodt_ctl0_cn30xx { 8227232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 8228215976Sjmallett uint64_t reserved_32_63 : 32; 8229215976Sjmallett uint64_t wodt_d1_r1 : 8; /**< Write ODT mask DIMM1, RANK1 */ 8230215976Sjmallett uint64_t wodt_d1_r0 : 8; /**< Write ODT mask DIMM1, RANK0 */ 8231215976Sjmallett uint64_t wodt_d0_r1 : 8; /**< Write ODT mask DIMM0, RANK1 */ 8232215976Sjmallett uint64_t wodt_d0_r0 : 8; /**< Write ODT mask DIMM0, RANK0 */ 8233215976Sjmallett#else 8234215976Sjmallett uint64_t wodt_d0_r0 : 8; 8235215976Sjmallett uint64_t wodt_d0_r1 : 8; 8236215976Sjmallett uint64_t wodt_d1_r0 : 8; 8237215976Sjmallett uint64_t wodt_d1_r1 : 8; 8238215976Sjmallett uint64_t reserved_32_63 : 32; 8239215976Sjmallett#endif 8240215976Sjmallett } cn30xx; 8241215976Sjmallett struct cvmx_lmcx_wodt_ctl0_cn30xx 
cn31xx; 8242232812Sjmallett struct cvmx_lmcx_wodt_ctl0_cn38xx { 8243232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 8244215976Sjmallett uint64_t reserved_32_63 : 32; 8245215976Sjmallett uint64_t wodt_hi3 : 4; /**< Write ODT mask for position 3, data[127:64] */ 8246215976Sjmallett uint64_t wodt_hi2 : 4; /**< Write ODT mask for position 2, data[127:64] */ 8247215976Sjmallett uint64_t wodt_hi1 : 4; /**< Write ODT mask for position 1, data[127:64] */ 8248215976Sjmallett uint64_t wodt_hi0 : 4; /**< Write ODT mask for position 0, data[127:64] */ 8249215976Sjmallett uint64_t wodt_lo3 : 4; /**< Write ODT mask for position 3, data[ 63: 0] */ 8250215976Sjmallett uint64_t wodt_lo2 : 4; /**< Write ODT mask for position 2, data[ 63: 0] */ 8251215976Sjmallett uint64_t wodt_lo1 : 4; /**< Write ODT mask for position 1, data[ 63: 0] */ 8252215976Sjmallett uint64_t wodt_lo0 : 4; /**< Write ODT mask for position 0, data[ 63: 0] */ 8253215976Sjmallett#else 8254215976Sjmallett uint64_t wodt_lo0 : 4; 8255215976Sjmallett uint64_t wodt_lo1 : 4; 8256215976Sjmallett uint64_t wodt_lo2 : 4; 8257215976Sjmallett uint64_t wodt_lo3 : 4; 8258215976Sjmallett uint64_t wodt_hi0 : 4; 8259215976Sjmallett uint64_t wodt_hi1 : 4; 8260215976Sjmallett uint64_t wodt_hi2 : 4; 8261215976Sjmallett uint64_t wodt_hi3 : 4; 8262215976Sjmallett uint64_t reserved_32_63 : 32; 8263215976Sjmallett#endif 8264215976Sjmallett } cn38xx; 8265215976Sjmallett struct cvmx_lmcx_wodt_ctl0_cn38xx cn38xxp2; 8266215976Sjmallett struct cvmx_lmcx_wodt_ctl0_cn38xx cn50xx; 8267215976Sjmallett struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xx; 8268215976Sjmallett struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xxp1; 8269215976Sjmallett struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xx; 8270215976Sjmallett struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xxp1; 8271215976Sjmallett struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xx; 8272215976Sjmallett struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xxp1; 8273215976Sjmallett}; 8274215976Sjmalletttypedef union cvmx_lmcx_wodt_ctl0 cvmx_lmcx_wodt_ctl0_t; 
8275215976Sjmallett 8276215976Sjmallett/** 8277215976Sjmallett * cvmx_lmc#_wodt_ctl1 8278215976Sjmallett * 8279215976Sjmallett * LMC_WODT_CTL1 = LMC Write OnDieTermination control 8280215976Sjmallett * System designers may desire to terminate DQ/DQS/DM lines for higher frequency DDR operations 8281215976Sjmallett * (667MHz and faster), especially on a multi-rank system. DDR2 DQ/DM/DQS I/O's have built in 8282215976Sjmallett * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF 8283215976Sjmallett * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts 8284215976Sjmallett * in that DIMM. System designers may prefer different combinations of ODT ON's for read and write 8285215976Sjmallett * into different ranks. Octeon supports full programmability by way of the mask register below. 8286215976Sjmallett * Each Rank position has its own 8-bit programmable field. 8287215976Sjmallett * When the controller does a write to that rank, it sets the 8 ODT pins to the MASK pins below. 8288215976Sjmallett * For eg., When doing a write into Rank0, a system designer may desire to terminate the lines 8289215976Sjmallett * with the resistor on Dimm0/Rank1. The mask WODT_D0_R0 would then be [00000010]. 8290215976Sjmallett * If ODT feature is not desired, the DDR parts can be programmed to not look at these pins by 8291215976Sjmallett * writing 0 in QS_DIC. Octeon drives the appropriate mask values on the ODT pins by default. 8292215976Sjmallett * If this feature is not required, write 0 in this register. 8293215976Sjmallett * 8294215976Sjmallett * Notes: 8295215976Sjmallett * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write ODT mask. 8296215976Sjmallett * When a given RANK is selected, the WODT mask for that RANK is used. 
The resulting WODT mask is 8297215976Sjmallett * driven to the DIMMs in the following manner: 8298215976Sjmallett * BUNK_ENA=1 BUNK_ENA=0 8299215976Sjmallett * Mask[7] -> DIMM3, RANK1 DIMM3 8300215976Sjmallett * Mask[6] -> DIMM3, RANK0 8301215976Sjmallett * Mask[5] -> DIMM2, RANK1 DIMM2 8302215976Sjmallett * Mask[4] -> DIMM2, RANK0 8303215976Sjmallett * Mask[3] -> DIMM1, RANK1 DIMM1 8304215976Sjmallett * Mask[2] -> DIMM1, RANK0 8305215976Sjmallett * Mask[1] -> DIMM0, RANK1 DIMM0 8306215976Sjmallett * Mask[0] -> DIMM0, RANK0 8307215976Sjmallett */ 8308232812Sjmallettunion cvmx_lmcx_wodt_ctl1 { 8309215976Sjmallett uint64_t u64; 8310232812Sjmallett struct cvmx_lmcx_wodt_ctl1_s { 8311232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 8312215976Sjmallett uint64_t reserved_32_63 : 32; 8313215976Sjmallett uint64_t wodt_d3_r1 : 8; /**< Write ODT mask DIMM3, RANK1/DIMM3 in SingleRanked */ 8314215976Sjmallett uint64_t wodt_d3_r0 : 8; /**< Write ODT mask DIMM3, RANK0 */ 8315215976Sjmallett uint64_t wodt_d2_r1 : 8; /**< Write ODT mask DIMM2, RANK1/DIMM2 in SingleRanked */ 8316215976Sjmallett uint64_t wodt_d2_r0 : 8; /**< Write ODT mask DIMM2, RANK0 */ 8317215976Sjmallett#else 8318215976Sjmallett uint64_t wodt_d2_r0 : 8; 8319215976Sjmallett uint64_t wodt_d2_r1 : 8; 8320215976Sjmallett uint64_t wodt_d3_r0 : 8; 8321215976Sjmallett uint64_t wodt_d3_r1 : 8; 8322215976Sjmallett uint64_t reserved_32_63 : 32; 8323215976Sjmallett#endif 8324215976Sjmallett } s; 8325215976Sjmallett struct cvmx_lmcx_wodt_ctl1_s cn30xx; 8326215976Sjmallett struct cvmx_lmcx_wodt_ctl1_s cn31xx; 8327215976Sjmallett struct cvmx_lmcx_wodt_ctl1_s cn52xx; 8328215976Sjmallett struct cvmx_lmcx_wodt_ctl1_s cn52xxp1; 8329215976Sjmallett struct cvmx_lmcx_wodt_ctl1_s cn56xx; 8330215976Sjmallett struct cvmx_lmcx_wodt_ctl1_s cn56xxp1; 8331215976Sjmallett}; 8332215976Sjmalletttypedef union cvmx_lmcx_wodt_ctl1 cvmx_lmcx_wodt_ctl1_t; 8333215976Sjmallett 8334215976Sjmallett/** 8335215976Sjmallett * cvmx_lmc#_wodt_mask 
8336215976Sjmallett * 8337215976Sjmallett * LMC_WODT_MASK = LMC Write OnDieTermination mask 8338215976Sjmallett * System designers may desire to terminate DQ/DQS lines for higher frequency DDR operations 8339215976Sjmallett * especially on a multi-rank system. DDR3 DQ/DQS I/O's have built in 8340215976Sjmallett * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF 8341215976Sjmallett * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts 8342215976Sjmallett * in that DIMM. System designers may prefer different combinations of ODT ON's for writes 8343215976Sjmallett * into different ranks. Octeon supports full programmability by way of the mask register below. 8344215976Sjmallett * Each Rank position has its own 8-bit programmable field. 8345215976Sjmallett * When the controller does a write to that rank, it sets the 4 ODT pins to the MASK pins below. 8346215976Sjmallett * For eg., When doing a write into Rank0, a system designer may desire to terminate the lines 8347215976Sjmallett * with the resistor on DIMM0/Rank1. The mask WODT_D0_R0 would then be [00000010]. 8348215976Sjmallett * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not 8349215976Sjmallett * required, write 0 in this register. 8350215976Sjmallett * 8351215976Sjmallett * Notes: 8352215976Sjmallett * When a given RANK is selected, the WODT mask for that RANK is used. 
The resulting WODT mask is 8353215976Sjmallett * driven to the DIMMs in the following manner: 8354215976Sjmallett * RANK_ENA=1 RANK_ENA=0 8355215976Sjmallett * Mask[3] -> DIMM1_ODT_1 MBZ 8356215976Sjmallett * Mask[2] -> DIMM1_ODT_0 DIMM1_ODT_0 8357215976Sjmallett * Mask[1] -> DIMM0_ODT_1 MBZ 8358215976Sjmallett * Mask[0] -> DIMM0_ODT_0 DIMM0_ODT_0 8359215976Sjmallett * 8360215976Sjmallett * LMC always writes entire cache blocks and always writes them via two consecutive 8361215976Sjmallett * write CAS operations to the same rank+bank+row spaced exactly 4 CK's apart. 8362215976Sjmallett * When a WODT mask bit is set, LMC asserts the OCTEON ODT output 8363215976Sjmallett * pin(s) starting the same CK as the first write CAS operation. Then, OCTEON 8364215976Sjmallett * normally continues to assert the ODT output pin(s) for 9+LMC*_CONTROL[WODT_BPRCH] more CK's 8365215976Sjmallett * - for a total of 10+LMC*_CONTROL[WODT_BPRCH] CK's for the entire cache block write - 8366215976Sjmallett * through the second write CAS operation of the cache block, 8367215976Sjmallett * satisfying the 6 CK DDR3 ODTH8 requirements. 8368215976Sjmallett * But it is possible for OCTEON to issue two cache block writes separated by as few as 8369215976Sjmallett * WtW = 8 or 9 (10 if LMC*_CONTROL[WODT_BPRCH]=1) CK's. In that case, OCTEON asserts the ODT output pin(s) 8370215976Sjmallett * for the WODT mask of the first cache block write for WtW CK's, then asserts 8371215976Sjmallett * the ODT output pin(s) for the WODT mask of the second cache block write for 10+LMC*_CONTROL[WODT_BPRCH] CK's 8372215976Sjmallett * (or less if a third cache block write follows within 8 or 9 (or 10) CK's of this second cache block write). 8373215976Sjmallett * Note that it may be necessary to force LMC to space back-to-back cache block writes 8374215976Sjmallett * to different ranks apart by at least 10+LMC*_CONTROL[WODT_BPRCH] CK's to prevent DDR3 ODTH8 violations. 
8375215976Sjmallett */ 8376232812Sjmallettunion cvmx_lmcx_wodt_mask { 8377215976Sjmallett uint64_t u64; 8378232812Sjmallett struct cvmx_lmcx_wodt_mask_s { 8379232812Sjmallett#ifdef __BIG_ENDIAN_BITFIELD 8380215976Sjmallett uint64_t wodt_d3_r1 : 8; /**< Write ODT mask DIMM3, RANK1/DIMM3 in SingleRanked 8381232812Sjmallett *UNUSED IN 6xxx, and MBZ* */ 8382215976Sjmallett uint64_t wodt_d3_r0 : 8; /**< Write ODT mask DIMM3, RANK0 8383232812Sjmallett *UNUSED IN 6xxx, and MBZ* */ 8384215976Sjmallett uint64_t wodt_d2_r1 : 8; /**< Write ODT mask DIMM2, RANK1/DIMM2 in SingleRanked 8385232812Sjmallett *UNUSED IN 6xxx, and MBZ* */ 8386215976Sjmallett uint64_t wodt_d2_r0 : 8; /**< Write ODT mask DIMM2, RANK0 8387232812Sjmallett *UNUSED IN 6xxx, and MBZ* */ 8388215976Sjmallett uint64_t wodt_d1_r1 : 8; /**< Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked 8389215976Sjmallett if (!RANK_ENA) then WODT_D1_R1[3:0] MBZ 8390232812Sjmallett *Upper 4 bits UNUSED IN 6xxx, and MBZ* */ 8391215976Sjmallett uint64_t wodt_d1_r0 : 8; /**< Write ODT mask DIMM1, RANK0 8392215976Sjmallett if (!RANK_ENA) then WODT_D1_R0[3,1] MBZ 8393232812Sjmallett *Upper 4 bits UNUSED IN 6xxx, and MBZ* */ 8394215976Sjmallett uint64_t wodt_d0_r1 : 8; /**< Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked 8395215976Sjmallett if (!RANK_ENA) then WODT_D0_R1[3:0] MBZ 8396232812Sjmallett *Upper 4 bits UNUSED IN 6xxx, and MBZ* */ 8397215976Sjmallett uint64_t wodt_d0_r0 : 8; /**< Write ODT mask DIMM0, RANK0 8398215976Sjmallett if (!RANK_ENA) then WODT_D0_R0[3,1] MBZ 8399232812Sjmallett *Upper 4 bits UNUSED IN 6xxx, and MBZ* */ 8400215976Sjmallett#else 8401215976Sjmallett uint64_t wodt_d0_r0 : 8; 8402215976Sjmallett uint64_t wodt_d0_r1 : 8; 8403215976Sjmallett uint64_t wodt_d1_r0 : 8; 8404215976Sjmallett uint64_t wodt_d1_r1 : 8; 8405215976Sjmallett uint64_t wodt_d2_r0 : 8; 8406215976Sjmallett uint64_t wodt_d2_r1 : 8; 8407215976Sjmallett uint64_t wodt_d3_r0 : 8; 8408215976Sjmallett uint64_t wodt_d3_r1 : 8; 
8409215976Sjmallett#endif 8410215976Sjmallett } s; 8411232812Sjmallett struct cvmx_lmcx_wodt_mask_s cn61xx; 8412215976Sjmallett struct cvmx_lmcx_wodt_mask_s cn63xx; 8413215976Sjmallett struct cvmx_lmcx_wodt_mask_s cn63xxp1; 8414232812Sjmallett struct cvmx_lmcx_wodt_mask_s cn66xx; 8415232812Sjmallett struct cvmx_lmcx_wodt_mask_s cn68xx; 8416232812Sjmallett struct cvmx_lmcx_wodt_mask_s cn68xxp1; 8417232812Sjmallett struct cvmx_lmcx_wodt_mask_s cnf71xx; 8418215976Sjmallett}; 8419215976Sjmalletttypedef union cvmx_lmcx_wodt_mask cvmx_lmcx_wodt_mask_t; 8420215976Sjmallett 8421215976Sjmallett#endif 8422