/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Inc. (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.

 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.

 * This Software, including technical data, may be subject to U.S. export  control
 * laws, including the U.S. Export Administration Act and its  associated
 * regulations, and may be subject to export or import  regulations in other
 * countries.

 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/

/**
 * @file
 *
 * Implementation of the Level 2 Cache (L2C) control,
 * measurement, and debugging facilities.
 *
 * <hr>$Revision: 70215 $<hr>
 *
 */

#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "cvmx-config.h"
#endif
#include "cvmx.h"
#include "cvmx-l2c.h"
#include "cvmx-spinlock.h"
#include "cvmx-interrupt.h"
#endif

#ifndef CVMX_BUILD_FOR_LINUX_HOST
/*
 * This spinlock is used internally to ensure that only one core is
 * performing certain L2 operations at a time.
 *
 * NOTE: This only protects calls from within a single application -
 * if multiple applications or operating systems are running, then it
 * is up to the user program to coordinate between them.
 */
CVMX_SHARED cvmx_spinlock_t cvmx_l2c_spinlock;
#endif

int cvmx_l2c_get_core_way_partition(uint32_t core)
{
    uint32_t field;

    /* Validate the core number */
    if (core >= cvmx_octeon_num_cores())
        return -1;

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        return (cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff);

    /*
     * Use the lower two bits of the core number to determine the
     * bit offset of the UMSK[] field in the L2C_SPAR register.
     */
    field = (core & 0x3) * 8;

    /*
     * Return the UMSK[] field from the appropriate L2C_SPAR
     * register based on the core number.
     */

    switch (core & 0xC) {
    case 0x0:
        return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
    case 0x4:
        return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
    case 0x8:
        return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
    case 0xC:
        return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
    }
    return 0;
}

int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
{
    uint32_t field;
    uint32_t valid_mask;

    valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;

    mask &= valid_mask;

    /* A UMSK setting which blocks all L2C Ways is an error on some chips */
    if (mask == valid_mask && (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
        return -1;

    /* Validate the core number */
    if (core >= cvmx_octeon_num_cores())
        return -1;

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
        return 0;
    }

    /*
     * Use the lower two bits of core to determine the bit offset of the
     * UMSK[] field in the L2C_SPAR register.
     */
    field = (core & 0x3) * 8;

    /*
     * Assign the new mask setting to the UMSK[] field in the appropriate
     * L2C_SPAR register based on the core number.
     */
    switch (core & 0xC) {
    case 0x0:
        cvmx_write_csr(CVMX_L2C_SPAR0,
                   (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
                   mask << field);
        break;
    case 0x4:
        cvmx_write_csr(CVMX_L2C_SPAR1,
                   (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
                   mask << field);
        break;
    case 0x8:
        cvmx_write_csr(CVMX_L2C_SPAR2,
                   (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
                   mask << field);
        break;
    case 0xC:
        cvmx_write_csr(CVMX_L2C_SPAR3,
                   (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
                   mask << field);
        break;
    }
    return 0;
}
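
/*
 * Usage sketch (illustration only, not part of the SDK): restrict the
 * current core to L2 ways 0-3 and read the setting back.  This assumes
 * simple-executive context where cvmx_get_core_num() is available.  As
 * the all-ways-blocked check above implies, a set UMSK bit blocks the
 * core from allocating into that way, so blocking everything above way 3
 * leaves ways 0-3 usable.
 *
 *     uint32_t core = cvmx_get_core_num();
 *     uint32_t block_mask = ((1u << cvmx_l2c_get_num_assoc()) - 1) & ~0xFu;
 *
 *     if (cvmx_l2c_set_core_way_partition(core, block_mask) == 0)
 *         cvmx_dprintf("core %u UMSK is now 0x%x\n", (unsigned)core,
 *                      (unsigned)cvmx_l2c_get_core_way_partition(core));
 */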

int cvmx_l2c_set_hw_way_partition(uint32_t mask)
{
    uint32_t valid_mask;

    valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
    mask &= valid_mask;

    /* A UMSK setting which blocks all L2C Ways is an error on some chips */
    if (mask == valid_mask && (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
        return -1;

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
    else
        cvmx_write_csr(CVMX_L2C_SPAR4,
                   (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
    return 0;
}

int cvmx_l2c_get_hw_way_partition(void)
{
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff;
    else
        return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
}

int cvmx_l2c_set_hw_way_partition2(uint32_t mask)
{
    uint32_t valid_mask;

    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
        return -1;

    valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
    mask &= valid_mask;
    cvmx_write_csr(CVMX_L2C_WPAR_IOBX(1), mask);
    return 0;
}

int cvmx_l2c_get_hw_way_partition2(void)
{
    if (!OCTEON_IS_MODEL(OCTEON_CN68XX)) {
        cvmx_warn("only one IOB on this chip");
        return -1;
    }
    return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(1)) & 0xffff;
}
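
/*
 * Usage sketch (illustration only): apply the same kind of way restriction
 * to hardware (IOB) traffic.  The mask value 0x3 used here is just an
 * example that blocks ways 0 and 1; on CN68XX the second IOB is covered
 * separately by the *_partition2 variant above.
 *
 *     uint32_t iob_mask = 0x3;
 *
 *     cvmx_l2c_set_hw_way_partition(iob_mask);
 *     if (OCTEON_IS_MODEL(OCTEON_CN68XX))
 *         cvmx_l2c_set_hw_way_partition2(iob_mask);
 */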
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
              uint32_t clear_on_read)
{
    if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
        union cvmx_l2c_pfctl pfctl;

        pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);

        switch (counter) {
        case 0:
            pfctl.s.cnt0sel = event;
            pfctl.s.cnt0ena = 1;
            pfctl.s.cnt0rdclr = clear_on_read;
            break;
        case 1:
            pfctl.s.cnt1sel = event;
            pfctl.s.cnt1ena = 1;
            pfctl.s.cnt1rdclr = clear_on_read;
            break;
        case 2:
            pfctl.s.cnt2sel = event;
            pfctl.s.cnt2ena = 1;
            pfctl.s.cnt2rdclr = clear_on_read;
            break;
        case 3:
        default:
            pfctl.s.cnt3sel = event;
            pfctl.s.cnt3ena = 1;
            pfctl.s.cnt3rdclr = clear_on_read;
            break;
        }

        cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
    } else {
        union cvmx_l2c_tadx_prf l2c_tadx_prf;
        int tad;

        cvmx_warn("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");

        cvmx_warn_if(clear_on_read, "L2C counters don't support clear on read for this chip\n");

        l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));

        switch (counter) {
        case 0:
            l2c_tadx_prf.s.cnt0sel = event;
            break;
        case 1:
            l2c_tadx_prf.s.cnt1sel = event;
            break;
        case 2:
            l2c_tadx_prf.s.cnt2sel = event;
            break;
        default:
        case 3:
            l2c_tadx_prf.s.cnt3sel = event;
            break;
        }
        for (tad = 0; tad < CVMX_L2C_TADS; tad++)
            cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
                       l2c_tadx_prf.u64);
    }
}

uint64_t cvmx_l2c_read_perf(uint32_t counter)
{
    switch (counter) {
    case 0:
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
            return cvmx_read_csr(CVMX_L2C_PFC0);
        else {
            uint64_t counter = 0;
            int tad;
            for (tad = 0; tad < CVMX_L2C_TADS; tad++)
                counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
            return counter;
        }
    case 1:
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
            return cvmx_read_csr(CVMX_L2C_PFC1);
        else {
            uint64_t counter = 0;
            int tad;
            for (tad = 0; tad < CVMX_L2C_TADS; tad++)
                counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
            return counter;
        }
    case 2:
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
            return cvmx_read_csr(CVMX_L2C_PFC2);
        else {
            uint64_t counter = 0;
            int tad;
            for (tad = 0; tad < CVMX_L2C_TADS; tad++)
                counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
            return counter;
        }
    case 3:
    default:
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
            return cvmx_read_csr(CVMX_L2C_PFC3);
        else {
            uint64_t counter = 0;
            int tad;
            for (tad = 0; tad < CVMX_L2C_TADS; tad++)
                counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
            return counter;
        }
    }
}
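
/*
 * Usage sketch (illustration only): program L2C performance counter 0 and
 * read it around a workload.  CVMX_L2C_EVENT_CYCLES is assumed to be the
 * cycle-count event from cvmx-l2c.h; substitute the event of interest.
 * run_workload() is a hypothetical placeholder for the code being
 * measured.  On OcteonII parts the value returned is summed across all
 * TADs, as implemented above.
 *
 *     uint64_t before, after;
 *
 *     cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_CYCLES, 0);
 *     before = cvmx_l2c_read_perf(0);
 *     run_workload();
 *     after = cvmx_l2c_read_perf(0);
 *     cvmx_dprintf("L2C counter 0 delta: %llu\n",
 *                  (unsigned long long)(after - before));
 */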

#ifndef CVMX_BUILD_FOR_LINUX_HOST
/**
 * @INTERNAL
 * Helper function used to fault in cache lines for L2 cache locking
 *
 * @param addr   Address of base of memory region to read into L2 cache
 * @param len    Length (in bytes) of region to fault in
 */
static void fault_in(uint64_t addr, int len)
{
    volatile char *ptr;
    volatile char dummy;
    /*
     * Adjust addr and length so we get all cache lines even for
     * small ranges spanning two cache lines.
     */
    len += addr & CVMX_CACHE_LINE_MASK;
    addr &= ~CVMX_CACHE_LINE_MASK;
    ptr = (volatile char *)cvmx_phys_to_ptr(addr);
    /*
     * Invalidate L1 cache to make sure all loads result in data
     * being in L2.
     */
    CVMX_DCACHE_INVALIDATE;
    while (len > 0) {
        dummy += *ptr;
        len -= CVMX_CACHE_LINE_SIZE;
        ptr += CVMX_CACHE_LINE_SIZE;
    }
}

int cvmx_l2c_lock_line(uint64_t addr)
{
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
        uint64_t assoc = cvmx_l2c_get_num_assoc();
        uint32_t tag = cvmx_l2c_v2_address_to_tag(addr);
        uint64_t indext = cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT;
        uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, indext);
        uint64_t way;
        uint32_t tad;
        union cvmx_l2c_tadx_tag l2c_tadx_tag;

        if (tag == 0xFFFFFFFF) {
            cvmx_dprintf("ERROR: cvmx_l2c_lock_line: addr 0x%llx in LMC hole."
                         "\n", (unsigned long long) addr);
            return -1;
        }

        tad = cvmx_l2c_address_to_tad(addr);

        /* cvmx_dprintf("shift=%d index=%lx tag=%x\n",shift, index, tag); */
        CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
        CVMX_SYNCW;
        /* Make sure we were able to lock the line */
        for (way = 0; way < assoc; way++) {
            uint64_t caddr = index | (way << shift);
            CVMX_CACHE_LTGL2I(caddr, 0);
            /* make sure CVMX_L2C_TADX_TAG is updated */
            CVMX_SYNC;
            l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(tad));
            if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
                break;
            /* cvmx_printf("caddr=%lx tad=%d tagu64=%lx valid=%x tag=%x \n", caddr,
               tad, l2c_tadx_tag.u64, l2c_tadx_tag.s.valid, l2c_tadx_tag.s.tag); */
        }

        /* Check if a valid line is found */
        if (way >= assoc) {
            /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at"
                         " 0x%llx address\n", (unsigned long long)addr); */
            return -1;
        }

        /* Check if lock bit is not set */
        if (!l2c_tadx_tag.s.lock) {
            /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at "
               "0x%llx address\n", (unsigned long long)addr); */
            return -1;
        }
        return 0;
    } else {
        int retval = 0;
        union cvmx_l2c_dbg l2cdbg;
        union cvmx_l2c_lckbase lckbase;
        union cvmx_l2c_lckoff lckoff;
        union cvmx_l2t_err l2t_err;

        cvmx_spinlock_lock(&cvmx_l2c_spinlock);

        l2cdbg.u64 = 0;
        lckbase.u64 = 0;
        lckoff.u64 = 0;

        /* Clear l2t error bits if set */
        l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
        l2t_err.s.lckerr = 1;
        l2t_err.s.lckerr2 = 1;
        cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);

        addr &= ~CVMX_CACHE_LINE_MASK;

        /* Set this core as debug core */
        l2cdbg.s.ppnum = cvmx_get_core_num();
        CVMX_SYNC;
        cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
        cvmx_read_csr(CVMX_L2C_DBG);

        lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
        cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
        cvmx_read_csr(CVMX_L2C_LCKOFF);

        if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
            int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * cvmx_l2c_get_set_bits() - 1;
            uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> cvmx_l2c_get_set_bits();
            lckbase.s.lck_base = addr_tmp >> 7;
        } else {
            lckbase.s.lck_base = addr >> 7;
        }

        lckbase.s.lck_ena = 1;
        cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
        /* Make sure it gets there */
        cvmx_read_csr(CVMX_L2C_LCKBASE);

        fault_in(addr, CVMX_CACHE_LINE_SIZE);

        lckbase.s.lck_ena = 0;
        cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
        /* Make sure it gets there */
        cvmx_read_csr(CVMX_L2C_LCKBASE);

        /* Stop being debug core */
        cvmx_write_csr(CVMX_L2C_DBG, 0);
        cvmx_read_csr(CVMX_L2C_DBG);

        l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
        if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
            retval = 1;  /* We were unable to lock the line */

        cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
        return retval;
    }
}

int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
{
    int retval = 0;

    /* Round start/end to cache line boundaries */
    len += start & CVMX_CACHE_LINE_MASK;
    start &= ~CVMX_CACHE_LINE_MASK;
    len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;

    while (len) {
        if (cvmx_l2c_lock_line(start) != 0)
            retval--;
        start += CVMX_CACHE_LINE_SIZE;
        len -= CVMX_CACHE_LINE_SIZE;
    }
    return retval;
}
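
/*
 * Usage sketch (illustration only): lock a caller-owned buffer into the L2
 * cache and release it when done.  "buffer" is a hypothetical
 * cache-line-aligned object; cvmx_ptr_to_phys() converts its virtual
 * address to the physical address these functions expect.
 *
 *     static uint8_t buffer[4096] __attribute__((aligned(CVMX_CACHE_LINE_SIZE)));
 *     uint64_t phys = cvmx_ptr_to_phys(buffer);
 *
 *     if (cvmx_l2c_lock_mem_region(phys, sizeof(buffer)) != 0)
 *         cvmx_dprintf("some lines could not be locked\n");
 *     ... use the buffer ...
 *     cvmx_l2c_unlock_mem_region(phys, sizeof(buffer));
 */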

void cvmx_l2c_flush(void)
{
    uint64_t assoc, set;
    uint64_t n_assoc, n_set;

    n_set = cvmx_l2c_get_num_sets();
    n_assoc = cvmx_l2c_get_num_assoc();

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        uint64_t address;
        /* These may look like constants, but they aren't... */
        int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
        int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
        for (set = 0; set < n_set; set++) {
            for (assoc = 0; assoc < n_assoc; assoc++) {
                address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                               (assoc << assoc_shift) | (set << set_shift));
                CVMX_CACHE_WBIL2I(address, 0);
            }
        }
    } else {
        for (set = 0; set < n_set; set++)
            for (assoc = 0; assoc < n_assoc; assoc++)
                cvmx_l2c_flush_line(assoc, set);
    }
}

int cvmx_l2c_unlock_line(uint64_t address)
{
    uint32_t tad = cvmx_l2c_address_to_tad(address);

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        int assoc;
        union cvmx_l2c_tag tag;
        uint32_t tag_addr;
        uint32_t index = cvmx_l2c_address_to_index(address);

        tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));

        /*
         * For OcteonII, we can flush a line by using the physical
         * address directly, so finding the cache line used by
         * the address is only required to provide the proper
         * return value for the function.
         */
        for (assoc = 0; assoc < cvmx_l2c_get_num_assoc(); assoc++) {
            tag = cvmx_l2c_get_tag_v2(assoc, index, tad);

            if (tag.s.V && (tag.s.addr == tag_addr)) {
                CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
                return tag.s.L;
            }
        }
    } else {
        int assoc;
        union cvmx_l2c_tag tag;
        uint32_t tag_addr;

        uint32_t index = cvmx_l2c_address_to_index(address);

        /* Compute portion of address that is stored in tag */
        tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
        for (assoc = 0; assoc < cvmx_l2c_get_num_assoc(); assoc++) {
            tag = cvmx_l2c_get_tag_v2(assoc, index, tad);

            if (tag.s.V && (tag.s.addr == tag_addr)) {
                cvmx_l2c_flush_line(assoc, index);
                return tag.s.L;
            }
        }
    }
    return 0;
}

int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
{
    int num_unlocked = 0;
    /* Round start/end to cache line boundaries */
    len += start & CVMX_CACHE_LINE_MASK;
    start &= ~CVMX_CACHE_LINE_MASK;
    len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
    while (len > 0) {
        num_unlocked += cvmx_l2c_unlock_line(start);
        start += CVMX_CACHE_LINE_SIZE;
        len -= CVMX_CACHE_LINE_SIZE;
    }

    return num_unlocked;
}

/*
 * Internal l2c tag types.  These are converted to a generic structure
 * that can be used on all chips.
 */
union __cvmx_l2c_tag {
    uint64_t u64;
#ifdef __BIG_ENDIAN_BITFIELD
    struct cvmx_l2c_tag_cn50xx {
        uint64_t reserved:40;
        uint64_t V:1;        /* Line valid */
        uint64_t D:1;        /* Line dirty */
        uint64_t L:1;        /* Line locked */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t addr:20;    /* Phys mem addr (33..14) */
    } cn50xx;
    struct cvmx_l2c_tag_cn30xx {
        uint64_t reserved:41;
        uint64_t V:1;        /* Line valid */
        uint64_t D:1;        /* Line dirty */
        uint64_t L:1;        /* Line locked */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t addr:19;    /* Phys mem addr (33..15) */
    } cn30xx;
    struct cvmx_l2c_tag_cn31xx {
        uint64_t reserved:42;
        uint64_t V:1;        /* Line valid */
        uint64_t D:1;        /* Line dirty */
        uint64_t L:1;        /* Line locked */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t addr:18;    /* Phys mem addr (33..16) */
    } cn31xx;
    struct cvmx_l2c_tag_cn38xx {
        uint64_t reserved:43;
        uint64_t V:1;        /* Line valid */
        uint64_t D:1;        /* Line dirty */
        uint64_t L:1;        /* Line locked */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t addr:17;    /* Phys mem addr (33..17) */
    } cn38xx;
    struct cvmx_l2c_tag_cn58xx {
        uint64_t reserved:44;
        uint64_t V:1;        /* Line valid */
        uint64_t D:1;        /* Line dirty */
        uint64_t L:1;        /* Line locked */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t addr:16;    /* Phys mem addr (33..18) */
    } cn58xx;
#else
    struct cvmx_l2c_tag_cn50xx {
        uint64_t addr:20;    /* Phys mem addr (33..14) */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t L:1;        /* Line locked */
        uint64_t D:1;        /* Line dirty */
        uint64_t V:1;        /* Line valid */
        uint64_t reserved:40;
    } cn50xx;
    struct cvmx_l2c_tag_cn30xx {
        uint64_t addr:19;    /* Phys mem addr (33..15) */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t L:1;        /* Line locked */
        uint64_t D:1;        /* Line dirty */
        uint64_t V:1;        /* Line valid */
        uint64_t reserved:41;
    } cn30xx;
    struct cvmx_l2c_tag_cn31xx {
        uint64_t addr:18;    /* Phys mem addr (33..16) */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t L:1;        /* Line locked */
        uint64_t D:1;        /* Line dirty */
        uint64_t V:1;        /* Line valid */
        uint64_t reserved:42;
    } cn31xx;
    struct cvmx_l2c_tag_cn38xx {
        uint64_t addr:17;    /* Phys mem addr (33..17) */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t L:1;        /* Line locked */
        uint64_t D:1;        /* Line dirty */
        uint64_t V:1;        /* Line valid */
        uint64_t reserved:43;
    } cn38xx;
    struct cvmx_l2c_tag_cn58xx {
        uint64_t addr:16;    /* Phys mem addr (33..18) */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t L:1;        /* Line locked */
        uint64_t D:1;        /* Line dirty */
        uint64_t V:1;        /* Line valid */
        uint64_t reserved:44;
    } cn58xx;
#endif
    struct cvmx_l2c_tag_cn58xx cn56xx;    /* 2048 sets */
    struct cvmx_l2c_tag_cn31xx cn52xx;    /* 512 sets */
};


/**
 * @INTERNAL
 * Function to read an L2C tag.  This code makes the current core
 * the 'debug core' for the L2.  This code must only be executed by
 * one core at a time.
 *
 * @param assoc  Association (way) of the tag to dump
 * @param index  Index of the cacheline
 *
 * @return The Octeon model specific tag structure.  This is
 *         translated by a wrapper function to a generic form that is
 *         easier for applications to use.
 */
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{

    uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
    uint64_t core = cvmx_get_core_num();
    union __cvmx_l2c_tag tag_val;
    uint64_t dbg_addr = CVMX_L2C_DBG;
    unsigned long flags;

    union cvmx_l2c_dbg debug_val;
    debug_val.u64 = 0;
    /*
     * For low core count parts, the core number is always small
     * enough to stay in the correct field and not set any
     * reserved bits.
     */
    debug_val.s.ppnum = core;
    debug_val.s.l2t = 1;
    debug_val.s.set = assoc;

    cvmx_local_irq_save(flags);
    /*
     * Make sure core is quiet (no prefetches, etc.) before
     * entering debug mode.
     */
    CVMX_SYNC;
    /* Flush L1 to make sure debug load misses L1 */
    CVMX_DCACHE_INVALIDATE;

    /*
     * The following must be done in assembly as when in debug
     * mode all data loads from L2 return special debug data, not
     * normal memory contents.  Also, interrupts must be disabled,
     * since if an interrupt occurs while in debug mode the ISR
     * will get debug data from all its memory reads instead of
     * the contents of memory.
     */

    asm volatile (
        ".set push\n\t"
        ".set mips64\n\t"
        ".set noreorder\n\t"
        "sd    %[dbg_val], 0(%[dbg_addr])\n\t"   /* Enter debug mode, wait for store */
        "ld    $0, 0(%[dbg_addr])\n\t"
        "ld    %[tag_val], 0(%[tag_addr])\n\t"   /* Read L2C tag data */
        "sd    $0, 0(%[dbg_addr])\n\t"          /* Exit debug mode, wait for store */
        "ld    $0, 0(%[dbg_addr])\n\t"
        "cache 9, 0($0)\n\t"             /* Invalidate dcache to discard debug data */
        ".set pop"
        : [tag_val] "=r" (tag_val)
        : [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
        : "memory");

    cvmx_local_irq_restore(flags);

    return tag_val;
}


union cvmx_l2c_tag cvmx_l2c_get_tag_v2(uint32_t association, uint32_t index, uint32_t tad)
{
    union cvmx_l2c_tag tag;
    tag.u64 = 0;

    if ((int)association >= cvmx_l2c_get_num_assoc()) {
        cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
        return tag;
    }
    if ((int)index >= cvmx_l2c_get_num_sets()) {
        cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
                 (int)index, cvmx_l2c_get_num_sets());
        return tag;
    }
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        union cvmx_l2c_tadx_tag l2c_tadx_tag;
        uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                        (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
                        (index << CVMX_L2C_IDX_ADDR_SHIFT));
        /*
         * Use L2 cache Index load tag cache instruction, as
         * hardware loads the virtual tag for the L2 cache
         * block with the contents of L2C_TAD0_TAG
         * register.
         */
        if (tad > CVMX_L2C_TADS) {
            cvmx_dprintf("ERROR: cvmx_l2c_get_tag_v2: TAD#%d out of range\n", (unsigned int)tad);
            return tag;
        }
        CVMX_CACHE_LTGL2I(address, 0);
        CVMX_SYNC;   /* make sure CVMX_L2C_TADX_TAG is updated */
        l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(tad));

        tag.s.V     = l2c_tadx_tag.s.valid;
        tag.s.D     = l2c_tadx_tag.s.dirty;
        tag.s.L     = l2c_tadx_tag.s.lock;
        tag.s.U     = l2c_tadx_tag.s.use;
        tag.s.addr  = l2c_tadx_tag.s.tag;
    } else {
        union __cvmx_l2c_tag tmp_tag;
        /* __read_l2_tag is intended for internal use only */
        tmp_tag = __read_l2_tag(association, index);

        /*
         * Convert all tag structure types to generic version,
         * as it can represent all models.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
            tag.s.V    = tmp_tag.cn58xx.V;
            tag.s.D    = tmp_tag.cn58xx.D;
            tag.s.L    = tmp_tag.cn58xx.L;
            tag.s.U    = tmp_tag.cn58xx.U;
            tag.s.addr = tmp_tag.cn58xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
            tag.s.V    = tmp_tag.cn38xx.V;
            tag.s.D    = tmp_tag.cn38xx.D;
            tag.s.L    = tmp_tag.cn38xx.L;
            tag.s.U    = tmp_tag.cn38xx.U;
            tag.s.addr = tmp_tag.cn38xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
            tag.s.V    = tmp_tag.cn31xx.V;
            tag.s.D    = tmp_tag.cn31xx.D;
            tag.s.L    = tmp_tag.cn31xx.L;
            tag.s.U    = tmp_tag.cn31xx.U;
            tag.s.addr = tmp_tag.cn31xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
            tag.s.V    = tmp_tag.cn30xx.V;
            tag.s.D    = tmp_tag.cn30xx.D;
            tag.s.L    = tmp_tag.cn30xx.L;
            tag.s.U    = tmp_tag.cn30xx.U;
            tag.s.addr = tmp_tag.cn30xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
            tag.s.V    = tmp_tag.cn50xx.V;
            tag.s.D    = tmp_tag.cn50xx.D;
            tag.s.L    = tmp_tag.cn50xx.L;
            tag.s.U    = tmp_tag.cn50xx.U;
            tag.s.addr = tmp_tag.cn50xx.addr;
        } else {
            cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
        }
    }
    return tag;
}

union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
    union cvmx_l2c_tag tag;
    tag.u64 = 0;

    if ((int)association >= cvmx_l2c_get_num_assoc()) {
        cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
        return tag;
    }
    if ((int)index >= cvmx_l2c_get_num_sets()) {
        cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
                 (int)index, cvmx_l2c_get_num_sets());
        return tag;
    }
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        union cvmx_l2c_tadx_tag l2c_tadx_tag;
        uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                        (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
                        (index << CVMX_L2C_IDX_ADDR_SHIFT));
        if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
            cvmx_dprintf("ERROR: Cannot use %s on OCTEON CN68XX, use cvmx_l2c_get_tag_v2 instead!\n",
                     __func__);
            return tag;
        }
        /*
         * Use L2 cache Index load tag cache instruction, as
         * hardware loads the virtual tag for the L2 cache
         * block with the contents of L2C_TAD0_TAG
         * register.
         */
        CVMX_CACHE_LTGL2I(address, 0);
        CVMX_SYNC;   /* make sure CVMX_L2C_TADX_TAG is updated */
        l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));

        tag.s.V     = l2c_tadx_tag.s.valid;
        tag.s.D     = l2c_tadx_tag.s.dirty;
        tag.s.L     = l2c_tadx_tag.s.lock;
        tag.s.U     = l2c_tadx_tag.s.use;
        tag.s.addr  = l2c_tadx_tag.s.tag;
    } else {
        union __cvmx_l2c_tag tmp_tag;
        /* __read_l2_tag is intended for internal use only */
        tmp_tag = __read_l2_tag(association, index);

        /*
         * Convert all tag structure types to generic version,
         * as it can represent all models.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
            tag.s.V    = tmp_tag.cn58xx.V;
            tag.s.D    = tmp_tag.cn58xx.D;
            tag.s.L    = tmp_tag.cn58xx.L;
            tag.s.U    = tmp_tag.cn58xx.U;
            tag.s.addr = tmp_tag.cn58xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
            tag.s.V    = tmp_tag.cn38xx.V;
            tag.s.D    = tmp_tag.cn38xx.D;
            tag.s.L    = tmp_tag.cn38xx.L;
            tag.s.U    = tmp_tag.cn38xx.U;
            tag.s.addr = tmp_tag.cn38xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
            tag.s.V    = tmp_tag.cn31xx.V;
            tag.s.D    = tmp_tag.cn31xx.D;
            tag.s.L    = tmp_tag.cn31xx.L;
            tag.s.U    = tmp_tag.cn31xx.U;
            tag.s.addr = tmp_tag.cn31xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
            tag.s.V    = tmp_tag.cn30xx.V;
            tag.s.D    = tmp_tag.cn30xx.D;
            tag.s.L    = tmp_tag.cn30xx.L;
            tag.s.U    = tmp_tag.cn30xx.U;
            tag.s.addr = tmp_tag.cn30xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
            tag.s.V    = tmp_tag.cn50xx.V;
            tag.s.D    = tmp_tag.cn50xx.D;
            tag.s.L    = tmp_tag.cn50xx.L;
            tag.s.U    = tmp_tag.cn50xx.U;
            tag.s.addr = tmp_tag.cn50xx.addr;
        } else {
            cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
        }
    }
    return tag;
}
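
/*
 * Usage sketch (illustration only): dump the tag of every way for the set
 * that a hypothetical caller-supplied physical address "phys" maps to,
 * e.g. while debugging line locking.  On CN68XX cvmx_l2c_get_tag_v2()
 * must be used instead, with the TAD derived from the address via
 * cvmx_l2c_address_to_tad(), as the error check above enforces.
 *
 *     uint32_t index = cvmx_l2c_address_to_index(phys);
 *     int way;
 *
 *     for (way = 0; way < cvmx_l2c_get_num_assoc(); way++) {
 *         union cvmx_l2c_tag tag = cvmx_l2c_get_tag(way, index);
 *         cvmx_dprintf("way %d: V=%d D=%d L=%d addr=0x%llx\n", way,
 *                      (int)tag.s.V, (int)tag.s.D, (int)tag.s.L,
 *                      (unsigned long long)tag.s.addr);
 *     }
 */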
#endif

int cvmx_l2c_address_to_tad(uint64_t addr)
{
    uint32_t tad;
    if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
        cvmx_l2c_ctl_t l2c_ctl;
        l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
        if (!l2c_ctl.s.disidxalias) {
            tad = ((addr >> 7) ^ (addr >> 12) ^ (addr >> 18)) & 3;
        } else {
            tad = (addr >> 7) & 3;
        }
    } else {
        tad = 0;
    }
    return tad;
}

uint32_t cvmx_l2c_v2_address_to_tag(uint64_t addr)
{
#define DR0_END   ((256 * 1024 * 1024) - 1)
#define DR1_START (512 * 1024 * 1024)
#define L2_HOLE   (256 * 1024 * 1024)

    if ((addr > DR0_END) && (addr < DR1_START))
        return (uint32_t)-1;
    if (addr > DR1_START)
        addr = addr - L2_HOLE;
    addr = addr & 0x7FFFFFFFFULL;
    return (uint32_t)(addr >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
}

uint32_t cvmx_l2c_address_to_index(uint64_t addr)
{
    uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
    int indxalias = 0;

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        union cvmx_l2c_ctl l2c_ctl;
        l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
        indxalias = !l2c_ctl.s.disidxalias;
    } else {
        union cvmx_l2c_cfg l2c_cfg;
        l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
        indxalias = l2c_cfg.s.idxalias;
    }

    if (indxalias) {
        if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
            uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
            idx ^= (idx / cvmx_l2c_get_num_sets()) & 0x3ff;
            idx ^= a_14_12 & 0x3;
            idx ^= a_14_12 << 2;
        } else if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
            uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
            idx ^= idx / cvmx_l2c_get_num_sets();
            idx ^= a_14_12;
        } else {
            idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
        }
    }
    idx &= CVMX_L2C_IDX_MASK;
    return idx;
}
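
/*
 * Usage sketch (illustration only): given a physical address "phys"
 * (hypothetical, supplied by the caller), report which L2 set, TAD and
 * tag the controller will use for it.  cvmx_l2c_v2_address_to_tag() is
 * only meaningful on OcteonII parts, where it also accounts for the LMC
 * hole as shown above.
 *
 *     uint32_t index = cvmx_l2c_address_to_index(phys);
 *     int tad = cvmx_l2c_address_to_tad(phys);
 *     uint32_t tag = cvmx_l2c_v2_address_to_tag(phys);
 *
 *     cvmx_dprintf("phys 0x%llx -> set %u, tad %d, tag 0x%x\n",
 *                  (unsigned long long)phys, (unsigned)index, tad,
 *                  (unsigned)tag);
 */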

int cvmx_l2c_get_cache_size_bytes(void)
{
    return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
        CVMX_CACHE_LINE_SIZE;
}

/**
 * Return log base 2 of the number of sets in the L2 cache.
 *
 * @return Log base 2 of the number of sets
 */
int cvmx_l2c_get_set_bits(void)
{
    int l2_set_bits;
    if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
        l2_set_bits = 11;    /* 2048 sets */
    else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX))
        l2_set_bits = 10;    /* 1024 sets */
    else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
        l2_set_bits = 9;    /* 512 sets */
    else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
        l2_set_bits = 8;    /* 256 sets */
    else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
        l2_set_bits = 7;    /* 128 sets */
    else {
        cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
        l2_set_bits = 11;    /* 2048 sets */
    }
    return l2_set_bits;
}

/* Return the number of sets in the L2 Cache */
int cvmx_l2c_get_num_sets(void)
{
    return 1 << cvmx_l2c_get_set_bits();
}

/* Return the number of associations in the L2 Cache */
int cvmx_l2c_get_num_assoc(void)
{
    int l2_assoc;
    if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
        OCTEON_IS_MODEL(OCTEON_CN52XX) ||
        OCTEON_IS_MODEL(OCTEON_CN58XX) ||
        OCTEON_IS_MODEL(OCTEON_CN50XX) ||
        OCTEON_IS_MODEL(OCTEON_CN38XX))
        l2_assoc = 8;
    else if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        l2_assoc = 16;
    else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
         OCTEON_IS_MODEL(OCTEON_CN30XX))
        l2_assoc = 4;
    else {
        cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
        l2_assoc = 8;
    }

    /* Check to see if part of the cache is disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        union cvmx_mio_fus_dat3 mio_fus_dat3;

        mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
        /*
         * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows:
         * <2> is not used for 63xx
         * <1> disables 1/2 ways
         * <0> disables 1/4 ways
         * They are cumulative, so for 63xx:
         * <1> <0>
         * 0 0 16-way 2MB cache
         * 0 1 12-way 1.5MB cache
         * 1 0 8-way 1MB cache
         * 1 1 4-way 512KB cache
         */

        if (mio_fus_dat3.cn63xx.l2c_crip == 3)
            l2_assoc = 4;
        else if (mio_fus_dat3.cn63xx.l2c_crip == 2)
            l2_assoc = 8;
        else if (mio_fus_dat3.cn63xx.l2c_crip == 1)
            l2_assoc = 12;
    } else {
        union cvmx_l2d_fus3 val;
        val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
        /*
         * Using shifts here, as bit position names are
         * different for each model but they all mean the
         * same.
         */
        if ((val.u64 >> 35) & 0x1)
            l2_assoc = l2_assoc >> 2;
        else if ((val.u64 >> 34) & 0x1)
            l2_assoc = l2_assoc >> 1;
    }
    return l2_assoc;
}
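
/*
 * Usage sketch (illustration only): print the L2 geometry detected on the
 * running chip.  Any ways fused off are already reflected in
 * cvmx_l2c_get_num_assoc(), so the computed size matches what the
 * hardware actually provides.
 *
 *     cvmx_dprintf("L2: %d sets x %d ways x %d byte lines = %d bytes\n",
 *                  cvmx_l2c_get_num_sets(), cvmx_l2c_get_num_assoc(),
 *                  CVMX_CACHE_LINE_SIZE, cvmx_l2c_get_cache_size_bytes());
 */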

#ifndef CVMX_BUILD_FOR_LINUX_HOST
/**
 * Flush a line from the L2 cache.
 * This should only be called from one core at a time, as this routine
 * sets the core to the 'debug' core in order to flush the line.
 *
 * @param assoc  Association (or way) to flush
 * @param index  Index to flush
 */
1087210284Sjmallettvoid cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
1088210284Sjmallett{
    /* Check the range of the index. */
    if (index >= (uint32_t)cvmx_l2c_get_num_sets()) {
        cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
        return;
    }

    /* Check the range of the association (way). */
    if (assoc >= (uint32_t)cvmx_l2c_get_num_assoc()) {
        cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
        return;
    }
1100210284Sjmallett
1101232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
1102215990Sjmallett        uint64_t address;
        /*
         * Create the address based on index and association.
         * Bits<20:17> select the way of the cache block involved in
         *             the operation.
         * Bits<16:7> of the effective address select the index.
         */
1108215990Sjmallett        address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
1109232812Sjmallett                (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
1110232812Sjmallett                (index << CVMX_L2C_IDX_ADDR_SHIFT));
1111215990Sjmallett        CVMX_CACHE_WBIL2I(address, 0);
1112232812Sjmallett    } else {
1113232812Sjmallett        union cvmx_l2c_dbg l2cdbg;
1114210284Sjmallett
1115215990Sjmallett        l2cdbg.u64 = 0;
1116215990Sjmallett        if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
1117215990Sjmallett            l2cdbg.s.ppnum = cvmx_get_core_num();
1118215990Sjmallett        l2cdbg.s.finv = 1;
1119215990Sjmallett
1120215990Sjmallett        l2cdbg.s.set = assoc;
1121215990Sjmallett        cvmx_spinlock_lock(&cvmx_l2c_spinlock);
        /*
         * Enter debug mode, making sure all other writes have
         * completed before we do so.
         */
1126215990Sjmallett        CVMX_SYNC;
1127215990Sjmallett        cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
1128215990Sjmallett        cvmx_read_csr(CVMX_L2C_DBG);
1129215990Sjmallett
1130232812Sjmallett        CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
1131232812Sjmallett                            index * CVMX_CACHE_LINE_SIZE),
1132232812Sjmallett                       0);
1133215990Sjmallett        /* Exit debug mode */
1134215990Sjmallett        CVMX_SYNC;
1135215990Sjmallett        cvmx_write_csr(CVMX_L2C_DBG, 0);
1136215990Sjmallett        cvmx_read_csr(CVMX_L2C_DBG);
1137215990Sjmallett        cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
1138215990Sjmallett    }
1139210284Sjmallett}
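/*
 * Example (editor's sketch, not part of the original source): flushing
 * every line in the L2 by walking all sets and ways with
 * cvmx_l2c_flush_line().  The SDK's cvmx_l2c_flush() exists for exactly
 * this job; the loop is shown only to illustrate the assoc/index
 * parameters, and the single-core restriction noted above still applies.
 *
 *     uint32_t set, way;
 *     for (set = 0; set < (uint32_t)cvmx_l2c_get_num_sets(); set++)
 *         for (way = 0; way < (uint32_t)cvmx_l2c_get_num_assoc(); way++)
 *             cvmx_l2c_flush_line(way, set);
 */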
1140210284Sjmallett#endif
1141215990Sjmallett
/**
 * Initialize the BIG address in L2C+DRAM to generate a proper error
 * when reading from or writing to a non-existent memory location.
 *
 * @param mem_size  Amount of DRAM configured in MB.
 * @param mode      Allow (0) / disallow (non-zero) reporting of errors via
 *                  L2C_INT_SUM[BIGRD,BIGWR].
 */
1149232812Sjmallettvoid cvmx_l2c_set_big_size(uint64_t mem_size, int mode)
1150232812Sjmallett{
    if ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        && !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
        cvmx_l2c_big_ctl_t big_ctl;
        int bits = 0, zero_bits = 0;
        uint64_t mem;

        if (mem_size > (CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024)) {
            cvmx_dprintf("WARNING: Invalid memory size (%lld) requested, should be <= %lld\n",
                (unsigned long long)mem_size, (unsigned long long)CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024);
            mem_size = CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024;
        }

        /* Count the bits in mem_size to verify it is a power of two. */
        mem = mem_size;
        while (mem) {
            if ((mem & 1) == 0)
                zero_bits++;
            bits++;
            mem >>= 1;
        }

        if ((bits - zero_bits) != 1 || (bits - 9) <= 0) {
            cvmx_dprintf("ERROR: Invalid DRAM size (%lld) requested, refer to L2C_BIG_CTL[maxdram] for valid options.\n",
                (unsigned long long)mem_size);
            return;
        }

        big_ctl.u64 = 0;
        big_ctl.s.maxdram = bits - 9;
        big_ctl.s.disable = mode;
        cvmx_write_csr(CVMX_L2C_BIG_CTL, big_ctl.u64);
    }
1185232812Sjmallett}
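/*
 * Worked example (editor's note, not from the original source): for a
 * power-of-two mem_size in MB the bit counting above yields
 * L2C_BIG_CTL[MAXDRAM] = log2(mem_size) - 8, so:
 *
 *     cvmx_l2c_set_big_size(512, 1);    MAXDRAM = 1, reporting disabled
 *     cvmx_l2c_set_big_size(4096, 0);   MAXDRAM = 4, reporting enabled
 *
 * Sizes that are not a power of two, or smaller than 512 MB, fail the
 * (bits - zero_bits) != 1 || (bits - 9) <= 0 check and are rejected
 * with an error message.
 */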
1186232812Sjmallett
1187232812Sjmallett#if !defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_LINUX_KERNEL)
1188215990Sjmallett/* L2C Virtualization APIs. These APIs are based on Octeon II documentation. */
1189215990Sjmallett
1190232812Sjmallett/*
1191232812Sjmallett * These could be used by the Linux kernel, but currently are not, so
1192232812Sjmallett * disable them to save space.
1193232812Sjmallett */
1194232812Sjmallett
1195215990Sjmallett/**
1196215990Sjmallett * @INTERNAL
 * Helper function to decode the number of allowed virtualization IDs
 * into the L2C_VRT_CTL[NUMID] encoding.
 *
 * @param nvid     Number of virtual IDs.
 * @return         The NUMID encoding on success, or -1 on failure.
1202215990Sjmallett */
1203215990Sjmallettstatic inline int __cvmx_l2c_vrt_decode_numid(int nvid)
1204215990Sjmallett{
1205215990Sjmallett    int bits = -1;
1206215990Sjmallett    int zero_bits = -1;
1207215990Sjmallett
1208232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
1209232812Sjmallett        return -1;
1210215990Sjmallett
1211232812Sjmallett    if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED) {
        cvmx_dprintf("WARNING: Invalid number of virtual IDs (%d) requested, should be <= %d\n",
                 nvid, CVMX_L2C_VRT_MAX_VIRTID_ALLOWED);
1214232812Sjmallett        return bits;
1215232812Sjmallett    }
1216215990Sjmallett
1217232812Sjmallett    while (nvid) {
1218232812Sjmallett        if ((nvid & 1) == 0)
1219232812Sjmallett            zero_bits++;
1220215990Sjmallett
1221232812Sjmallett        bits++;
1222232812Sjmallett        nvid >>= 1;
1223215990Sjmallett    }
1224232812Sjmallett
1225232812Sjmallett    if (bits == 1 || (zero_bits && ((bits - zero_bits) == 1)))
1226232812Sjmallett        return zero_bits;
1227215990Sjmallett    return -1;
1228215990Sjmallett}
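/*
 * Worked example (editor's note): the bit counting above accepts only
 * powers of two greater than 1 and returns log2(nvid) - 1, which is the
 * L2C_VRT_CTL[NUMID] encoding used below:
 *
 *     nvid:   2  4  8  16  32  64
 *     NUMID:  0  1  2   3   4   5
 *
 * cvmx_l2c_vrt_get_max_virtids() inverts this as 1 << (NUMID + 1).
 */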
1229215990Sjmallett
1230215990Sjmallett/**
 * Set the maximum number of virtual IDs allowed in a machine.
 *
 * @param nvid   Number of virtual IDs allowed in a machine.
1234215990Sjmallett * @return       Return 0 on success or -1 on failure.
1235215990Sjmallett */
1236215990Sjmallettint cvmx_l2c_vrt_set_max_virtids(int nvid)
1237215990Sjmallett{
    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
    int numid;
1239215990Sjmallett
1240232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
1241232812Sjmallett        return -1;
1242215990Sjmallett
1243232812Sjmallett    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1244215990Sjmallett
1245232812Sjmallett    if (l2c_vrt_ctl.s.enable) {
1246232812Sjmallett        cvmx_dprintf("WARNING: Changing number of Virtual Machine IDs is not allowed after Virtualization is enabled\n");
1247232812Sjmallett        return -1;
1248232812Sjmallett    }
1249215990Sjmallett
1250232812Sjmallett    if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED) {
1251232812Sjmallett        cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_max_virtids: Invalid number of Virtual Machine IDs(%d) requested, max allowed %d\n",
1252232812Sjmallett                 nvid, CVMX_L2C_VRT_MAX_VIRTID_ALLOWED);
1253232812Sjmallett        return -1;
1254215990Sjmallett    }
1255232812Sjmallett
    /* Calculate the NUMID encoding from nvid; reject values that cannot be encoded. */
    numid = __cvmx_l2c_vrt_decode_numid(nvid);
    if (numid == -1)
        return -1;
    l2c_vrt_ctl.s.numid = numid;
    cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1259215990Sjmallett    return 0;
1260215990Sjmallett}
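/*
 * Example (editor's sketch): limiting the system to 8 virtual machine
 * IDs; this must be done before cvmx_l2c_vrt_enable() is called.
 *
 *     if (cvmx_l2c_vrt_set_max_virtids(8) != 0)
 *         cvmx_dprintf("Could not configure 8 virtual machine IDs\n");
 */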
1261215990Sjmallett
1262215990Sjmallett/**
 * Get the maximum number of virtual IDs allowed in a machine.
1264215990Sjmallett *
1265215990Sjmallett * @return  Return number of virtual machine IDs or -1 on failure.
1266215990Sjmallett */
1267215990Sjmallettint cvmx_l2c_vrt_get_max_virtids(void)
1268215990Sjmallett{
1269232812Sjmallett    int virtids;
1270232812Sjmallett    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1271215990Sjmallett
1272232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
1273232812Sjmallett        return -1;
1274232812Sjmallett
1275232812Sjmallett    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1276232812Sjmallett    virtids = 1 << (l2c_vrt_ctl.s.numid + 1);
1277232812Sjmallett    if (virtids > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED) {
1278232812Sjmallett        cvmx_dprintf("WARNING: cvmx_l2c_vrt_get_max_virtids: Invalid number of Virtual IDs initialized (%d)\n",
1279232812Sjmallett                 virtids);
1280232812Sjmallett        return -1;
1281215990Sjmallett    }
1282215990Sjmallett    return virtids;
1283215990Sjmallett}
1284215990Sjmallett
1285215990Sjmallett/**
1286215990Sjmallett * @INTERNAL
 * Helper function to decode a memory size into the memory space coverage
 * encoding of L2C_VRT_MEM, i.e. L2C_VRT_CTL[MEMSZ].
 *
 * @param memsz    Memory size in GB.
 * @return         The MEMSZ encoding on success, or -1 on failure.
1292215990Sjmallett */
1293215990Sjmallettstatic inline int __cvmx_l2c_vrt_decode_memsize(int memsz)
1294215990Sjmallett{
1295215990Sjmallett    int bits = 0;
1296215990Sjmallett    int zero_bits = 0;
1297215990Sjmallett
1298232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
1299232812Sjmallett        return -1;
1300215990Sjmallett
1301232812Sjmallett    if (memsz == 0 || memsz > CVMX_L2C_MAX_MEMSZ_ALLOWED) {
        cvmx_dprintf("WARNING: Invalid virtual memory size (%d) requested, should be <= %d\n",
1303232812Sjmallett                 memsz, CVMX_L2C_MAX_MEMSZ_ALLOWED);
1304232812Sjmallett        return -1;
1305232812Sjmallett    }
1306215990Sjmallett
1307232812Sjmallett    while (memsz) {
1308232812Sjmallett        if ((memsz & 1) == 0)
1309232812Sjmallett            zero_bits++;
1310215990Sjmallett
1311232812Sjmallett        bits++;
1312232812Sjmallett        memsz >>= 1;
1313215990Sjmallett    }
1314232812Sjmallett
1315232812Sjmallett    if (bits == 1 || (bits - zero_bits) == 1)
1316232812Sjmallett        return zero_bits;
1317215990Sjmallett    return -1;
1318215990Sjmallett}
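/*
 * Worked example (editor's note): the bit counting above accepts only
 * powers of two and returns log2(memsz), the L2C_VRT_CTL[MEMSZ]
 * encoding:
 *
 *     memsz (GB):  1  2  4  8  16
 *     MEMSZ:       0  1  2  3   4
 */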
1319215990Sjmallett
1320215990Sjmallett/**
 * Set the maximum size of the memory space to be allocated for virtualization.
1322215990Sjmallett *
1323215990Sjmallett * @param memsz  Size of the virtual memory in GB
1324215990Sjmallett * @return       Return 0 on success or -1 on failure.
1325215990Sjmallett */
1326215990Sjmallettint cvmx_l2c_vrt_set_max_memsz(int memsz)
1327215990Sjmallett{
1328232812Sjmallett    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1329232812Sjmallett    int decode = 0;
1330215990Sjmallett
1331232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
1332232812Sjmallett        return -1;
1333215990Sjmallett
1334215990Sjmallett
1335232812Sjmallett    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1336215990Sjmallett
1337232812Sjmallett    if (l2c_vrt_ctl.s.enable) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_max_memsz: Changing the memory size after Virtualization is enabled is not allowed.\n");
1339232812Sjmallett        return -1;
1340215990Sjmallett    }
1341232812Sjmallett
1342232812Sjmallett    if (memsz >= (int)(cvmx_sysinfo_get()->system_dram_size / 1000000)) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_max_memsz: Invalid memory size (%d GB), greater than available on the chip\n",
1344232812Sjmallett                 memsz);
1345232812Sjmallett        return -1;
1346232812Sjmallett    }
1347232812Sjmallett
1348232812Sjmallett    decode = __cvmx_l2c_vrt_decode_memsize(memsz);
1349232812Sjmallett    if (decode == -1) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_max_memsz: Invalid memory size (%d GB), refer to L2C_VRT_CTL[MEMSZ] for more information\n",
1351232812Sjmallett                 memsz);
1352232812Sjmallett        return -1;
1353232812Sjmallett    }
1354232812Sjmallett
1355232812Sjmallett    l2c_vrt_ctl.s.memsz = decode;
1356232812Sjmallett    cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1357215990Sjmallett    return 0;
1358215990Sjmallett}
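/*
 * Example (editor's sketch): reserving a 4 GB virtualization window;
 * like the virtual ID limit, this must be set before virtualization is
 * enabled.
 *
 *     if (cvmx_l2c_vrt_set_max_memsz(4) != 0)
 *         cvmx_dprintf("Could not set the virtualization memory size\n");
 */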
1359215990Sjmallett
1360215990Sjmallett/**
 * Assign a virtual ID to a set of cores.
 *
 * @param virtid    Virtual ID to assign.
 * @param coremask  The group of cores to assign the virtual ID to.
1365215990Sjmallett * @return          Return 0 on success, otherwise -1.
1366215990Sjmallett */
1367215990Sjmallettint cvmx_l2c_vrt_assign_virtid(int virtid, uint32_t coremask)
1368215990Sjmallett{
1369215990Sjmallett    uint32_t core = 0;
1370232812Sjmallett    int found = 0;
1371232812Sjmallett    int max_virtid;
1372215990Sjmallett
1373232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
1374232812Sjmallett        return -1;
1375215990Sjmallett
1376232812Sjmallett    max_virtid = cvmx_l2c_vrt_get_max_virtids();
1377215990Sjmallett
1378232812Sjmallett    if (virtid > max_virtid) {
1379232812Sjmallett        cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Max %d number of virtids are allowed, passed %d.\n",
1380232812Sjmallett                 max_virtid, virtid);
1381232812Sjmallett        return -1;
1382232812Sjmallett    }
1383215990Sjmallett
1384232812Sjmallett    while (core < cvmx_octeon_num_cores()) {
1385232812Sjmallett        if ((coremask >> core) & 1) {
1386232812Sjmallett            cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
1387232812Sjmallett            cvmx_l2c_virtid_iobx_t l2c_virtid_iobx;
1388232812Sjmallett            l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
1389215990Sjmallett
1390232812Sjmallett            /* Check if the core already has a virtid assigned. */
1391232812Sjmallett            if (l2c_virtid_ppx.s.id) {
1392232812Sjmallett                cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Changing virtid of core #%d to %d from %d.\n",
1393232812Sjmallett                         (unsigned int)core, virtid,
1394232812Sjmallett                         l2c_virtid_ppx.s.id);
1395232812Sjmallett
1396232812Sjmallett                /* Flush L2 cache to avoid write errors */
1397232812Sjmallett                cvmx_l2c_flush();
1398215990Sjmallett            }
1399232812Sjmallett            cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), virtid & 0x3f);
1400215990Sjmallett
1401232812Sjmallett            /* Set the IOB to normal mode. */
1402232812Sjmallett            l2c_virtid_iobx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_IOBX(core));
1403232812Sjmallett            l2c_virtid_iobx.s.id = 1;
1404232812Sjmallett            l2c_virtid_iobx.s.dwbid = 0;
1405232812Sjmallett            cvmx_write_csr(CVMX_L2C_VIRTID_IOBX(core),
1406232812Sjmallett                       l2c_virtid_iobx.u64);
1407232812Sjmallett            found = 1;
1408215990Sjmallett        }
1409232812Sjmallett        core++;
1410215990Sjmallett    }
1411232812Sjmallett
1412232812Sjmallett    /* Invalid coremask passed. */
1413232812Sjmallett    if (!found) {
1414232812Sjmallett        cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Invalid coremask(0x%x) passed\n",
1415232812Sjmallett                 (unsigned int)coremask);
1416232812Sjmallett        return -1;
1417232812Sjmallett    }
1418215990Sjmallett    return 0;
1419215990Sjmallett}
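/*
 * Example (editor's sketch): bit N of the coremask selects core N, so
 * the calls below give cores 0-1 virtual ID 1 and cores 2-3 virtual
 * ID 2.
 *
 *     cvmx_l2c_vrt_assign_virtid(1, 0x3);
 *     cvmx_l2c_vrt_assign_virtid(2, 0xc);
 */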
1420215990Sjmallett
1421215990Sjmallett/**
 * Remove a virtual ID from the set of cores it is assigned to, clearing
 * the virtid stored for each matching core.
1424215990Sjmallett *
1425215990Sjmallett * @param virtid  Remove the specified Virtualization machine ID.
1426215990Sjmallett */
1427215990Sjmallettvoid cvmx_l2c_vrt_remove_virtid(int virtid)
1428215990Sjmallett{
1429232812Sjmallett    uint32_t core;
1430232812Sjmallett    cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
1431215990Sjmallett
1432232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
1433232812Sjmallett        return;
1434232812Sjmallett
1435232812Sjmallett    for (core = 0; core < cvmx_octeon_num_cores(); core++) {
1436232812Sjmallett        l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
1437232812Sjmallett        if (virtid == l2c_virtid_ppx.s.id)
1438232812Sjmallett            cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), 0);
1439215990Sjmallett    }
1440215990Sjmallett}
1441215990Sjmallett
1442215990Sjmallett/**
 * Helper function to compute the granularity at which L2C_VRT_MEM
 * protects memory.
1444215990Sjmallett */
1445215990Sjmallettstatic uint64_t __cvmx_l2c_vrt_get_granularity(void)
1446215990Sjmallett{
1447215990Sjmallett    uint64_t gran = 0;
1448215990Sjmallett
1449232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
1450215990Sjmallett        int nvid;
1451215990Sjmallett        uint64_t szd;
1452215990Sjmallett        cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1453215990Sjmallett
1454215990Sjmallett        l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1455215990Sjmallett        nvid = cvmx_l2c_vrt_get_max_virtids();
1456215990Sjmallett        szd = (1ull << l2c_vrt_ctl.s.memsz) * 1024 * 1024 * 1024;
1457215990Sjmallett        gran = (unsigned long long)(szd * nvid)/(32ull * 1024);
1458215990Sjmallett    }
1459215990Sjmallett    return gran;
1460215990Sjmallett}
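/*
 * Worked example (editor's note): L2C_VRT_MEM(0..1023) provides
 * 1024 * 32 = 32768 protection bits that are divided among the
 * configured virtual IDs, so each bit covers
 * (window size * number of IDs) / 32768 bytes.  With a 4 GB window
 * (MEMSZ = 2) and 64 virtual IDs that is 4 GB * 64 / 32768 = 8 MB per
 * protection bit.
 */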
1461215990Sjmallett
1462232812SjmallettCVMX_SHARED cvmx_spinlock_t cvmx_l2c_vrt_spinlock;
1463232812Sjmallett
1464215990Sjmallett/**
 * Allow or block write access to a memory region for a given virtual ID.
1466215990Sjmallett *
1467215990Sjmallett * @param start_addr   Starting address of memory region
1468215990Sjmallett * @param size         Size of the memory to protect
1469215990Sjmallett * @param virtid       Virtual ID to use
1470215990Sjmallett * @param mode         Allow/Disallow write access
1471215990Sjmallett *                        = 0,  Allow write access by virtid
 *                        = 1,  Disallow write access by virtid
 * @return               0 on success, -1 on failure.
 */
1474215990Sjmallettint cvmx_l2c_vrt_memprotect(uint64_t start_addr, int size, int virtid, int mode)
1475215990Sjmallett{
1476232812Sjmallett    uint64_t gran;
1477232812Sjmallett    uint64_t end_addr;
1478232812Sjmallett    int byte_offset, virtid_offset;
1479232812Sjmallett    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1480232812Sjmallett    cvmx_l2c_vrt_memx_t l2c_vrt_mem;
1481232812Sjmallett    cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
1482232812Sjmallett    int found;
1483232812Sjmallett    uint32_t core;
1484215990Sjmallett
1485232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
1486232812Sjmallett        return -1;
1487232812Sjmallett    /*
1488232812Sjmallett     * Check the alignment of start address, should be aligned to the
1489232812Sjmallett     * granularity.
1490232812Sjmallett     */
1491232812Sjmallett    gran = __cvmx_l2c_vrt_get_granularity();
1492232812Sjmallett    end_addr = start_addr + size;
1493232812Sjmallett    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1494215990Sjmallett
1495232812Sjmallett    /* No need to protect if virtualization is not enabled */
1496232812Sjmallett    if (!l2c_vrt_ctl.s.enable) {
1497232812Sjmallett        cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization is not enabled.\n");
1498232812Sjmallett        return -1;
1499232812Sjmallett    }
1500215990Sjmallett
1501232812Sjmallett    if (virtid > cvmx_l2c_vrt_get_max_virtids()) {
1502232812Sjmallett        cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id is greater than max allowed\n");
1503232812Sjmallett        return -1;
1504232812Sjmallett    }
1505232812Sjmallett
1506232812Sjmallett    /* No need to protect if virtid is not assigned to a core */
1507232812Sjmallett    found = 0;
1508232812Sjmallett    for (core = 0; core < cvmx_octeon_num_cores(); core++) {
1509232812Sjmallett        l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
1510232812Sjmallett        if (l2c_virtid_ppx.s.id == virtid) {
1511232812Sjmallett            found = 1;
1512232812Sjmallett            break;
1513215990Sjmallett        }
1514232812Sjmallett    }
1515232812Sjmallett    if (found == 0) {
1516232812Sjmallett        cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id (%d) is not assigned to any core.\n",
1517232812Sjmallett                 virtid);
1518232812Sjmallett        return -1;
1519232812Sjmallett    }
1520215990Sjmallett
1521232812Sjmallett    /*
1522232812Sjmallett     * Make sure previous stores are through before protecting the
1523232812Sjmallett     * memory.
1524232812Sjmallett     */
1525232812Sjmallett    CVMX_SYNCW;
1526215990Sjmallett
1527232812Sjmallett    /*
1528232812Sjmallett     * If the L2/DRAM physical address is >= 512 MB, subtract 256
1529232812Sjmallett     * MB to get the address to use. This is because L2C removes
1530232812Sjmallett     * the 256MB "hole" between DR0 and DR1.
1531232812Sjmallett     */
1532232812Sjmallett    if (start_addr >= (512 * 1024 * 1024))
1533232812Sjmallett        start_addr -= 256 * 1024 * 1024;
1534215990Sjmallett
1535232812Sjmallett    if (start_addr != ((start_addr + (gran - 1)) & ~(gran - 1))) {
1536232812Sjmallett        cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Start address is not aligned\n");
1537232812Sjmallett        return -1;
1538232812Sjmallett    }
1539215990Sjmallett
    /*
     * The size of the memory to protect should also be aligned to the
     * granularity; round the end address up so the whole requested
     * region is covered.
     */
    if (end_addr != ((end_addr + (gran - 1)) & ~(gran - 1))) {
        end_addr = (end_addr + (gran - 1)) & ~(gran - 1);
        size = end_addr - start_addr;
    }
1548215990Sjmallett
1549232812Sjmallett    byte_offset = l2c_vrt_ctl.s.memsz + l2c_vrt_ctl.s.numid + 16;
1550232812Sjmallett    virtid_offset = 14 - l2c_vrt_ctl.s.numid;
1551215990Sjmallett
1552232812Sjmallett    cvmx_spinlock_lock(&cvmx_l2c_vrt_spinlock);
1553215990Sjmallett
1554232812Sjmallett    /* Enable memory protection for each virtid for the specified range. */
1555232812Sjmallett    while (start_addr < end_addr) {
1556232812Sjmallett        /*
1557232812Sjmallett         * When L2C virtualization is enabled and a bit is set
1558232812Sjmallett         * in L2C_VRT_MEM(0..1023), then L2C prevents the
1559232812Sjmallett         * selected virtual machine from storing to the
1560232812Sjmallett         * selected L2C/DRAM region.
1561232812Sjmallett         */
1562232812Sjmallett        int offset, position, i;
1563232812Sjmallett        int l2c_vrt_mem_bit_index = start_addr >> byte_offset;
1564232812Sjmallett        l2c_vrt_mem_bit_index |= (virtid << virtid_offset);
1565215990Sjmallett
1566232812Sjmallett        offset = l2c_vrt_mem_bit_index >> 5;
1567232812Sjmallett        position = l2c_vrt_mem_bit_index & 0x1f;
1568215990Sjmallett
1569232812Sjmallett        l2c_vrt_mem.u64 = cvmx_read_csr(CVMX_L2C_VRT_MEMX(offset));
1570232812Sjmallett        /* Allow/Disallow write access to memory. */
1571232812Sjmallett        if (mode == 0)
1572232812Sjmallett            l2c_vrt_mem.s.data &= ~(1 << position);
1573232812Sjmallett        else
1574232812Sjmallett            l2c_vrt_mem.s.data |= 1 << position;
1575232812Sjmallett        l2c_vrt_mem.s.parity = 0;
        /*
         * PARITY<i> is the even parity of DATA<i*8+7:i*8>, which means
         * that each bit<i> in PARITY[0..3] is the XOR of all the bits
         * in the corresponding byte of DATA.
         */
        for (i = 0; i < 4; i++) {
            uint64_t mask = 0xffull << (i * 8);
            if ((cvmx_pop(l2c_vrt_mem.s.data & mask) & 0x1))
                l2c_vrt_mem.s.parity |= (1ull << i);
        }
1585232812Sjmallett        cvmx_write_csr(CVMX_L2C_VRT_MEMX(offset), l2c_vrt_mem.u64);
1586232812Sjmallett        start_addr += gran;
1587232812Sjmallett    }
1588215990Sjmallett
1589232812Sjmallett    cvmx_spinlock_unlock(&cvmx_l2c_vrt_spinlock);
1590232812Sjmallett
1591215990Sjmallett    return 0;
1592215990Sjmallett}
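/*
 * Example (editor's sketch): write-protecting a 64 MB region from the
 * guest running as virtual ID 1 and later opening it up again.  The
 * addresses here are illustrative only; the start address must be
 * aligned to the granularity returned by
 * __cvmx_l2c_vrt_get_granularity(), and the size is rounded up to it.
 *
 *     cvmx_l2c_vrt_memprotect(0x20000000ull, 64 << 20, 1, 1);   deny writes
 *     ...
 *     cvmx_l2c_vrt_memprotect(0x20000000ull, 64 << 20, 1, 0);   allow writes
 */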
1593215990Sjmallett
1594215990Sjmallett/**
1595215990Sjmallett * Enable virtualization.
1596215990Sjmallett *
 * @param mode   Whether out-of-bounds writes are reported as an error
 *               (written to L2C_VRT_CTL[OOBERR]).
1598215990Sjmallett */
1599215990Sjmallettvoid cvmx_l2c_vrt_enable(int mode)
1600215990Sjmallett{
1601232812Sjmallett    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1602215990Sjmallett
1603232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
1604232812Sjmallett        return;
1605232812Sjmallett
1606232812Sjmallett    /* Enable global virtualization */
1607232812Sjmallett    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1608232812Sjmallett    l2c_vrt_ctl.s.ooberr = mode;
1609232812Sjmallett    l2c_vrt_ctl.s.enable = 1;
1610232812Sjmallett    cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1611215990Sjmallett}
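/*
 * Example (editor's sketch): the checks in the functions above imply
 * this bring-up order: ID and memory limits are set while
 * virtualization is still disabled, virtual IDs are assigned to cores,
 * the feature is enabled, and only then are regions protected.  The
 * base and size values below are placeholders.
 *
 *     cvmx_l2c_vrt_set_max_virtids(8);
 *     cvmx_l2c_vrt_set_max_memsz(4);
 *     cvmx_l2c_vrt_assign_virtid(1, 0x3);
 *     cvmx_l2c_vrt_enable(1);
 *     cvmx_l2c_vrt_memprotect(base, size, 1, 1);
 */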
1612215990Sjmallett
1613215990Sjmallett/**
1614215990Sjmallett * Disable virtualization.
1615215990Sjmallett */
1616215990Sjmallettvoid cvmx_l2c_vrt_disable(void)
1617215990Sjmallett{
1618232812Sjmallett    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1619215990Sjmallett
1620232812Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
1621232812Sjmallett        return;
1622232812Sjmallett
1623232812Sjmallett    /* Disable global virtualization */
1624232812Sjmallett    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1625232812Sjmallett    l2c_vrt_ctl.s.enable = 0;
1626232812Sjmallett    cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1627215990Sjmallett}
1628232812Sjmallett#endif
1629