/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Inc. (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export  control
 * laws, including the U.S. Export Administration Act and its  associated
 * regulations, and may be subject to export or import  regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/

/**
 * @file
 *
 * Implementation of the Level 2 Cache (L2C) control,
 * measurement, and debugging facilities.
 *
 * <hr>$Revision: 70215 $<hr>
 *
 */

#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "cvmx-config.h"
#endif
#include "cvmx.h"
#include "cvmx-l2c.h"
#include "cvmx-spinlock.h"
#include "cvmx-interrupt.h"
#endif

#ifndef CVMX_BUILD_FOR_LINUX_HOST
/*
 * This spinlock is used internally to ensure that only one core is
 * performing certain L2 operations at a time.
 *
 * NOTE: This only protects calls from within a single application -
 * if multiple applications or operating systems are running, then it
 * is up to the user program to coordinate between them.
 */
CVMX_SHARED cvmx_spinlock_t cvmx_l2c_spinlock;
#endif

int cvmx_l2c_get_core_way_partition(uint32_t core)
{
    uint32_t field;

    /* Validate the core number */
    if (core >= cvmx_octeon_num_cores())
        return -1;

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        return (cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff);

    /*
     * Use the lower two bits of the core number to determine the
     * bit offset of the UMSK[] field in the L2C_SPAR register.
     */
    field = (core & 0x3) * 8;

    /*
     * Return the UMSK[] field from the appropriate L2C_SPAR
     * register based on the core number.
     */
    switch (core & 0xC) {
    case 0x0:
        return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
    case 0x4:
        return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
    case 0x8:
        return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
    case 0xC:
        return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
    }
    return 0;
}

int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
{
    uint32_t field;
    uint32_t valid_mask;

    valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;

    mask &= valid_mask;

    /* A UMSK setting which blocks all L2C ways is an error on some chips */
    if (mask == valid_mask && (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
        return -1;

    /* Validate the core number */
    if (core >= cvmx_octeon_num_cores())
        return -1;

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
        return 0;
    }

    /*
     * Use the lower two bits of core to determine the bit offset of the
     * UMSK[] field in the L2C_SPAR register.
     */
    field = (core & 0x3) * 8;

    /*
     * Assign the new mask setting to the UMSK[] field in the appropriate
     * L2C_SPAR register based on the core number.
     */
    switch (core & 0xC) {
    case 0x0:
        cvmx_write_csr(CVMX_L2C_SPAR0,
                   (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
                   mask << field);
        break;
    case 0x4:
        cvmx_write_csr(CVMX_L2C_SPAR1,
                   (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
                   mask << field);
        break;
    case 0x8:
        cvmx_write_csr(CVMX_L2C_SPAR2,
                   (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
                   mask << field);
        break;
    case 0xC:
        cvmx_write_csr(CVMX_L2C_SPAR3,
                   (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
                   mask << field);
        break;
    }
    return 0;
}
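
/*
 * Usage sketch (illustrative, not part of the SDK API): restrict core 0
 * to the lower half of the L2 ways.  UMSK bits that are set *block* the
 * corresponding way, so the upper half is masked off here.  Assumes an
 * application context where cvmx_dprintf() is available.
 *
 *     uint32_t ways = cvmx_l2c_get_num_assoc();
 *     uint32_t block_upper = ((1u << ways) - 1) & ~((1u << (ways / 2)) - 1);
 *
 *     if (cvmx_l2c_set_core_way_partition(0, block_upper) == 0)
 *         cvmx_dprintf("core 0 UMSK is now 0x%x\n",
 *                      cvmx_l2c_get_core_way_partition(0));
 */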

int cvmx_l2c_set_hw_way_partition(uint32_t mask)
{
    uint32_t valid_mask;

    valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
    mask &= valid_mask;

    /* A UMSK setting which blocks all L2C ways is an error on some chips */
    if (mask == valid_mask && (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
        return -1;

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
    else
        cvmx_write_csr(CVMX_L2C_SPAR4,
                   (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
    return 0;
}

int cvmx_l2c_get_hw_way_partition(void)
{
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff;
    else
        return cvmx_read_csr(CVMX_L2C_SPAR4) & 0xFF;
}

int cvmx_l2c_set_hw_way_partition2(uint32_t mask)
{
    uint32_t valid_mask;

    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
        return -1;

    valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
    mask &= valid_mask;
    cvmx_write_csr(CVMX_L2C_WPAR_IOBX(1), mask);
    return 0;
}

int cvmx_l2c_get_hw_way_partition2(void)
{
    if (!OCTEON_IS_MODEL(OCTEON_CN68XX)) {
        cvmx_warn("only one IOB on this chip");
        return -1;
    }
    return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(1)) & 0xffff;
}

void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
              uint32_t clear_on_read)
{
    if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
        union cvmx_l2c_pfctl pfctl;

        pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);

        switch (counter) {
        case 0:
            pfctl.s.cnt0sel = event;
            pfctl.s.cnt0ena = 1;
            pfctl.s.cnt0rdclr = clear_on_read;
            break;
        case 1:
            pfctl.s.cnt1sel = event;
            pfctl.s.cnt1ena = 1;
            pfctl.s.cnt1rdclr = clear_on_read;
            break;
        case 2:
            pfctl.s.cnt2sel = event;
            pfctl.s.cnt2ena = 1;
            pfctl.s.cnt2rdclr = clear_on_read;
            break;
        case 3:
        default:
            pfctl.s.cnt3sel = event;
            pfctl.s.cnt3ena = 1;
            pfctl.s.cnt3rdclr = clear_on_read;
            break;
        }

        cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
    } else {
        union cvmx_l2c_tadx_prf l2c_tadx_prf;
        int tad;

        cvmx_warn("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");

        cvmx_warn_if(clear_on_read, "L2C counters don't support clear on read for this chip\n");

        l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));

        switch (counter) {
        case 0:
            l2c_tadx_prf.s.cnt0sel = event;
            break;
        case 1:
            l2c_tadx_prf.s.cnt1sel = event;
            break;
        case 2:
            l2c_tadx_prf.s.cnt2sel = event;
            break;
        case 3:
        default:
            l2c_tadx_prf.s.cnt3sel = event;
            break;
        }
        for (tad = 0; tad < CVMX_L2C_TADS; tad++)
            cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
                       l2c_tadx_prf.u64);
    }
}

uint64_t cvmx_l2c_read_perf(uint32_t counter)
{
    switch (counter) {
    case 0:
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
            return cvmx_read_csr(CVMX_L2C_PFC0);
        else {
            /* Sum the per-TAD counters; 'count' avoids shadowing the parameter */
            uint64_t count = 0;
            int tad;
            for (tad = 0; tad < CVMX_L2C_TADS; tad++)
                count += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
            return count;
        }
    case 1:
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
            return cvmx_read_csr(CVMX_L2C_PFC1);
        else {
            uint64_t count = 0;
            int tad;
            for (tad = 0; tad < CVMX_L2C_TADS; tad++)
                count += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
            return count;
        }
    case 2:
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
            return cvmx_read_csr(CVMX_L2C_PFC2);
        else {
            uint64_t count = 0;
            int tad;
            for (tad = 0; tad < CVMX_L2C_TADS; tad++)
                count += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
            return count;
        }
    case 3:
    default:
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
            return cvmx_read_csr(CVMX_L2C_PFC3);
        else {
            uint64_t count = 0;
            int tad;
            for (tad = 0; tad < CVMX_L2C_TADS; tad++)
                count += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
            return count;
        }
    }
}
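
/*
 * Usage sketch (illustrative): program counter 0 and read it back after
 * some work.  CVMX_L2C_EVENT_CYCLES is assumed to be a valid member of
 * enum cvmx_l2c_event from cvmx-l2c.h; substitute whatever event the
 * target chip supports.
 *
 *     cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_CYCLES, 0);
 *     do_work();   // hypothetical application workload
 *     cvmx_dprintf("L2C counter 0: %llu\n",
 *                  (unsigned long long)cvmx_l2c_read_perf(0));
 */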

#ifndef CVMX_BUILD_FOR_LINUX_HOST
/**
 * @INTERNAL
 * Helper function used to fault in cache lines for L2 cache locking
 *
 * @param addr   Address of base of memory region to read into L2 cache
 * @param len    Length (in bytes) of region to fault in
 */
static void fault_in(uint64_t addr, int len)
{
    volatile char *ptr;
    volatile char dummy;
    /*
     * Adjust addr and length so we get all cache lines even for
     * small ranges spanning two cache lines.
     */
    len += addr & CVMX_CACHE_LINE_MASK;
    addr &= ~CVMX_CACHE_LINE_MASK;
    ptr = (volatile char *)cvmx_phys_to_ptr(addr);
    /*
     * Invalidate L1 cache to make sure all loads result in data
     * being in L2.
     */
    CVMX_DCACHE_INVALIDATE;
    while (len > 0) {
        dummy += *ptr;
        len -= CVMX_CACHE_LINE_SIZE;
        ptr += CVMX_CACHE_LINE_SIZE;
    }
}

int cvmx_l2c_lock_line(uint64_t addr)
{
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
        uint64_t assoc = cvmx_l2c_get_num_assoc();
        uint32_t tag = cvmx_l2c_v2_address_to_tag(addr);
        uint64_t index_addr = cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT;
        uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, index_addr);
        uint64_t way;
        uint32_t tad;
        union cvmx_l2c_tadx_tag l2c_tadx_tag;

        if (tag == 0xFFFFFFFF) {
            cvmx_dprintf("ERROR: cvmx_l2c_lock_line: addr 0x%llx in LMC hole.\n",
                         (unsigned long long)addr);
            return -1;
        }

        tad = cvmx_l2c_address_to_tad(addr);

        CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
        CVMX_SYNCW;
        /* Make sure we were able to lock the line */
        for (way = 0; way < assoc; way++) {
            uint64_t caddr = index | (way << shift);
            CVMX_CACHE_LTGL2I(caddr, 0);
            /* make sure CVMX_L2C_TADX_TAG is updated */
            CVMX_SYNC;
            l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(tad));
            if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
                break;
        }

        /* Check if a valid line was found */
        if (way >= assoc) {
            /* The line is not present in any way, so it could not be locked */
            return -1;
        }

        /* Check if the lock bit stuck */
        if (!l2c_tadx_tag.s.lock) {
            /* The line is present but was not locked */
            return -1;
        }
        return 0;
    } else {
        int retval = 0;
        union cvmx_l2c_dbg l2cdbg;
        union cvmx_l2c_lckbase lckbase;
        union cvmx_l2c_lckoff lckoff;
        union cvmx_l2t_err l2t_err;

        cvmx_spinlock_lock(&cvmx_l2c_spinlock);

        l2cdbg.u64 = 0;
        lckbase.u64 = 0;
        lckoff.u64 = 0;

        /* Clear l2t error bits if set */
        l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
        l2t_err.s.lckerr = 1;
        l2t_err.s.lckerr2 = 1;
        cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);

        addr &= ~CVMX_CACHE_LINE_MASK;

        /* Set this core as debug core */
        l2cdbg.s.ppnum = cvmx_get_core_num();
        CVMX_SYNC;
        cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
        cvmx_read_csr(CVMX_L2C_DBG);

        lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
        cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
        cvmx_read_csr(CVMX_L2C_LCKOFF);

        if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
            int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * cvmx_l2c_get_set_bits() - 1;
            uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> cvmx_l2c_get_set_bits();
            lckbase.s.lck_base = addr_tmp >> 7;
        } else {
            lckbase.s.lck_base = addr >> 7;
        }

        lckbase.s.lck_ena = 1;
        cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
        /* Make sure it gets there */
        cvmx_read_csr(CVMX_L2C_LCKBASE);

        fault_in(addr, CVMX_CACHE_LINE_SIZE);

        lckbase.s.lck_ena = 0;
        cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
        /* Make sure it gets there */
        cvmx_read_csr(CVMX_L2C_LCKBASE);

        /* Stop being debug core */
        cvmx_write_csr(CVMX_L2C_DBG, 0);
        cvmx_read_csr(CVMX_L2C_DBG);

        l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
        if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
            retval = 1;  /* We were unable to lock the line */

        cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
        return retval;
    }
}

int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
{
    int retval = 0;

    /* Round start/end to cache line boundaries */
    len += start & CVMX_CACHE_LINE_MASK;
    start &= ~CVMX_CACHE_LINE_MASK;
    len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;

    while (len) {
        if (cvmx_l2c_lock_line(start) != 0)
            retval--;
        start += CVMX_CACHE_LINE_SIZE;
        len -= CVMX_CACHE_LINE_SIZE;
    }
    return retval;
}
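
/*
 * Usage sketch (illustrative): pin an application buffer in L2 and
 * release it when done.  Assumes the buffer was allocated from mapped
 * memory so cvmx_ptr_to_phys() can translate it; buf and buf_len are
 * hypothetical application names.
 *
 *     uint64_t pa = cvmx_ptr_to_phys(buf);
 *     if (cvmx_l2c_lock_mem_region(pa, buf_len) != 0)
 *         cvmx_dprintf("some lines could not be locked\n");
 *     // ... use buf with L2-resident latency ...
 *     cvmx_l2c_unlock_mem_region(pa, buf_len);
 */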

void cvmx_l2c_flush(void)
{
    uint64_t assoc, set;
    uint64_t n_assoc, n_set;

    n_set = cvmx_l2c_get_num_sets();
    n_assoc = cvmx_l2c_get_num_assoc();

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        uint64_t address;
        /* These may look like constants, but they aren't... */
        int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
        int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
        for (set = 0; set < n_set; set++) {
            for (assoc = 0; assoc < n_assoc; assoc++) {
                address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                               (assoc << assoc_shift) | (set << set_shift));
                CVMX_CACHE_WBIL2I(address, 0);
            }
        }
    } else {
        for (set = 0; set < n_set; set++)
            for (assoc = 0; assoc < n_assoc; assoc++)
                cvmx_l2c_flush_line(assoc, set);
    }
}

int cvmx_l2c_unlock_line(uint64_t address)
{
    uint32_t tad = cvmx_l2c_address_to_tad(address);

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        int assoc;
        union cvmx_l2c_tag tag;
        uint32_t tag_addr;
        uint32_t index = cvmx_l2c_address_to_index(address);

        /* Compute the portion of the address that is stored in the tag */
        tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));

        /*
         * For OcteonII, we can flush a line by using the physical
         * address directly, so finding the cache line used by
         * the address is only required to provide the proper
         * return value for the function.
         */
        for (assoc = 0; assoc < cvmx_l2c_get_num_assoc(); assoc++) {
            tag = cvmx_l2c_get_tag_v2(assoc, index, tad);

            if (tag.s.V && (tag.s.addr == tag_addr)) {
                CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
                return tag.s.L;
            }
        }
    } else {
        int assoc;
        union cvmx_l2c_tag tag;
        uint32_t tag_addr;
        uint32_t index = cvmx_l2c_address_to_index(address);

        /* Compute portion of address that is stored in tag */
        tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
        for (assoc = 0; assoc < cvmx_l2c_get_num_assoc(); assoc++) {
            tag = cvmx_l2c_get_tag_v2(assoc, index, tad);

            if (tag.s.V && (tag.s.addr == tag_addr)) {
                cvmx_l2c_flush_line(assoc, index);
                return tag.s.L;
            }
        }
    }
    return 0;
}

int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
{
    int num_unlocked = 0;
    /* Round start/end to cache line boundaries */
    len += start & CVMX_CACHE_LINE_MASK;
    start &= ~CVMX_CACHE_LINE_MASK;
    len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
    while (len > 0) {
        num_unlocked += cvmx_l2c_unlock_line(start);
        start += CVMX_CACHE_LINE_SIZE;
        len -= CVMX_CACHE_LINE_SIZE;
    }

    return num_unlocked;
}

/*
 * Internal L2C tag types.  These are converted to a generic structure
 * that can be used on all chips.
 */
union __cvmx_l2c_tag {
    uint64_t u64;
#ifdef __BIG_ENDIAN_BITFIELD
    struct cvmx_l2c_tag_cn50xx {
        uint64_t reserved:40;
        uint64_t V:1;        /* Line valid */
        uint64_t D:1;        /* Line dirty */
        uint64_t L:1;        /* Line locked */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t addr:20;    /* Phys mem addr (33..14) */
    } cn50xx;
    struct cvmx_l2c_tag_cn30xx {
        uint64_t reserved:41;
        uint64_t V:1;        /* Line valid */
        uint64_t D:1;        /* Line dirty */
        uint64_t L:1;        /* Line locked */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t addr:19;    /* Phys mem addr (33..15) */
    } cn30xx;
    struct cvmx_l2c_tag_cn31xx {
        uint64_t reserved:42;
        uint64_t V:1;        /* Line valid */
        uint64_t D:1;        /* Line dirty */
        uint64_t L:1;        /* Line locked */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t addr:18;    /* Phys mem addr (33..16) */
    } cn31xx;
    struct cvmx_l2c_tag_cn38xx {
        uint64_t reserved:43;
        uint64_t V:1;        /* Line valid */
        uint64_t D:1;        /* Line dirty */
        uint64_t L:1;        /* Line locked */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t addr:17;    /* Phys mem addr (33..17) */
    } cn38xx;
    struct cvmx_l2c_tag_cn58xx {
        uint64_t reserved:44;
        uint64_t V:1;        /* Line valid */
        uint64_t D:1;        /* Line dirty */
        uint64_t L:1;        /* Line locked */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t addr:16;    /* Phys mem addr (33..18) */
    } cn58xx;
#else
    struct cvmx_l2c_tag_cn50xx {
        uint64_t addr:20;    /* Phys mem addr (33..14) */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t L:1;        /* Line locked */
        uint64_t D:1;        /* Line dirty */
        uint64_t V:1;        /* Line valid */
        uint64_t reserved:40;
    } cn50xx;
    struct cvmx_l2c_tag_cn30xx {
        uint64_t addr:19;    /* Phys mem addr (33..15) */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t L:1;        /* Line locked */
        uint64_t D:1;        /* Line dirty */
        uint64_t V:1;        /* Line valid */
        uint64_t reserved:41;
    } cn30xx;
    struct cvmx_l2c_tag_cn31xx {
        uint64_t addr:18;    /* Phys mem addr (33..16) */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t L:1;        /* Line locked */
        uint64_t D:1;        /* Line dirty */
        uint64_t V:1;        /* Line valid */
        uint64_t reserved:42;
    } cn31xx;
    struct cvmx_l2c_tag_cn38xx {
        uint64_t addr:17;    /* Phys mem addr (33..17) */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t L:1;        /* Line locked */
        uint64_t D:1;        /* Line dirty */
        uint64_t V:1;        /* Line valid */
        uint64_t reserved:43;
    } cn38xx;
    struct cvmx_l2c_tag_cn58xx {
        uint64_t addr:16;    /* Phys mem addr (33..18) */
        uint64_t U:1;        /* Use, LRU eviction */
        uint64_t L:1;        /* Line locked */
        uint64_t D:1;        /* Line dirty */
        uint64_t V:1;        /* Line valid */
        uint64_t reserved:44;
    } cn58xx;
#endif
    struct cvmx_l2c_tag_cn58xx cn56xx;    /* 2048 sets */
    struct cvmx_l2c_tag_cn31xx cn52xx;    /* 512 sets */
};

/**
 * @INTERNAL
 * Function to read an L2C tag.  This code makes the current core
 * the 'debug core' for the L2.  It must only be executed by
 * one core at a time.
 *
 * @param assoc  Association (way) of the tag to dump
 * @param index  Index of the cacheline
 *
 * @return The Octeon model specific tag structure.  This is
 *         translated by a wrapper function to a generic form that is
 *         easier for applications to use.
 */
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{
    uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
    uint64_t core = cvmx_get_core_num();
    union __cvmx_l2c_tag tag_val;
    uint64_t dbg_addr = CVMX_L2C_DBG;
    unsigned long flags;
    union cvmx_l2c_dbg debug_val;

    debug_val.u64 = 0;
    /*
     * For low core count parts, the core number is always small
     * enough to stay in the correct field and not set any
     * reserved bits.
     */
    debug_val.s.ppnum = core;
    debug_val.s.l2t = 1;
    debug_val.s.set = assoc;

    cvmx_local_irq_save(flags);
    /*
     * Make sure core is quiet (no prefetches, etc.) before
     * entering debug mode.
     */
    CVMX_SYNC;
    /* Flush L1 to make sure debug load misses L1 */
    CVMX_DCACHE_INVALIDATE;

    /*
     * The following must be done in assembly, as when in debug
     * mode all data loads from L2 return special debug data, not
     * normal memory contents.  Also, interrupts must be disabled,
     * since if an interrupt occurs while in debug mode the ISR
     * will get debug data from all its memory reads instead of
     * the contents of memory.
     */
    asm volatile (
        ".set push\n\t"
        ".set mips64\n\t"
        ".set noreorder\n\t"
        "sd    %[dbg_val], 0(%[dbg_addr])\n\t"   /* Enter debug mode, wait for store */
        "ld    $0, 0(%[dbg_addr])\n\t"
        "ld    %[tag_val], 0(%[tag_addr])\n\t"   /* Read L2C tag data */
        "sd    $0, 0(%[dbg_addr])\n\t"          /* Exit debug mode, wait for store */
        "ld    $0, 0(%[dbg_addr])\n\t"
        "cache 9, 0($0)\n\t"             /* Invalidate dcache to discard debug data */
        ".set pop"
        : [tag_val] "=r" (tag_val)
        : [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
        : "memory");

    cvmx_local_irq_restore(flags);

    return tag_val;
}

union cvmx_l2c_tag cvmx_l2c_get_tag_v2(uint32_t association, uint32_t index, uint32_t tad)
{
    union cvmx_l2c_tag tag;
    tag.u64 = 0;

    if ((int)association >= cvmx_l2c_get_num_assoc()) {
        cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
        return tag;
    }
    if ((int)index >= cvmx_l2c_get_num_sets()) {
        cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
                 (int)index, cvmx_l2c_get_num_sets());
        return tag;
    }
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        union cvmx_l2c_tadx_tag l2c_tadx_tag;
        uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                        (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
                        (index << CVMX_L2C_IDX_ADDR_SHIFT));
        /*
         * Use the L2 cache Index Load Tag cache instruction;
         * hardware loads the tag for the selected L2 cache
         * block into the L2C_TADX_TAG register.
         */
        if (tad >= CVMX_L2C_TADS) {    /* TADs are numbered 0..CVMX_L2C_TADS-1 */
            cvmx_dprintf("ERROR: cvmx_l2c_get_tag_v2: TAD#%d out of range\n", (unsigned int)tad);
            return tag;
        }
        CVMX_CACHE_LTGL2I(address, 0);
        CVMX_SYNC;   /* make sure CVMX_L2C_TADX_TAG is updated */
        l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(tad));

        tag.s.V     = l2c_tadx_tag.s.valid;
        tag.s.D     = l2c_tadx_tag.s.dirty;
        tag.s.L     = l2c_tadx_tag.s.lock;
        tag.s.U     = l2c_tadx_tag.s.use;
        tag.s.addr  = l2c_tadx_tag.s.tag;
    } else {
        union __cvmx_l2c_tag tmp_tag;
        /* __read_l2_tag is intended for internal use only */
        tmp_tag = __read_l2_tag(association, index);

        /*
         * Convert all tag structure types to the generic version,
         * as it can represent all models.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
            tag.s.V    = tmp_tag.cn58xx.V;
            tag.s.D    = tmp_tag.cn58xx.D;
            tag.s.L    = tmp_tag.cn58xx.L;
            tag.s.U    = tmp_tag.cn58xx.U;
            tag.s.addr = tmp_tag.cn58xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
            tag.s.V    = tmp_tag.cn38xx.V;
            tag.s.D    = tmp_tag.cn38xx.D;
            tag.s.L    = tmp_tag.cn38xx.L;
            tag.s.U    = tmp_tag.cn38xx.U;
            tag.s.addr = tmp_tag.cn38xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
            tag.s.V    = tmp_tag.cn31xx.V;
            tag.s.D    = tmp_tag.cn31xx.D;
            tag.s.L    = tmp_tag.cn31xx.L;
            tag.s.U    = tmp_tag.cn31xx.U;
            tag.s.addr = tmp_tag.cn31xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
            tag.s.V    = tmp_tag.cn30xx.V;
            tag.s.D    = tmp_tag.cn30xx.D;
            tag.s.L    = tmp_tag.cn30xx.L;
            tag.s.U    = tmp_tag.cn30xx.U;
            tag.s.addr = tmp_tag.cn30xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
            tag.s.V    = tmp_tag.cn50xx.V;
            tag.s.D    = tmp_tag.cn50xx.D;
            tag.s.L    = tmp_tag.cn50xx.L;
            tag.s.U    = tmp_tag.cn50xx.U;
            tag.s.addr = tmp_tag.cn50xx.addr;
        } else {
            cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
        }
    }
    return tag;
}

union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
    union cvmx_l2c_tag tag;
    tag.u64 = 0;

    if ((int)association >= cvmx_l2c_get_num_assoc()) {
        cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
        return tag;
    }
    if ((int)index >= cvmx_l2c_get_num_sets()) {
        cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
                 (int)index, cvmx_l2c_get_num_sets());
        return tag;
    }
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        union cvmx_l2c_tadx_tag l2c_tadx_tag;
        uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                        (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
                        (index << CVMX_L2C_IDX_ADDR_SHIFT));
        if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
            cvmx_dprintf("ERROR: Cannot use %s on OCTEON CN68XX, use cvmx_l2c_get_tag_v2 instead!\n",
                     __func__);
            return tag;
        }
        /*
         * Use the L2 cache Index Load Tag cache instruction;
         * hardware loads the tag for the selected L2 cache
         * block into the L2C_TAD0_TAG register.
         */
        CVMX_CACHE_LTGL2I(address, 0);
        CVMX_SYNC;   /* make sure CVMX_L2C_TADX_TAG is updated */
        l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));

        tag.s.V     = l2c_tadx_tag.s.valid;
        tag.s.D     = l2c_tadx_tag.s.dirty;
        tag.s.L     = l2c_tadx_tag.s.lock;
        tag.s.U     = l2c_tadx_tag.s.use;
        tag.s.addr  = l2c_tadx_tag.s.tag;
    } else {
        union __cvmx_l2c_tag tmp_tag;
        /* __read_l2_tag is intended for internal use only */
        tmp_tag = __read_l2_tag(association, index);

        /*
         * Convert all tag structure types to the generic version,
         * as it can represent all models.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
            tag.s.V    = tmp_tag.cn58xx.V;
            tag.s.D    = tmp_tag.cn58xx.D;
            tag.s.L    = tmp_tag.cn58xx.L;
            tag.s.U    = tmp_tag.cn58xx.U;
            tag.s.addr = tmp_tag.cn58xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
            tag.s.V    = tmp_tag.cn38xx.V;
            tag.s.D    = tmp_tag.cn38xx.D;
            tag.s.L    = tmp_tag.cn38xx.L;
            tag.s.U    = tmp_tag.cn38xx.U;
            tag.s.addr = tmp_tag.cn38xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
            tag.s.V    = tmp_tag.cn31xx.V;
            tag.s.D    = tmp_tag.cn31xx.D;
            tag.s.L    = tmp_tag.cn31xx.L;
            tag.s.U    = tmp_tag.cn31xx.U;
            tag.s.addr = tmp_tag.cn31xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
            tag.s.V    = tmp_tag.cn30xx.V;
            tag.s.D    = tmp_tag.cn30xx.D;
            tag.s.L    = tmp_tag.cn30xx.L;
            tag.s.U    = tmp_tag.cn30xx.U;
            tag.s.addr = tmp_tag.cn30xx.addr;
        } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
            tag.s.V    = tmp_tag.cn50xx.V;
            tag.s.D    = tmp_tag.cn50xx.D;
            tag.s.L    = tmp_tag.cn50xx.L;
            tag.s.U    = tmp_tag.cn50xx.U;
            tag.s.addr = tmp_tag.cn50xx.addr;
        } else {
            cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
        }
    }
    return tag;
}
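
/*
 * Usage sketch (illustrative): walk the whole cache and count locked
 * lines using the generic tag structure.  On CN68XX the per-TAD variant
 * cvmx_l2c_get_tag_v2() would be used instead.
 *
 *     int set, way, locked = 0;
 *     for (set = 0; set < cvmx_l2c_get_num_sets(); set++)
 *         for (way = 0; way < cvmx_l2c_get_num_assoc(); way++)
 *             if (cvmx_l2c_get_tag(way, set).s.L)
 *                 locked++;
 */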
#endif

int cvmx_l2c_address_to_tad(uint64_t addr)
{
    uint32_t tad;
    if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
        cvmx_l2c_ctl_t l2c_ctl;
        l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
        if (!l2c_ctl.s.disidxalias) {
            /* Index aliasing on: hash address bits into the TAD select */
            tad = ((addr >> 7) ^ (addr >> 12) ^ (addr >> 18)) & 3;
        } else {
            /* Index aliasing off: the TAD is simply address bits <8:7> */
            tad = (addr >> 7) & 3;
        }
    } else {
        /* Chips other than CN68XX have a single TAD */
        tad = 0;
    }
    return tad;
}
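
/*
 * Worked example (illustrative): with index aliasing enabled on CN68XX,
 * an address such as 0x1000080 selects its TAD as
 *
 *     ((0x1000080 >> 7) ^ (0x1000080 >> 12) ^ (0x1000080 >> 18)) & 3
 *
 * so consecutive cache lines spread across all four TADs instead of
 * being chosen by address bits <8:7> alone.
 */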

uint32_t cvmx_l2c_v2_address_to_tag(uint64_t addr)
{
    /*
     * L2C removes the 256MB hole between DR0 (the first 256MB of DRAM)
     * and DR1 (DRAM from 512MB up): addresses inside the hole have no
     * tag, and addresses at or above DR1 must be shifted down first.
     */
#define DR0_END   ((256 * 1024 * 1024) - 1)
#define DR1_START (512 * 1024 * 1024)
#define L2_HOLE   (256 * 1024 * 1024)

    if ((addr > DR0_END) && (addr < DR1_START))
        return (uint32_t)-1;
    if (addr >= DR1_START)    /* >= so the first byte of DR1 is shifted too */
        addr = addr - L2_HOLE;
    addr = addr & 0x7FFFFFFFFULL;
    return (uint32_t)(addr >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
}

uint32_t cvmx_l2c_address_to_index(uint64_t addr)
{
    uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
    int indxalias = 0;

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        union cvmx_l2c_ctl l2c_ctl;
        l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
        indxalias = !l2c_ctl.s.disidxalias;
    } else {
        union cvmx_l2c_cfg l2c_cfg;
        l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
        indxalias = l2c_cfg.s.idxalias;
    }

    if (indxalias) {
        if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
            uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE / (1 << CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
            idx ^= (idx / cvmx_l2c_get_num_sets()) & 0x3ff;
            idx ^= a_14_12 & 0x3;
            idx ^= a_14_12 << 2;
        } else if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
            uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE / (1 << CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
            idx ^= idx / cvmx_l2c_get_num_sets();
            idx ^= a_14_12;
        } else {
            idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
        }
    }
    idx &= CVMX_L2C_IDX_MASK;
    return idx;
}

int cvmx_l2c_get_cache_size_bytes(void)
{
    return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
        CVMX_CACHE_LINE_SIZE;
}
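
/*
 * Worked example: a chip with 1024 sets, 16 ways, and 128-byte lines
 * (e.g. a CN63XX with no ways fused off) reports
 * 1024 * 16 * 128 = 2097152 bytes (2 MB).
 */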

/**
 * Return log base 2 of the number of sets in the L2 cache
 * @return Log2 of the number of L2 cache sets
 */
int cvmx_l2c_get_set_bits(void)
{
    int l2_set_bits;
    if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
        l2_set_bits = 11;    /* 2048 sets */
    else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX))
        l2_set_bits = 10;    /* 1024 sets */
    else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
        l2_set_bits = 9;    /* 512 sets */
    else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
        l2_set_bits = 8;    /* 256 sets */
    else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
        l2_set_bits = 7;    /* 128 sets */
    else {
        cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
        l2_set_bits = 11;    /* 2048 sets */
    }
    return l2_set_bits;
}

/* Return the number of sets in the L2 Cache */
int cvmx_l2c_get_num_sets(void)
{
    return 1 << cvmx_l2c_get_set_bits();
}

/* Return the number of associations (ways) in the L2 Cache */
int cvmx_l2c_get_num_assoc(void)
{
    int l2_assoc;
    if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
        OCTEON_IS_MODEL(OCTEON_CN52XX) ||
        OCTEON_IS_MODEL(OCTEON_CN58XX) ||
        OCTEON_IS_MODEL(OCTEON_CN50XX) ||
        OCTEON_IS_MODEL(OCTEON_CN38XX))
        l2_assoc = 8;
    else if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        l2_assoc = 16;
    else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
         OCTEON_IS_MODEL(OCTEON_CN30XX))
        l2_assoc = 4;
    else {
        cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
        l2_assoc = 8;
    }

    /* Check to see if part of the cache is disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        union cvmx_mio_fus_dat3 mio_fus_dat3;

        mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
        /*
         * The MIO_FUS_DAT3[L2C_CRIP] fuses map as follows:
         * <2> is not used on 63xx
         * <1> disables 1/2 of the ways
         * <0> disables 1/4 of the ways
         * They are cumulative, so for 63xx:
         * <1> <0>
         *  0   0   16-way 2MB cache
         *  0   1   12-way 1.5MB cache
         *  1   0   8-way 1MB cache
         *  1   1   4-way 512KB cache
         */
        if (mio_fus_dat3.cn63xx.l2c_crip == 3)
            l2_assoc = 4;
        else if (mio_fus_dat3.cn63xx.l2c_crip == 2)
            l2_assoc = 8;
        else if (mio_fus_dat3.cn63xx.l2c_crip == 1)
            l2_assoc = 12;
    } else {
        union cvmx_l2d_fus3 val;
        val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
        /*
         * Using shifts here, as bit position names are
         * different for each model but they all mean the
         * same.
         */
        if ((val.u64 >> 35) & 0x1)
            l2_assoc = l2_assoc >> 2;
        else if ((val.u64 >> 34) & 0x1)
            l2_assoc = l2_assoc >> 1;
    }
    return l2_assoc;
}

#ifndef CVMX_BUILD_FOR_LINUX_HOST
/**
 * Flush a line from the L2 cache.
 * This should only be called from one core at a time, as this routine
 * sets the core to the 'debug' core in order to flush the line.
 *
 * @param assoc  Association (or way) to flush
 * @param index  Index to flush
 */
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
{
    /* Check the range of the index. */
    if (index >= (uint32_t)cvmx_l2c_get_num_sets()) {
        cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
        return;
    }

    /* Check the range of the association. */
    if (assoc >= (uint32_t)cvmx_l2c_get_num_assoc()) {
        cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
        return;
    }

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        uint64_t address;
        /* Create the address based on index and association.
         * Bits<20:17> select the way of the cache block involved in
         *             the operation
         * Bits<16:7> of the effective address select the index
         */
        address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
                (index << CVMX_L2C_IDX_ADDR_SHIFT));
        CVMX_CACHE_WBIL2I(address, 0);
    } else {
        union cvmx_l2c_dbg l2cdbg;

        l2cdbg.u64 = 0;
        if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
            l2cdbg.s.ppnum = cvmx_get_core_num();
        l2cdbg.s.finv = 1;
        l2cdbg.s.set = assoc;

        cvmx_spinlock_lock(&cvmx_l2c_spinlock);
        /*
         * Enter debug mode, and make sure all other writes
         * complete before we enter debug mode
         */
        CVMX_SYNC;
        cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
        cvmx_read_csr(CVMX_L2C_DBG);

        CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                            index * CVMX_CACHE_LINE_SIZE),
                       0);
        /* Exit debug mode */
        CVMX_SYNC;
        cvmx_write_csr(CVMX_L2C_DBG, 0);
        cvmx_read_csr(CVMX_L2C_DBG);
        cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
    }
}
#endif

/**
 * Initialize the BIG address in L2C+DRAM to generate a proper error
 * on reading from or writing to a non-existent memory location.
 *
 * @param mem_size  Amount of DRAM configured in MB.
 * @param mode      Allow/disallow reporting of errors via L2C_INT_SUM[BIGRD,BIGWR].
 */
void cvmx_l2c_set_big_size(uint64_t mem_size, int mode)
{
    if ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        && !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
        cvmx_l2c_big_ctl_t big_ctl;
        int bits = 0, zero_bits = 0;
        uint64_t mem;

        if (mem_size > (CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024)) {
            cvmx_dprintf("WARNING: Invalid memory size(%lld) requested, should be <= %lld\n",
                (unsigned long long)mem_size, (unsigned long long)CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024);
            mem_size = CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024;
        }

        /* Count total and zero bits: a valid size has exactly one set bit */
        mem = mem_size;
        while (mem) {
            if ((mem & 1) == 0)
                zero_bits++;
            bits++;
            mem >>= 1;
        }

        if ((bits - zero_bits) != 1 || (bits - 9) <= 0) {
            cvmx_dprintf("ERROR: Invalid DRAM size (%lld) requested, refer to L2C_BIG_CTL[maxdram] for valid options.\n", (unsigned long long)mem_size);
            return;
        }

        big_ctl.u64 = 0;
        big_ctl.s.maxdram = bits - 9;
        big_ctl.s.disable = mode;
        cvmx_write_csr(CVMX_L2C_BIG_CTL, big_ctl.u64);
    }
}
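
/*
 * Worked example: for 4 GB of DRAM, mem_size = 4096 MB = 2^12, so the
 * loop above counts bits = 13 and zero_bits = 12 (exactly one set bit),
 * and L2C_BIG_CTL[MAXDRAM] is programmed to bits - 9 = 4.
 */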

#if !defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_LINUX_KERNEL)
/* L2C Virtualization APIs. These APIs are based on Octeon II documentation. */

/*
 * These could be used by the Linux kernel, but currently are not, so
 * disable them to save space.
 */

/**
 * @INTERNAL
 * Helper function to decode the number of allowed virtualization IDs
 * into the L2C_VRT_CTL[NUMID] encoding.
 *
 * @param nvid     Number of virtual IDs.
 * @return         The NUMID encoding on success, or -1 on failure.
 */
static inline int __cvmx_l2c_vrt_decode_numid(int nvid)
{
    int bits = -1;
    int zero_bits = -1;

    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return -1;

    if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED) {
        cvmx_dprintf("WARNING: Invalid number of virtual ids(%d) requested, should be <= %d\n",
                 nvid, CVMX_L2C_VRT_MAX_VIRTID_ALLOWED);
        return bits;
    }

    /* nvid must be a power of two; count its zero bits to get NUMID */
    while (nvid) {
        if ((nvid & 1) == 0)
            zero_bits++;
        bits++;
        nvid >>= 1;
    }

    if (bits == 1 || (zero_bits && ((bits - zero_bits) == 1)))
        return zero_bits;
    return -1;
}
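
/*
 * Worked example: __cvmx_l2c_vrt_decode_numid(64) walks 64 = 2^6, ending
 * with bits = 6 and zero_bits = 5 (both start at -1), and returns 5;
 * cvmx_l2c_vrt_get_max_virtids() maps that back as 1 << (5 + 1) = 64.
 */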

/**
 * Set the maximum number of virtual IDs allowed in a machine.
 *
 * @param nvid   Number of virtual IDs allowed in a machine.
 * @return       Return 0 on success or -1 on failure.
 */
int cvmx_l2c_vrt_set_max_virtids(int nvid)
{
    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;

    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return -1;

    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);

    if (l2c_vrt_ctl.s.enable) {
        cvmx_dprintf("WARNING: Changing the number of Virtual Machine IDs is not allowed after virtualization is enabled\n");
        return -1;
    }

    if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_max_virtids: Invalid number of Virtual Machine IDs(%d) requested, max allowed %d\n",
                 nvid, CVMX_L2C_VRT_MAX_VIRTID_ALLOWED);
        return -1;
    }

    /* Calculate the numid based on nvid */
    l2c_vrt_ctl.s.numid = __cvmx_l2c_vrt_decode_numid(nvid);
    cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
    return 0;
}

/**
 * Get the maximum number of virtual IDs allowed in a machine.
 *
 * @return  Return the number of virtual machine IDs or -1 on failure.
 */
int cvmx_l2c_vrt_get_max_virtids(void)
{
    int virtids;
    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;

    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return -1;

    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
    virtids = 1 << (l2c_vrt_ctl.s.numid + 1);
    if (virtids > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_get_max_virtids: Invalid number of Virtual IDs initialized (%d)\n",
                 virtids);
        return -1;
    }
    return virtids;
}

/**
 * @INTERNAL
 * Helper function to decode the memory space coverage of L2C_VRT_MEM
 * into the L2C_VRT_CTL[MEMSZ] encoding.
 *
 * @param memsz    Memory in GB.
 * @return         The MEMSZ encoding on success, or -1 on failure.
 */
static inline int __cvmx_l2c_vrt_decode_memsize(int memsz)
{
    int bits = 0;
    int zero_bits = 0;

    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return -1;

    if (memsz == 0 || memsz > CVMX_L2C_MAX_MEMSZ_ALLOWED) {
        cvmx_dprintf("WARNING: Invalid virtual memory size(%d) requested, should be <= %d\n",
                 memsz, CVMX_L2C_MAX_MEMSZ_ALLOWED);
        return -1;
    }

    /* memsz must be a power of two; count its zero bits to get MEMSZ */
    while (memsz) {
        if ((memsz & 1) == 0)
            zero_bits++;
        bits++;
        memsz >>= 1;
    }

    if (bits == 1 || (bits - zero_bits) == 1)
        return zero_bits;
    return -1;
}

/**
 * Set the maximum size of the memory space to be allocated for virtualization.
 *
 * @param memsz  Size of the virtual memory in GB
 * @return       Return 0 on success or -1 on failure.
 */
int cvmx_l2c_vrt_set_max_memsz(int memsz)
{
    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
    int decode = 0;

    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return -1;

    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);

    if (l2c_vrt_ctl.s.enable) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Changing the size of the memory after virtualization is enabled is not allowed.\n");
        return -1;
    }

    if (memsz >= (int)(cvmx_sysinfo_get()->system_dram_size / 1000000)) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Invalid memory size (%d GB), greater than available on the chip\n",
                 memsz);
        return -1;
    }

    decode = __cvmx_l2c_vrt_decode_memsize(memsz);
    if (decode == -1) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Invalid memory size (%d GB), refer to L2C_VRT_CTL[MEMSZ] for more information\n",
                 memsz);
        return -1;
    }

    l2c_vrt_ctl.s.memsz = decode;
    cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
    return 0;
}

/**
 * Assign a virtual ID to a set of cores.
 *
 * @param virtid    The virtual ID to assign.
 * @param coremask  The group of cores to assign this virtual ID to.
 * @return          Return 0 on success, otherwise -1.
 */
int cvmx_l2c_vrt_assign_virtid(int virtid, uint32_t coremask)
{
    uint32_t core = 0;
    int found = 0;
    int max_virtid;

    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return -1;

    max_virtid = cvmx_l2c_vrt_get_max_virtids();

    if (virtid > max_virtid) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Max %d virtids are allowed, passed %d.\n",
                 max_virtid, virtid);
        return -1;
    }

    while (core < cvmx_octeon_num_cores()) {
        if ((coremask >> core) & 1) {
            cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
            cvmx_l2c_virtid_iobx_t l2c_virtid_iobx;
            l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));

            /* Check if the core already has a virtid assigned. */
            if (l2c_virtid_ppx.s.id) {
                cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Changing virtid of core #%d to %d from %d.\n",
                         (unsigned int)core, virtid,
                         l2c_virtid_ppx.s.id);

                /* Flush L2 cache to avoid write errors */
                cvmx_l2c_flush();
            }
            cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), virtid & 0x3f);

            /* Set the IOB to normal mode. */
            l2c_virtid_iobx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_IOBX(core));
            l2c_virtid_iobx.s.id = 1;
            l2c_virtid_iobx.s.dwbid = 0;
            cvmx_write_csr(CVMX_L2C_VIRTID_IOBX(core),
                       l2c_virtid_iobx.u64);
            found = 1;
        }
        core++;
    }

    /* Invalid coremask passed. */
    if (!found) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Invalid coremask(0x%x) passed\n",
                 (unsigned int)coremask);
        return -1;
    }
    return 0;
}

/**
 * Remove a virtual ID assigned to a set of cores. Update the virtid mask and
 * virtid stored for each core.
 *
 * @param virtid  Remove the specified virtual machine ID.
 */
void cvmx_l2c_vrt_remove_virtid(int virtid)
{
    uint32_t core;
    cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;

    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return;

    for (core = 0; core < cvmx_octeon_num_cores(); core++) {
        l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
        if (virtid == l2c_virtid_ppx.s.id)
            cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), 0);
    }
}

/**
 * Helper function to compute the granularity at which memory regions
 * are protected.
 */
static uint64_t __cvmx_l2c_vrt_get_granularity(void)
{
    uint64_t gran = 0;

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
        int nvid;
        uint64_t szd;
        cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;

        l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
        nvid = cvmx_l2c_vrt_get_max_virtids();
        szd = (1ull << l2c_vrt_ctl.s.memsz) * 1024 * 1024 * 1024;
        /* The L2C_VRT_MEM registers provide 32K protection bits in total */
        gran = (unsigned long long)(szd * nvid) / (32ull * 1024);
    }
    return gran;
}

CVMX_SHARED cvmx_spinlock_t cvmx_l2c_vrt_spinlock;

/**
 * Block a memory region from being updated by a given virtual ID.
 *
 * @param start_addr   Starting address of the memory region
 * @param size         Size of the memory to protect
 * @param virtid       Virtual ID to use
 * @param mode         Allow/disallow write access
 *                        = 0,  Allow write access by virtid
 *                        = 1,  Disallow write access by virtid
 * @return             Return 0 on success, otherwise -1.
 */
int cvmx_l2c_vrt_memprotect(uint64_t start_addr, int size, int virtid, int mode)
{
    uint64_t gran;
    uint64_t end_addr;
    int byte_offset, virtid_offset;
    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
    cvmx_l2c_vrt_memx_t l2c_vrt_mem;
    cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
    int found;
    uint32_t core;

    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return -1;
    /*
     * Check the alignment of the start address; it should be aligned
     * to the granularity.
     */
    gran = __cvmx_l2c_vrt_get_granularity();
    end_addr = start_addr + size;
    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);

    /* No need to protect if virtualization is not enabled */
    if (!l2c_vrt_ctl.s.enable) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization is not enabled.\n");
        return -1;
    }

    if (virtid > cvmx_l2c_vrt_get_max_virtids()) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id is greater than max allowed\n");
        return -1;
    }

    /* No need to protect if virtid is not assigned to a core */
    found = 0;
    for (core = 0; core < cvmx_octeon_num_cores(); core++) {
        l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
        if (l2c_virtid_ppx.s.id == virtid) {
            found = 1;
            break;
        }
    }
    if (found == 0) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id (%d) is not assigned to any core.\n",
                 virtid);
        return -1;
    }

    /*
     * Make sure previous stores are through before protecting the
     * memory.
     */
    CVMX_SYNCW;

    /*
     * If the L2/DRAM physical address is >= 512 MB, subtract 256
     * MB to get the address to use. This is because L2C removes
     * the 256MB "hole" between DR0 and DR1.
     */
    if (start_addr >= (512 * 1024 * 1024))
        start_addr -= 256 * 1024 * 1024;

    if (start_addr != ((start_addr + (gran - 1)) & ~(gran - 1))) {
        cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Start address is not aligned\n");
        return -1;
    }

    /*
     * Check the size of the memory to protect; round the end address
     * up to the granularity.
     */
    if (end_addr != ((end_addr + (gran - 1)) & ~(gran - 1))) {
        end_addr = (end_addr + (gran - 1)) & ~(gran - 1);
        size = end_addr - start_addr;
    }

    byte_offset = l2c_vrt_ctl.s.memsz + l2c_vrt_ctl.s.numid + 16;
    virtid_offset = 14 - l2c_vrt_ctl.s.numid;

    cvmx_spinlock_lock(&cvmx_l2c_vrt_spinlock);

    /* Enable memory protection for each virtid for the specified range. */
    while (start_addr < end_addr) {
        /*
         * When L2C virtualization is enabled and a bit is set
         * in L2C_VRT_MEM(0..1023), then L2C prevents the
         * selected virtual machine from storing to the
         * selected L2C/DRAM region.
         */
        int offset, position, i;
        int l2c_vrt_mem_bit_index = start_addr >> byte_offset;
        l2c_vrt_mem_bit_index |= (virtid << virtid_offset);

        offset = l2c_vrt_mem_bit_index >> 5;
        position = l2c_vrt_mem_bit_index & 0x1f;

        l2c_vrt_mem.u64 = cvmx_read_csr(CVMX_L2C_VRT_MEMX(offset));
        /* Allow/disallow write access to memory. */
        if (mode == 0)
            l2c_vrt_mem.s.data &= ~(1 << position);
        else
            l2c_vrt_mem.s.data |= 1 << position;
        l2c_vrt_mem.s.parity = 0;
        /* PARITY<i> is the even parity of DATA<i*8+7:i*8>, which means
         * that each bit<i> in PARITY[0..3] is the XOR of all the bits
         * in the corresponding byte in DATA.
         */
        for (i = 0; i < 4; i++) {
            uint64_t mask = 0xffull << (i * 8);
            if ((cvmx_pop(l2c_vrt_mem.s.data & mask) & 0x1))
                l2c_vrt_mem.s.parity |= (1ull << i);
        }
        cvmx_write_csr(CVMX_L2C_VRT_MEMX(offset), l2c_vrt_mem.u64);
        start_addr += gran;
    }

    cvmx_spinlock_unlock(&cvmx_l2c_vrt_spinlock);

    return 0;
}

/**
 * Enable virtualization.
 *
 * @param mode   Whether out of bound writes are an error.
 */
void cvmx_l2c_vrt_enable(int mode)
{
    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;

    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return;

    /* Enable global virtualization */
    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
    l2c_vrt_ctl.s.ooberr = mode;
    l2c_vrt_ctl.s.enable = 1;
    cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
}
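
/*
 * Usage sketch (illustrative): the intended call sequence, pieced
 * together from the APIs above.  The sizes, core mask, and protected
 * region (base_pa/region_len) are placeholders.  Note that
 * cvmx_l2c_vrt_memprotect() requires virtualization to already be
 * enabled.
 *
 *     cvmx_l2c_vrt_set_max_virtids(4);
 *     cvmx_l2c_vrt_set_max_memsz(1);                      // 1 GB window
 *     cvmx_l2c_vrt_assign_virtid(1, 0x3);                 // cores 0-1 -> VM 1
 *     cvmx_l2c_vrt_enable(0);
 *     cvmx_l2c_vrt_memprotect(base_pa, region_len, 1, 1); // deny writes
 */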

/**
 * Disable virtualization.
 */
void cvmx_l2c_vrt_disable(void)
{
    cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;

    if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return;

    /* Disable global virtualization */
    l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
    l2c_vrt_ctl.s.enable = 0;
    cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
}
#endif