1210284Sjmallett/***********************license start***************
2215990Sjmallett * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
3215990Sjmallett * reserved.
4210284Sjmallett *
5210284Sjmallett *
6215990Sjmallett * Redistribution and use in source and binary forms, with or without
7215990Sjmallett * modification, are permitted provided that the following conditions are
8215990Sjmallett * met:
9210284Sjmallett *
10215990Sjmallett *   * Redistributions of source code must retain the above copyright
11215990Sjmallett *     notice, this list of conditions and the following disclaimer.
12210284Sjmallett *
13215990Sjmallett *   * Redistributions in binary form must reproduce the above
14215990Sjmallett *     copyright notice, this list of conditions and the following
15215990Sjmallett *     disclaimer in the documentation and/or other materials provided
16215990Sjmallett *     with the distribution.
17215990Sjmallett
18215990Sjmallett *   * Neither the name of Cavium Networks nor the names of
19215990Sjmallett *     its contributors may be used to endorse or promote products
20215990Sjmallett *     derived from this software without specific prior written
21215990Sjmallett *     permission.
22215990Sjmallett
23215990Sjmallett * This Software, including technical data, may be subject to U.S. export  control
24215990Sjmallett * laws, including the U.S. Export Administration Act and its  associated
25215990Sjmallett * regulations, and may be subject to export or import  regulations in other
26215990Sjmallett * countries.
27215990Sjmallett
28215990Sjmallett * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29215990Sjmallett * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30215990Sjmallett * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31215990Sjmallett * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32215990Sjmallett * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33215990Sjmallett * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34215990Sjmallett * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35215990Sjmallett * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36215990Sjmallett * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37215990Sjmallett * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38210284Sjmallett ***********************license end**************************************/
39210284Sjmallett
40210284Sjmallett
41210284Sjmallett
42210284Sjmallett
43210284Sjmallett
44210284Sjmallett
45215990Sjmallett
46210284Sjmallett/**
47210284Sjmallett * @file
48210284Sjmallett *
49210284Sjmallett * Implementation of the Level 2 Cache (L2C) control,
50210284Sjmallett * measurement, and debugging facilities.
51210284Sjmallett *
52215990Sjmallett * <hr>$Revision: 52004 $<hr>
53210284Sjmallett *
54210284Sjmallett */
55215990Sjmallett#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
56215990Sjmallett#include <asm/octeon/cvmx.h>
57215990Sjmallett#include <asm/octeon/cvmx-l2c.h>
58215990Sjmallett#include <asm/octeon/cvmx-spinlock.h>
59215990Sjmallett#else
60215990Sjmallett#if !defined(__FreeBSD__) || !defined(_KERNEL)
61210284Sjmallett#include "cvmx-config.h"
62215990Sjmallett#endif
63210284Sjmallett#include "cvmx.h"
64210284Sjmallett#include "cvmx-l2c.h"
65210284Sjmallett#include "cvmx-spinlock.h"
66210284Sjmallett#include "cvmx-interrupt.h"
67215990Sjmallett#endif
68210284Sjmallett
69210284Sjmallett#ifndef CVMX_BUILD_FOR_LINUX_HOST
70210284Sjmallett/* This spinlock is used internally to ensure that only one core is performing
71210284Sjmallett** certain L2 operations at a time.
72210284Sjmallett**
73210284Sjmallett** NOTE: This only protects calls from within a single application - if multiple applications
74210284Sjmallett** or operating systems are running, then it is up to the user program to coordinate between them.
75210284Sjmallett*/
76210284SjmallettCVMX_SHARED cvmx_spinlock_t cvmx_l2c_spinlock;
77210284Sjmallett#endif
78210284Sjmallett
79215990SjmallettCVMX_SHARED cvmx_spinlock_t cvmx_l2c_vrt_spinlock;
80215990Sjmallett
81210284Sjmallettint cvmx_l2c_get_core_way_partition(uint32_t core)
82210284Sjmallett{
83210284Sjmallett    uint32_t    field;
84210284Sjmallett
85210284Sjmallett    /* Validate the core number */
86210284Sjmallett    if (core >= cvmx_octeon_num_cores())
87210284Sjmallett        return -1;
88210284Sjmallett
89215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
90215990Sjmallett        return (cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff);
91215990Sjmallett
92210284Sjmallett    /* Use the lower two bits of the coreNumber to determine the bit offset
93210284Sjmallett     * of the UMSK[] field in the L2C_SPAR register.
94210284Sjmallett     */
95210284Sjmallett    field = (core & 0x3) * 8;
96210284Sjmallett
97210284Sjmallett    /* Return the UMSK[] field from the appropriate L2C_SPAR register based
98210284Sjmallett     * on the coreNumber.
99210284Sjmallett     */
100210284Sjmallett
101210284Sjmallett    switch (core & 0xC)
102210284Sjmallett    {
103210284Sjmallett        case 0x0:
104210284Sjmallett            return((cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field);
105210284Sjmallett        case 0x4:
106210284Sjmallett            return((cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field);
107210284Sjmallett        case 0x8:
108210284Sjmallett            return((cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field);
109210284Sjmallett        case 0xC:
110210284Sjmallett            return((cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field);
111210284Sjmallett    }
112210284Sjmallett    return(0);
113210284Sjmallett}
114210284Sjmallett
115210284Sjmallettint cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
116210284Sjmallett{
117210284Sjmallett    uint32_t    field;
118210284Sjmallett    uint32_t    valid_mask;
119210284Sjmallett
120210284Sjmallett    valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
121210284Sjmallett
122210284Sjmallett    mask &= valid_mask;
123210284Sjmallett
124215990Sjmallett    /* A UMSK setting which blocks all L2C Ways is an error on some chips */
125215990Sjmallett    if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
126210284Sjmallett        return -1;
127210284Sjmallett
128210284Sjmallett    /* Validate the core number */
129210284Sjmallett    if (core >= cvmx_octeon_num_cores())
130210284Sjmallett        return -1;
131210284Sjmallett
132215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
133215990Sjmallett    {
134215990Sjmallett       cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
135215990Sjmallett       return 0;
136215990Sjmallett    }
137210284Sjmallett
138210284Sjmallett    /* Use the lower two bits of core to determine the bit offset of the
139210284Sjmallett     * UMSK[] field in the L2C_SPAR register.
140210284Sjmallett     */
141210284Sjmallett    field = (core & 0x3) * 8;
142210284Sjmallett
143210284Sjmallett    /* Assign the new mask setting to the UMSK[] field in the appropriate
144210284Sjmallett     * L2C_SPAR register based on the core_num.
145210284Sjmallett     *
146210284Sjmallett     */
147210284Sjmallett    switch (core & 0xC)
148210284Sjmallett    {
149210284Sjmallett        case 0x0:
150210284Sjmallett            cvmx_write_csr(CVMX_L2C_SPAR0,
151210284Sjmallett                           (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
152210284Sjmallett                           mask << field);
153210284Sjmallett            break;
154210284Sjmallett        case 0x4:
155210284Sjmallett            cvmx_write_csr(CVMX_L2C_SPAR1,
156210284Sjmallett                           (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
157210284Sjmallett                           mask << field);
158210284Sjmallett            break;
159210284Sjmallett        case 0x8:
160210284Sjmallett            cvmx_write_csr(CVMX_L2C_SPAR2,
161210284Sjmallett                           (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
162210284Sjmallett                           mask << field);
163210284Sjmallett            break;
164210284Sjmallett        case 0xC:
165210284Sjmallett            cvmx_write_csr(CVMX_L2C_SPAR3,
166210284Sjmallett                           (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
167210284Sjmallett                           mask << field);
168210284Sjmallett            break;
169210284Sjmallett    }
170210284Sjmallett    return 0;
171210284Sjmallett}
172210284Sjmallett
173210284Sjmallett
174210284Sjmallettint cvmx_l2c_set_hw_way_partition(uint32_t mask)
175210284Sjmallett{
176210284Sjmallett    uint32_t valid_mask;
177210284Sjmallett
178210284Sjmallett    valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
179210284Sjmallett    mask &= valid_mask;
180210284Sjmallett
181215990Sjmallett    /* A UMSK setting which blocks all L2C Ways is an error on some chips */
182215990Sjmallett    if (mask == valid_mask  && !OCTEON_IS_MODEL(OCTEON_CN63XX))
183210284Sjmallett        return -1;
184210284Sjmallett
185215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
186215990Sjmallett        cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
187215990Sjmallett    else
188215990Sjmallett        cvmx_write_csr(CVMX_L2C_SPAR4, (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
189210284Sjmallett    return 0;
190210284Sjmallett}
191210284Sjmallett
192210284Sjmallettint cvmx_l2c_get_hw_way_partition(void)
193210284Sjmallett{
194215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
195215990Sjmallett        return(cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff);
196215990Sjmallett    else
197215990Sjmallett        return(cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF));
198210284Sjmallett}
199210284Sjmallett
200210284Sjmallettvoid cvmx_l2c_config_perf(uint32_t counter, cvmx_l2c_event_t event,
201210284Sjmallett                          uint32_t clear_on_read)
202215990Sjmallett{
203210284Sjmallett
204215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
205215990Sjmallett    {
206215990Sjmallett        cvmx_l2c_pfctl_t pfctl;
207210284Sjmallett
208215990Sjmallett        pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
209215990Sjmallett
210215990Sjmallett        switch (counter)
211215990Sjmallett        {
212215990Sjmallett            case 0:
213215990Sjmallett                pfctl.s.cnt0sel = event;
214215990Sjmallett                pfctl.s.cnt0ena = 1;
215210284Sjmallett                pfctl.s.cnt0rdclr = clear_on_read;
216215990Sjmallett                break;
217215990Sjmallett            case 1:
218215990Sjmallett                pfctl.s.cnt1sel = event;
219215990Sjmallett                pfctl.s.cnt1ena = 1;
220210284Sjmallett                pfctl.s.cnt1rdclr = clear_on_read;
221215990Sjmallett                break;
222215990Sjmallett            case 2:
223215990Sjmallett                pfctl.s.cnt2sel = event;
224215990Sjmallett                pfctl.s.cnt2ena = 1;
225210284Sjmallett                pfctl.s.cnt2rdclr = clear_on_read;
226215990Sjmallett                break;
227215990Sjmallett            case 3:
228215990Sjmallett            default:
229215990Sjmallett                pfctl.s.cnt3sel = event;
230215990Sjmallett                pfctl.s.cnt3ena = 1;
231210284Sjmallett                pfctl.s.cnt3rdclr = clear_on_read;
232215990Sjmallett                break;
233215990Sjmallett        }
234215990Sjmallett
235215990Sjmallett        cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
236210284Sjmallett    }
237215990Sjmallett    else
238215990Sjmallett    {
239215990Sjmallett        cvmx_l2c_tadx_prf_t l2c_tadx_prf;
240215990Sjmallett        int tad;
241210284Sjmallett
242215990Sjmallett        cvmx_warn("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
243215990Sjmallett
244215990Sjmallett        cvmx_warn_if(clear_on_read, "L2C counters don't support clear on read for this chip\n");
245215990Sjmallett
246215990Sjmallett        l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));
247215990Sjmallett
248215990Sjmallett        switch (counter)
249215990Sjmallett        {
250215990Sjmallett            case 0:
251215990Sjmallett                l2c_tadx_prf.s.cnt0sel = event;
252215990Sjmallett                break;
253215990Sjmallett            case 1:
254215990Sjmallett                l2c_tadx_prf.s.cnt1sel = event;
255215990Sjmallett                break;
256215990Sjmallett            case 2:
257215990Sjmallett                l2c_tadx_prf.s.cnt2sel = event;
258215990Sjmallett                break;
259215990Sjmallett            default:
260215990Sjmallett            case 3:
261215990Sjmallett                l2c_tadx_prf.s.cnt3sel = event;
262215990Sjmallett                break;
263215990Sjmallett        }
264215990Sjmallett        for (tad=0; tad<CVMX_L2C_TADS; tad++)
265215990Sjmallett            cvmx_write_csr(CVMX_L2C_TADX_PRF(tad), l2c_tadx_prf.u64);
266215990Sjmallett    }
267210284Sjmallett}
268210284Sjmallett
269210284Sjmallettuint64_t cvmx_l2c_read_perf(uint32_t counter)
270210284Sjmallett{
271210284Sjmallett    switch (counter)
272210284Sjmallett    {
273210284Sjmallett        case 0:
274215990Sjmallett            if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
275215990Sjmallett                return(cvmx_read_csr(CVMX_L2C_PFC0));
276215990Sjmallett            else
277215990Sjmallett            {
278215990Sjmallett                uint64_t counter = 0;
279215990Sjmallett                int tad;
280215990Sjmallett                for (tad=0; tad<CVMX_L2C_TADS; tad++)
281215990Sjmallett                    counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
282215990Sjmallett                return counter;
283215990Sjmallett            }
284210284Sjmallett        case 1:
285215990Sjmallett            if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
286215990Sjmallett                return(cvmx_read_csr(CVMX_L2C_PFC1));
287215990Sjmallett            else
288215990Sjmallett            {
289215990Sjmallett                uint64_t counter = 0;
290215990Sjmallett                int tad;
291215990Sjmallett                for (tad=0; tad<CVMX_L2C_TADS; tad++)
292215990Sjmallett                    counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
293215990Sjmallett                return counter;
294215990Sjmallett            }
295210284Sjmallett        case 2:
296215990Sjmallett            if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
297215990Sjmallett                return(cvmx_read_csr(CVMX_L2C_PFC2));
298215990Sjmallett            else
299215990Sjmallett            {
300215990Sjmallett                uint64_t counter = 0;
301215990Sjmallett                int tad;
302215990Sjmallett                for (tad=0; tad<CVMX_L2C_TADS; tad++)
303215990Sjmallett                    counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
304215990Sjmallett                return counter;
305215990Sjmallett            }
306210284Sjmallett        case 3:
307210284Sjmallett        default:
308215990Sjmallett            if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
309215990Sjmallett                return(cvmx_read_csr(CVMX_L2C_PFC3));
310215990Sjmallett            else
311215990Sjmallett            {
312215990Sjmallett                uint64_t counter = 0;
313215990Sjmallett                int tad;
314215990Sjmallett                for (tad=0; tad<CVMX_L2C_TADS; tad++)
315215990Sjmallett                    counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
316215990Sjmallett                return counter;
317215990Sjmallett            }
318210284Sjmallett    }
319210284Sjmallett}
320210284Sjmallett
321210284Sjmallett#ifndef CVMX_BUILD_FOR_LINUX_HOST
322210284Sjmallett/**
323210284Sjmallett * @INTERNAL
324210284Sjmallett * Helper function use to fault in cache lines for L2 cache locking
325210284Sjmallett *
326210284Sjmallett * @param addr   Address of base of memory region to read into L2 cache
327210284Sjmallett * @param len    Length (in bytes) of region to fault in
328210284Sjmallett */
329210284Sjmallettstatic void fault_in(uint64_t addr, int len)
330210284Sjmallett{
331210284Sjmallett    volatile char *ptr;
332210284Sjmallett    volatile char dummy;
333210284Sjmallett    /* Adjust addr and length so we get all cache lines even for
334210284Sjmallett    ** small ranges spanning two cache lines */
335210284Sjmallett    len += addr & CVMX_CACHE_LINE_MASK;
336210284Sjmallett    addr &= ~CVMX_CACHE_LINE_MASK;
337210284Sjmallett    ptr = (volatile char *)cvmx_phys_to_ptr(addr);
338210284Sjmallett    CVMX_DCACHE_INVALIDATE;  /* Invalidate L1 cache to make sure all loads result in data being in L2 */
339210284Sjmallett    while (len > 0)
340210284Sjmallett    {
341210284Sjmallett        dummy += *ptr;
342210284Sjmallett        len -= CVMX_CACHE_LINE_SIZE;
343210284Sjmallett        ptr += CVMX_CACHE_LINE_SIZE;
344210284Sjmallett    }
345210284Sjmallett}
346210284Sjmallett
347210284Sjmallettint cvmx_l2c_lock_line(uint64_t addr)
348210284Sjmallett{
349215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
350215990Sjmallett    {
351215990Sjmallett        int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
352215990Sjmallett        uint64_t assoc = cvmx_l2c_get_num_assoc();
353215990Sjmallett        uint64_t tag = addr >> shift;
354215990Sjmallett        uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT);
355215990Sjmallett        uint64_t way;
356215990Sjmallett        cvmx_l2c_tadx_tag_t l2c_tadx_tag;
357210284Sjmallett
358215990Sjmallett        CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
359210284Sjmallett
360215990Sjmallett        /* Make sure we were able to lock the line */
361215990Sjmallett        for (way = 0; way < assoc; way++)
362215990Sjmallett        {
363215990Sjmallett            CVMX_CACHE_LTGL2I(index | (way << shift), 0);
364215990Sjmallett            CVMX_SYNC;   // make sure CVMX_L2C_TADX_TAG is updated
365215990Sjmallett            l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
366215990Sjmallett            if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
367215990Sjmallett                break;
368215990Sjmallett        }
369210284Sjmallett
370215990Sjmallett        /* Check if a valid line is found */
371215990Sjmallett        if (way >= assoc)
372215990Sjmallett        {
373215990Sjmallett            //cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr);
374215990Sjmallett            return -1;
375215990Sjmallett        }
376210284Sjmallett
377215990Sjmallett        /* Check if lock bit is not set */
378215990Sjmallett        if (!l2c_tadx_tag.s.lock)
379215990Sjmallett        {
380215990Sjmallett            //cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr);
381215990Sjmallett            return -1;
382215990Sjmallett        }
383210284Sjmallett
384215990Sjmallett        return way;
385210284Sjmallett    }
386210284Sjmallett    else
387210284Sjmallett    {
388215990Sjmallett        int retval = 0;
389215990Sjmallett        cvmx_l2c_dbg_t l2cdbg;
390215990Sjmallett        cvmx_l2c_lckbase_t lckbase;
391215990Sjmallett        cvmx_l2c_lckoff_t lckoff;
392215990Sjmallett        cvmx_l2t_err_t l2t_err;
393210284Sjmallett
394215990Sjmallett        cvmx_spinlock_lock(&cvmx_l2c_spinlock);
395210284Sjmallett
396215990Sjmallett        l2cdbg.u64 = 0;
397215990Sjmallett        lckbase.u64 = 0;
398215990Sjmallett        lckoff.u64 = 0;
399210284Sjmallett
400215990Sjmallett        /* Clear l2t error bits if set */
401215990Sjmallett        l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
402215990Sjmallett        l2t_err.s.lckerr = 1;
403215990Sjmallett        l2t_err.s.lckerr2 = 1;
404215990Sjmallett        cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
405210284Sjmallett
406215990Sjmallett        addr &= ~CVMX_CACHE_LINE_MASK;
407210284Sjmallett
408215990Sjmallett        /* Set this core as debug core */
409215990Sjmallett        l2cdbg.s.ppnum = cvmx_get_core_num();
410215990Sjmallett        CVMX_SYNC;
411215990Sjmallett        cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
412215990Sjmallett        cvmx_read_csr(CVMX_L2C_DBG);
413210284Sjmallett
414215990Sjmallett        lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
415215990Sjmallett        cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
416215990Sjmallett        cvmx_read_csr(CVMX_L2C_LCKOFF);
417210284Sjmallett
418215990Sjmallett        if (((cvmx_l2c_cfg_t)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias)
419215990Sjmallett        {
420215990Sjmallett            int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
421215990Sjmallett            uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
422215990Sjmallett            lckbase.s.lck_base = addr_tmp >> 7;
423215990Sjmallett        }
424215990Sjmallett        else
425215990Sjmallett        {
426215990Sjmallett            lckbase.s.lck_base = addr >> 7;
427215990Sjmallett        }
428215990Sjmallett
429215990Sjmallett        lckbase.s.lck_ena = 1;
430215990Sjmallett        cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
431215990Sjmallett        cvmx_read_csr(CVMX_L2C_LCKBASE);    // Make sure it gets there
432215990Sjmallett
433215990Sjmallett        fault_in(addr, CVMX_CACHE_LINE_SIZE);
434215990Sjmallett
435215990Sjmallett        lckbase.s.lck_ena = 0;
436215990Sjmallett        cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
437215990Sjmallett        cvmx_read_csr(CVMX_L2C_LCKBASE);    // Make sure it gets there
438215990Sjmallett
439215990Sjmallett        /* Stop being debug core */
440215990Sjmallett        cvmx_write_csr(CVMX_L2C_DBG, 0);
441215990Sjmallett        cvmx_read_csr(CVMX_L2C_DBG);
442215990Sjmallett
443215990Sjmallett        l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
444215990Sjmallett        if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
445215990Sjmallett            retval = 1;  /* We were unable to lock the line */
446215990Sjmallett
447215990Sjmallett        cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
448215990Sjmallett        return(retval);
449215990Sjmallett    }
450210284Sjmallett}
451210284Sjmallett
452210284Sjmallett
453210284Sjmallettint cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
454210284Sjmallett{
455210284Sjmallett    int retval = 0;
456210284Sjmallett
457210284Sjmallett    /* Round start/end to cache line boundaries */
458210284Sjmallett    len += start & CVMX_CACHE_LINE_MASK;
459210284Sjmallett    start &= ~CVMX_CACHE_LINE_MASK;
460210284Sjmallett    len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
461210284Sjmallett
462210284Sjmallett    while (len)
463210284Sjmallett    {
464210284Sjmallett        retval += cvmx_l2c_lock_line(start);
465210284Sjmallett        start += CVMX_CACHE_LINE_SIZE;
466210284Sjmallett        len -= CVMX_CACHE_LINE_SIZE;
467210284Sjmallett    }
468210284Sjmallett
469210284Sjmallett    return(retval);
470210284Sjmallett}
471210284Sjmallett
472210284Sjmallett
473210284Sjmallettvoid cvmx_l2c_flush(void)
474210284Sjmallett{
475210284Sjmallett    uint64_t assoc, set;
476210284Sjmallett    uint64_t n_assoc, n_set;
477210284Sjmallett
478215990Sjmallett    n_set = cvmx_l2c_get_num_sets();
479215990Sjmallett    n_assoc = cvmx_l2c_get_num_assoc();
480210284Sjmallett
481215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
482210284Sjmallett    {
483215990Sjmallett        uint64_t address;
484215990Sjmallett        /* These may look like constants, but they aren't... */
485215990Sjmallett        int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
486215990Sjmallett        int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
487215990Sjmallett        for (set=0; set < n_set; set++)
488210284Sjmallett        {
489215990Sjmallett            for(assoc=0; assoc < n_assoc; assoc++)
490215990Sjmallett            {
491215990Sjmallett                address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
492215990Sjmallett                                       (assoc << assoc_shift) |
493215990Sjmallett                                       (set << set_shift));
494215990Sjmallett                CVMX_CACHE_WBIL2I(address, 0);
495215990Sjmallett            }
496210284Sjmallett        }
497210284Sjmallett    }
498215990Sjmallett    else
499215990Sjmallett    {
500215990Sjmallett        for (set=0; set < n_set; set++)
501215990Sjmallett            for(assoc=0; assoc < n_assoc; assoc++)
502215990Sjmallett                cvmx_l2c_flush_line(assoc, set);
503215990Sjmallett    }
504210284Sjmallett}
505210284Sjmallett
506210284Sjmallett
507210284Sjmallettint cvmx_l2c_unlock_line(uint64_t address)
508210284Sjmallett{
509210284Sjmallett
510215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
511215990Sjmallett    {
512215990Sjmallett        int assoc;         cvmx_l2c_tag_t tag;
513215990Sjmallett        uint32_t tag_addr;
514215990Sjmallett        uint32_t index = cvmx_l2c_address_to_index(address);
515210284Sjmallett
516215990Sjmallett        tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
517215990Sjmallett
518215990Sjmallett        /* For 63XX, we can flush a line by using the physical address directly,
519215990Sjmallett        ** so finding the cache line used by the address is only required to provide
520215990Sjmallett        ** the proper return value for the function.
521215990Sjmallett        */
522215990Sjmallett        for(assoc = 0; assoc < CVMX_L2_ASSOC; assoc++)
523215990Sjmallett        {
524215990Sjmallett            tag = cvmx_l2c_get_tag(assoc, index);
525215990Sjmallett
526215990Sjmallett            if (tag.s.V && (tag.s.addr == tag_addr))
527215990Sjmallett            {
528215990Sjmallett                CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
529215990Sjmallett                return tag.s.L;
530215990Sjmallett            }
531215990Sjmallett        }
532215990Sjmallett    }
533215990Sjmallett    else
534210284Sjmallett    {
535215990Sjmallett        int assoc;
536215990Sjmallett        cvmx_l2c_tag_t tag;
537215990Sjmallett        uint32_t tag_addr;
538210284Sjmallett
539215990Sjmallett        uint32_t index = cvmx_l2c_address_to_index(address);
540215990Sjmallett
541215990Sjmallett        /* Compute portion of address that is stored in tag */
542215990Sjmallett        tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
543215990Sjmallett        for(assoc = 0; assoc < CVMX_L2_ASSOC; assoc++)
544210284Sjmallett        {
545215990Sjmallett            tag = cvmx_l2c_get_tag(assoc, index);
546210284Sjmallett
547215990Sjmallett            if (tag.s.V && (tag.s.addr == tag_addr))
548215990Sjmallett            {
549215990Sjmallett                cvmx_l2c_flush_line(assoc, index);
550215990Sjmallett                return tag.s.L;
551215990Sjmallett            }
552210284Sjmallett        }
553210284Sjmallett    }
554210284Sjmallett    return 0;
555210284Sjmallett}
556210284Sjmallett
557210284Sjmallettint cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
558210284Sjmallett{
559210284Sjmallett    int num_unlocked = 0;
560210284Sjmallett    /* Round start/end to cache line boundaries */
561210284Sjmallett    len += start & CVMX_CACHE_LINE_MASK;
562210284Sjmallett    start &= ~CVMX_CACHE_LINE_MASK;
563210284Sjmallett    len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
564210284Sjmallett    while (len > 0)
565210284Sjmallett    {
566210284Sjmallett        num_unlocked += cvmx_l2c_unlock_line(start);
567210284Sjmallett        start += CVMX_CACHE_LINE_SIZE;
568210284Sjmallett        len -= CVMX_CACHE_LINE_SIZE;
569210284Sjmallett    }
570210284Sjmallett
571210284Sjmallett    return num_unlocked;
572210284Sjmallett}
573210284Sjmallett
574210284Sjmallett
575210284Sjmallett/* Internal l2c tag types.  These are converted to a generic structure
576210284Sjmallett** that can be used on all chips */
577210284Sjmalletttypedef union
578210284Sjmallett{
579210284Sjmallett    uint64_t u64;
580210284Sjmallett#if __BYTE_ORDER == __BIG_ENDIAN
581210284Sjmallett    struct cvmx_l2c_tag_cn50xx
582210284Sjmallett    {
583210284Sjmallett	uint64_t reserved		: 40;
584210284Sjmallett	uint64_t V			: 1;	// Line valid
585210284Sjmallett	uint64_t D			: 1;	// Line dirty
586210284Sjmallett	uint64_t L			: 1;	// Line locked
587210284Sjmallett	uint64_t U			: 1;	// Use, LRU eviction
588210284Sjmallett	uint64_t addr			: 20;	// Phys mem addr (33..14)
589210284Sjmallett    } cn50xx;
590210284Sjmallett    struct cvmx_l2c_tag_cn30xx
591210284Sjmallett    {
592210284Sjmallett	uint64_t reserved		: 41;
593210284Sjmallett	uint64_t V			: 1;	// Line valid
594210284Sjmallett	uint64_t D			: 1;	// Line dirty
595210284Sjmallett	uint64_t L			: 1;	// Line locked
596210284Sjmallett	uint64_t U			: 1;	// Use, LRU eviction
597210284Sjmallett	uint64_t addr			: 19;	// Phys mem addr (33..15)
598210284Sjmallett    } cn30xx;
599210284Sjmallett    struct cvmx_l2c_tag_cn31xx
600210284Sjmallett    {
601210284Sjmallett	uint64_t reserved		: 42;
602210284Sjmallett	uint64_t V			: 1;	// Line valid
603210284Sjmallett	uint64_t D			: 1;	// Line dirty
604210284Sjmallett	uint64_t L			: 1;	// Line locked
605210284Sjmallett	uint64_t U			: 1;	// Use, LRU eviction
606210284Sjmallett	uint64_t addr			: 18;	// Phys mem addr (33..16)
607210284Sjmallett    } cn31xx;
608210284Sjmallett    struct cvmx_l2c_tag_cn38xx
609210284Sjmallett    {
610210284Sjmallett	uint64_t reserved		: 43;
611210284Sjmallett	uint64_t V			: 1;	// Line valid
612210284Sjmallett	uint64_t D			: 1;	// Line dirty
613210284Sjmallett	uint64_t L			: 1;	// Line locked
614210284Sjmallett	uint64_t U			: 1;	// Use, LRU eviction
615210284Sjmallett	uint64_t addr			: 17;	// Phys mem addr (33..17)
616210284Sjmallett    } cn38xx;
617210284Sjmallett    struct cvmx_l2c_tag_cn58xx
618210284Sjmallett    {
619210284Sjmallett	uint64_t reserved		: 44;
620210284Sjmallett	uint64_t V			: 1;	// Line valid
621210284Sjmallett	uint64_t D			: 1;	// Line dirty
622210284Sjmallett	uint64_t L			: 1;	// Line locked
623210284Sjmallett	uint64_t U			: 1;	// Use, LRU eviction
624210284Sjmallett	uint64_t addr			: 16;	// Phys mem addr (33..18)
625210284Sjmallett    } cn58xx;
626210284Sjmallett    struct cvmx_l2c_tag_cn58xx   cn56xx; /* 2048 sets */
627210284Sjmallett    struct cvmx_l2c_tag_cn31xx   cn52xx; /* 512 sets */
628210284Sjmallett#endif
629210284Sjmallett} __cvmx_l2c_tag_t;
630210284Sjmallett
631210284Sjmallett
632210284Sjmallett/**
633210284Sjmallett * @INTERNAL
634210284Sjmallett * Function to read a L2C tag.  This code make the current core
635210284Sjmallett * the 'debug core' for the L2.  This code must only be executed by
636210284Sjmallett * 1 core at a time.
637210284Sjmallett *
638210284Sjmallett * @param assoc  Association (way) of the tag to dump
639210284Sjmallett * @param index  Index of the cacheline
640210284Sjmallett *
641210284Sjmallett * @return The Octeon model specific tag structure.  This is translated by a wrapper
642210284Sjmallett *         function to a generic form that is easier for applications to use.
643210284Sjmallett */
static __cvmx_l2c_tag_t __read_l2_tag(uint64_t assoc, uint64_t index)
{

    /* XKPHYS address used for the debug load: the index is placed in
       bits <...:7> (cache line granularity) and +96 selects the tag
       portion of the data returned while the L2 is in debug mode. */
    uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
    uint64_t core = cvmx_get_core_num();
    __cvmx_l2c_tag_t tag_val;            /* raw tag in the model-specific layout */
    uint64_t dbg_addr = CVMX_L2C_DBG;    /* CSR written to enter/exit debug mode */
    unsigned long flags;

    cvmx_l2c_dbg_t debug_val;
    debug_val.u64 = 0;
    /* For low core count parts, the core number is always small enough
    ** to stay in the correct field and not set any reserved bits */
    debug_val.s.ppnum = core;
    debug_val.s.l2t = 1;        /* select tag reads (not data reads) */
    debug_val.s.set = assoc;    /* way (association) to read */

    CVMX_SYNC;  /* Make sure core is quiet (no prefetches, etc.) before entering debug mode */
    CVMX_DCACHE_INVALIDATE;  /* Flush L1 to make sure debug load misses L1 */

    cvmx_local_irq_save(flags);

    /* The following must be done in assembly as when in debug mode all data loads from
    ** L2 return special debug data, not normal memory contents.  Also, interrupts must be disabled,
    ** since if an interrupt occurs while in debug mode the ISR will get debug data from all its memory
    ** reads instead of the contents of memory */

        asm volatile (
    "        .set push              \n"
    "        .set mips64              \n"
    "        .set noreorder           \n"
    "        sd    %[dbg_val], 0(%[dbg_addr])  \n"   /* Enter debug mode, wait for store */
    "        ld    $0, 0(%[dbg_addr]) \n"
    "        ld    %[tag_val], 0(%[tag_addr]) \n"   /* Read L2C tag data */
    "        sd    $0, 0(%[dbg_addr])  \n"          /* Exit debug mode, wait for store */
    "        ld    $0, 0(%[dbg_addr]) \n"
    "        cache 9, 0($0) \n"             /* Invalidate dcache to discard debug data */
    "        .set pop             \n"
    :[tag_val] "=r" (tag_val):  [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr) : "memory");

    cvmx_local_irq_restore(flags);

    return(tag_val);

}
689210284Sjmallett
690210284Sjmallett
691210284Sjmallettcvmx_l2c_tag_t cvmx_l2c_get_tag(uint32_t association, uint32_t index)
692210284Sjmallett{
693210284Sjmallett    cvmx_l2c_tag_t tag;
694210284Sjmallett    tag.u64 = 0;
695210284Sjmallett
696210284Sjmallett    if ((int)association >= cvmx_l2c_get_num_assoc())
697210284Sjmallett    {
698215990Sjmallett        cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
699210284Sjmallett        return(tag);
700210284Sjmallett    }
701210284Sjmallett    if ((int)index >= cvmx_l2c_get_num_sets())
702210284Sjmallett    {
703215990Sjmallett        cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n", (int)index, cvmx_l2c_get_num_sets());
704210284Sjmallett        return(tag);
705210284Sjmallett    }
706215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
707215990Sjmallett    {
708215990Sjmallett        cvmx_l2c_tadx_tag_t l2c_tadx_tag;
709215990Sjmallett        uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
710215990Sjmallett                                        (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
711215990Sjmallett                                        (index << CVMX_L2C_IDX_ADDR_SHIFT));
712215990Sjmallett        /* Use L2 cache Index load tag cache instruction, as hardware loads
713215990Sjmallett           the virtual tag for the L2 cache block with the contents of
714215990Sjmallett           L2C_TAD0_TAG register. */
715215990Sjmallett        CVMX_CACHE_LTGL2I(address, 0);
716215990Sjmallett        CVMX_SYNC;   // make sure CVMX_L2C_TADX_TAG is updated
717215990Sjmallett        l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
718210284Sjmallett
719215990Sjmallett        tag.s.V     = l2c_tadx_tag.s.valid;
720215990Sjmallett        tag.s.D     = l2c_tadx_tag.s.dirty;
721215990Sjmallett        tag.s.L     = l2c_tadx_tag.s.lock;
722215990Sjmallett        tag.s.U     = l2c_tadx_tag.s.use;
723215990Sjmallett        tag.s.addr  = l2c_tadx_tag.s.tag;
724210284Sjmallett    }
725210284Sjmallett    else
726210284Sjmallett    {
727215990Sjmallett        __cvmx_l2c_tag_t tmp_tag;
728215990Sjmallett        /* __read_l2_tag is intended for internal use only */
729215990Sjmallett        tmp_tag = __read_l2_tag(association, index);
730215990Sjmallett
731215990Sjmallett        /* Convert all tag structure types to generic version, as it can represent all models */
732215990Sjmallett        if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))
733215990Sjmallett        {
734215990Sjmallett            tag.s.V    = tmp_tag.cn58xx.V;
735215990Sjmallett            tag.s.D    = tmp_tag.cn58xx.D;
736215990Sjmallett            tag.s.L    = tmp_tag.cn58xx.L;
737215990Sjmallett            tag.s.U    = tmp_tag.cn58xx.U;
738215990Sjmallett            tag.s.addr = tmp_tag.cn58xx.addr;
739215990Sjmallett        }
740215990Sjmallett        else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
741215990Sjmallett        {
742215990Sjmallett            tag.s.V    = tmp_tag.cn38xx.V;
743215990Sjmallett            tag.s.D    = tmp_tag.cn38xx.D;
744215990Sjmallett            tag.s.L    = tmp_tag.cn38xx.L;
745215990Sjmallett            tag.s.U    = tmp_tag.cn38xx.U;
746215990Sjmallett            tag.s.addr = tmp_tag.cn38xx.addr;
747215990Sjmallett        }
748215990Sjmallett        else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
749215990Sjmallett        {
750215990Sjmallett            tag.s.V    = tmp_tag.cn31xx.V;
751215990Sjmallett            tag.s.D    = tmp_tag.cn31xx.D;
752215990Sjmallett            tag.s.L    = tmp_tag.cn31xx.L;
753215990Sjmallett            tag.s.U    = tmp_tag.cn31xx.U;
754215990Sjmallett            tag.s.addr = tmp_tag.cn31xx.addr;
755215990Sjmallett        }
756215990Sjmallett        else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
757215990Sjmallett        {
758215990Sjmallett            tag.s.V    = tmp_tag.cn30xx.V;
759215990Sjmallett            tag.s.D    = tmp_tag.cn30xx.D;
760215990Sjmallett            tag.s.L    = tmp_tag.cn30xx.L;
761215990Sjmallett            tag.s.U    = tmp_tag.cn30xx.U;
762215990Sjmallett            tag.s.addr = tmp_tag.cn30xx.addr;
763215990Sjmallett        }
764215990Sjmallett        else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
765215990Sjmallett        {
766215990Sjmallett            tag.s.V    = tmp_tag.cn50xx.V;
767215990Sjmallett            tag.s.D    = tmp_tag.cn50xx.D;
768215990Sjmallett            tag.s.L    = tmp_tag.cn50xx.L;
769215990Sjmallett            tag.s.U    = tmp_tag.cn50xx.U;
770215990Sjmallett            tag.s.addr = tmp_tag.cn50xx.addr;
771215990Sjmallett        }
772215990Sjmallett        else
773215990Sjmallett        {
774215990Sjmallett            cvmx_dprintf("Unsupported OCTEON Model in %s\n", __FUNCTION__);
775215990Sjmallett        }
776210284Sjmallett    }
777210284Sjmallett
778210284Sjmallett    return tag;
779210284Sjmallett}
780210284Sjmallett
781210284Sjmallett#endif
782210284Sjmallett
783210284Sjmallettuint32_t cvmx_l2c_address_to_index (uint64_t addr)
784210284Sjmallett{
785210284Sjmallett    uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
786215990Sjmallett    int indxalias = 0;
787210284Sjmallett
788215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
789210284Sjmallett    {
790215990Sjmallett        cvmx_l2c_ctl_t l2c_ctl;
791215990Sjmallett        l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
792215990Sjmallett        indxalias = !l2c_ctl.s.disidxalias;
793210284Sjmallett    }
794215990Sjmallett    else
795215990Sjmallett    {
796215990Sjmallett        cvmx_l2c_cfg_t l2c_cfg;
797215990Sjmallett        l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
798215990Sjmallett        indxalias = l2c_cfg.s.idxalias;
799215990Sjmallett    }
800215990Sjmallett
801215990Sjmallett    if (indxalias)
802215990Sjmallett    {
803215990Sjmallett        if (OCTEON_IS_MODEL(OCTEON_CN63XX))
804215990Sjmallett        {
805215990Sjmallett            uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
806215990Sjmallett            idx ^= idx / cvmx_l2c_get_num_sets();
807215990Sjmallett            idx ^= a_14_12;
808215990Sjmallett        }
809215990Sjmallett        else
810215990Sjmallett        {
811215990Sjmallett            idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
812215990Sjmallett        }
813215990Sjmallett    }
814210284Sjmallett    idx &= CVMX_L2C_IDX_MASK;
815210284Sjmallett    return(idx);
816210284Sjmallett}
817210284Sjmallett
818210284Sjmallettint cvmx_l2c_get_cache_size_bytes(void)
819210284Sjmallett{
820210284Sjmallett    return (cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() * CVMX_CACHE_LINE_SIZE);
821210284Sjmallett}
822210284Sjmallett
823210284Sjmallett/**
824210284Sjmallett * Return log base 2 of the number of sets in the L2 cache
825210284Sjmallett * @return
826210284Sjmallett */
827210284Sjmallettint cvmx_l2c_get_set_bits(void)
828210284Sjmallett{
829210284Sjmallett    int l2_set_bits;
830210284Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
831210284Sjmallett        OCTEON_IS_MODEL(OCTEON_CN58XX))
832210284Sjmallett        l2_set_bits =  11; /* 2048 sets */
833215990Sjmallett    else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
834210284Sjmallett        l2_set_bits =  10; /* 1024 sets */
835210284Sjmallett    else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
836210284Sjmallett        l2_set_bits =  9; /* 512 sets */
837210284Sjmallett    else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
838210284Sjmallett        l2_set_bits =  8; /* 256 sets */
839210284Sjmallett    else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
840210284Sjmallett        l2_set_bits =  7; /* 128 sets */
841210284Sjmallett    else
842210284Sjmallett    {
843210284Sjmallett        cvmx_dprintf("Unsupported OCTEON Model in %s\n", __FUNCTION__);
844210284Sjmallett        l2_set_bits =  11; /* 2048 sets */
845210284Sjmallett    }
846210284Sjmallett    return(l2_set_bits);
847210284Sjmallett
848210284Sjmallett}
849210284Sjmallett
850210284Sjmallett/* Return the number of sets in the L2 Cache */
int cvmx_l2c_get_num_sets(void)
{
    /* Sets are always a power of two; derive the count from log2. */
    return 1 << cvmx_l2c_get_set_bits();
}
855210284Sjmallett
856210284Sjmallett/* Return the number of associations in the L2 Cache */
857210284Sjmallettint cvmx_l2c_get_num_assoc(void)
858210284Sjmallett{
859210284Sjmallett    int l2_assoc;
860210284Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
861210284Sjmallett        OCTEON_IS_MODEL(OCTEON_CN52XX) ||
862210284Sjmallett        OCTEON_IS_MODEL(OCTEON_CN58XX) ||
863210284Sjmallett        OCTEON_IS_MODEL(OCTEON_CN50XX) ||
864210284Sjmallett        OCTEON_IS_MODEL(OCTEON_CN38XX))
865210284Sjmallett        l2_assoc =  8;
866215990Sjmallett    else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
867215990Sjmallett        l2_assoc =  16;
868215990Sjmallett    else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
869210284Sjmallett             OCTEON_IS_MODEL(OCTEON_CN30XX))
870210284Sjmallett        l2_assoc =  4;
871210284Sjmallett    else
872210284Sjmallett    {
873210284Sjmallett        cvmx_dprintf("Unsupported OCTEON Model in %s\n", __FUNCTION__);
874210284Sjmallett        l2_assoc =  8;
875210284Sjmallett    }
876210284Sjmallett
877210284Sjmallett    /* Check to see if part of the cache is disabled */
878215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
879215990Sjmallett    {
880215990Sjmallett        cvmx_mio_fus_dat3_t mio_fus_dat3;
881210284Sjmallett
882215990Sjmallett        mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
883215990Sjmallett        /* cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows
884215990Sjmallett           <2> will be not used for 63xx
885215990Sjmallett           <1> disables 1/2 ways
886215990Sjmallett           <0> disables 1/4 ways
887215990Sjmallett           They are cumulative, so for 63xx:
888215990Sjmallett           <1> <0>
889215990Sjmallett           0 0 16-way 2MB cache
890215990Sjmallett           0 1 12-way 1.5MB cache
891215990Sjmallett           1 0 8-way 1MB cache
892215990Sjmallett           1 1 4-way 512KB cache */
893215990Sjmallett
894215990Sjmallett        if (mio_fus_dat3.s.l2c_crip == 3)
895215990Sjmallett            l2_assoc = 4;
896215990Sjmallett        else if (mio_fus_dat3.s.l2c_crip == 2)
897215990Sjmallett            l2_assoc = 8;
898215990Sjmallett        else if (mio_fus_dat3.s.l2c_crip == 1)
899215990Sjmallett            l2_assoc = 12;
900215990Sjmallett    }
901215990Sjmallett    else
902215990Sjmallett    {
903215990Sjmallett        cvmx_l2d_fus3_t val;
904215990Sjmallett        val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
905215990Sjmallett        /* Using shifts here, as bit position names are different for
906215990Sjmallett           each model but they all mean the same. */
907215990Sjmallett        if ((val.u64 >> 35) & 0x1)
908215990Sjmallett            l2_assoc = l2_assoc >> 2;
909215990Sjmallett        else if ((val.u64 >> 34) & 0x1)
910215990Sjmallett            l2_assoc = l2_assoc >> 1;
911215990Sjmallett    }
912215990Sjmallett
913210284Sjmallett    return(l2_assoc);
914210284Sjmallett}
915210284Sjmallett
916210284Sjmallett
917210284Sjmallett#ifndef CVMX_BUILD_FOR_LINUX_HOST
918210284Sjmallett/**
919210284Sjmallett * Flush a line from the L2 cache
920210284Sjmallett * This should only be called from one core at a time, as this routine
921210284Sjmallett * sets the core to the 'debug' core in order to flush the line.
922210284Sjmallett *
923210284Sjmallett * @param assoc  Association (or way) to flush
924210284Sjmallett * @param index  Index to flush
925210284Sjmallett */
926210284Sjmallettvoid cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
927210284Sjmallett{
928215990Sjmallett    /* Check the range of the index. */
929215990Sjmallett    if (index > (uint32_t)cvmx_l2c_get_num_sets())
930215990Sjmallett    {
931215990Sjmallett        cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
932215990Sjmallett        return;
933215990Sjmallett    }
934210284Sjmallett
935215990Sjmallett    /* Check the range of association. */
936215990Sjmallett    if (assoc > (uint32_t)cvmx_l2c_get_num_assoc())
937215990Sjmallett    {
938215990Sjmallett        cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
939215990Sjmallett        return;
940215990Sjmallett    }
941210284Sjmallett
942215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
943215990Sjmallett    {
944215990Sjmallett        uint64_t address;
945215990Sjmallett        /* Create the address based on index and association.
946215990Sjmallett           Bits<20:17> select the way of the cache block involved in
947215990Sjmallett                       the operation
948215990Sjmallett           Bits<16:7> of the effect address select the index */
949215990Sjmallett        address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
950215990Sjmallett                               (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
951215990Sjmallett                               (index << CVMX_L2C_IDX_ADDR_SHIFT));
952215990Sjmallett        CVMX_CACHE_WBIL2I(address, 0);
953215990Sjmallett    }
954215990Sjmallett    else
955215990Sjmallett    {
956215990Sjmallett        cvmx_l2c_dbg_t l2cdbg;
957210284Sjmallett
958215990Sjmallett        l2cdbg.u64 = 0;
959215990Sjmallett        if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
960215990Sjmallett            l2cdbg.s.ppnum = cvmx_get_core_num();
961215990Sjmallett        l2cdbg.s.finv = 1;
962215990Sjmallett
963215990Sjmallett        l2cdbg.s.set = assoc;
964215990Sjmallett        cvmx_spinlock_lock(&cvmx_l2c_spinlock);
965215990Sjmallett        /* Enter debug mode, and make sure all other writes complete before we
966215990Sjmallett        ** enter debug mode */
967215990Sjmallett        CVMX_SYNC;
968215990Sjmallett        cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
969215990Sjmallett        cvmx_read_csr(CVMX_L2C_DBG);
970215990Sjmallett
971215990Sjmallett        CVMX_PREPARE_FOR_STORE (CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, index*CVMX_CACHE_LINE_SIZE), 0);
972215990Sjmallett        /* Exit debug mode */
973215990Sjmallett        CVMX_SYNC;
974215990Sjmallett        cvmx_write_csr(CVMX_L2C_DBG, 0);
975215990Sjmallett        cvmx_read_csr(CVMX_L2C_DBG);
976215990Sjmallett        cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
977215990Sjmallett    }
978210284Sjmallett}
979210284Sjmallett#endif
980215990Sjmallett
981215990Sjmallett#ifndef CVMX_BUILD_FOR_LINUX_HOST
982215990Sjmallett
983215990Sjmallett/* L2C Virtualization APIs. These APIs are based on Octeon II documentation. */
984215990Sjmallett
/**
 * @INTERNAL
 * Helper function to decode VALUE to number of allowed virtualization IDS.
 * Returns L2C_VRT_CTL[NUMID].
 *
 * For nvid a power of two >= 2 the result is log2(nvid) - 1, matching
 * the hardware encoding (max ids = 1 << (NUMID + 1)).  Other values in
 * range, including nvid == 1, decode to -1 (failure).
 *
 * @param nvid     Number of virtual Ids.
 * @return         On success decode to NUMID, or to -1 on failure.
 */
static inline int __cvmx_l2c_vrt_decode_numid(int nvid)
{
    /* Both counters intentionally start at -1, so after the loop they
       hold one less than the true total-bit and zero-bit counts; the
       final checks depend on these offsets. */
    int bits = -1;
    int zero_bits = -1;

    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
    {
        if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED)
        {
            cvmx_dprintf("WARNING: Invalid number of virtual ids(%d) requested, should be <= 64\n", nvid);
            return bits;
        }

        /* Scan nvid bit by bit: bits ends at (total bits - 1) and
           zero_bits at (zero bits - 1). */
        while (nvid)
        {
           if ((nvid & 1) == 0)
              zero_bits++;

            bits++;
            nvid >>= 1;
        }

        /* Power-of-two check: nvid == 2 hits "bits == 1" with
           zero_bits == 0; larger powers of two hit the second clause.
           In both cases zero_bits == log2(nvid) - 1.  Note some failing
           inputs (e.g. nvid == 1 or 3) also reach this return, but then
           zero_bits is -1, which is the failure value anyway. */
        if (bits == 1 || (zero_bits && ((bits - zero_bits) == 1)))
            return zero_bits;
    }
    return -1;
}
1020215990Sjmallett
1021215990Sjmallett/**
1022215990Sjmallett * Set maxium number of Virtual IDs allowed in a machine.
1023215990Sjmallett *
1024215990Sjmallett * @param nvid   Number of virtial ids allowed in a machine.
1025215990Sjmallett * @return       Return 0 on success or -1 on failure.
1026215990Sjmallett */
1027215990Sjmallettint cvmx_l2c_vrt_set_max_virtids(int nvid)
1028215990Sjmallett{
1029215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN63XX))
1030215990Sjmallett    {
1031215990Sjmallett        cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1032215990Sjmallett
1033215990Sjmallett        l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1034215990Sjmallett
1035215990Sjmallett        if (l2c_vrt_ctl.s.enable)
1036215990Sjmallett        {
1037215990Sjmallett            cvmx_dprintf("WARNING: Changing number of Virtual Machine IDs is not allowed after Virtualization is enabled\n");
1038215990Sjmallett            return -1;
1039215990Sjmallett        }
1040215990Sjmallett
1041215990Sjmallett        if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED)
1042215990Sjmallett        {
1043215990Sjmallett            cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_max_virtids: Invalid number of Virtual Machine IDs(%d) requested, max allowed %d\n", nvid, CVMX_L2C_VRT_MAX_VIRTID_ALLOWED);
1044215990Sjmallett            return -1;
1045215990Sjmallett        }
1046215990Sjmallett
1047215990Sjmallett        /* Calculate the numid based on nvid */
1048215990Sjmallett        l2c_vrt_ctl.s.numid = __cvmx_l2c_vrt_decode_numid(nvid);
1049215990Sjmallett        cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1050215990Sjmallett    }
1051215990Sjmallett    return 0;
1052215990Sjmallett}
1053215990Sjmallett
1054215990Sjmallett/**
1055215990Sjmallett * Get maxium number of virtual IDs allowed in a machine.
1056215990Sjmallett *
1057215990Sjmallett * @return  Return number of virtual machine IDs or -1 on failure.
1058215990Sjmallett */
1059215990Sjmallettint cvmx_l2c_vrt_get_max_virtids(void)
1060215990Sjmallett{
1061215990Sjmallett    int virtids = -1;
1062215990Sjmallett
1063215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1064215990Sjmallett    {
1065215990Sjmallett        cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1066215990Sjmallett        l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1067215990Sjmallett        virtids = 1 << (l2c_vrt_ctl.s.numid + 1);
1068215990Sjmallett        if (virtids > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED)
1069215990Sjmallett        {
1070215990Sjmallett            cvmx_dprintf("WARNING: cvmx_l2c_vrt_get_max_virtids: Invalid number of Virtual IDs initialized (%d)\n", virtids);
1071215990Sjmallett            return -1;
1072215990Sjmallett        }
1073215990Sjmallett    }
1074215990Sjmallett    return virtids;
1075215990Sjmallett}
1076215990Sjmallett
1077215990Sjmallett/**
1078215990Sjmallett * @INTERNAL
1079215990Sjmallett * Helper function to decode VALUE to memory space coverage of L2C_VRT_MEM.
1080215990Sjmallett * Returns L2C_VRT_CTL[MEMSZ].
1081215990Sjmallett *
1082215990Sjmallett * @param memsz    Memory in GB.
1083215990Sjmallett * @return         On success, decode to MEMSZ, or on failure return -1.
1084215990Sjmallett */
1085215990Sjmallettstatic inline int __cvmx_l2c_vrt_decode_memsize(int memsz)
1086215990Sjmallett{
1087215990Sjmallett    int bits = 0;
1088215990Sjmallett    int zero_bits = 0;
1089215990Sjmallett
1090215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1091215990Sjmallett    {
1092215990Sjmallett        if (memsz == 0 || memsz > CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED)
1093215990Sjmallett        {
1094215990Sjmallett            cvmx_dprintf("WARNING: Invalid virtual memory size(%d) requested, should be <= %d\n", memsz, CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED);
1095215990Sjmallett            return -1;
1096215990Sjmallett        }
1097215990Sjmallett
1098215990Sjmallett        while (memsz)
1099215990Sjmallett        {
1100215990Sjmallett           if ((memsz & 1) == 0)
1101215990Sjmallett              zero_bits++;
1102215990Sjmallett
1103215990Sjmallett            bits++;
1104215990Sjmallett            memsz >>= 1;
1105215990Sjmallett        }
1106215990Sjmallett
1107215990Sjmallett        if (bits == 1 || (bits - zero_bits) == 1)
1108215990Sjmallett            return zero_bits;
1109215990Sjmallett    }
1110215990Sjmallett    return -1;
1111215990Sjmallett}
1112215990Sjmallett
1113215990Sjmallett/**
1114215990Sjmallett * Set the maxium size of memory space to be allocated for virtualization.
1115215990Sjmallett *
1116215990Sjmallett * @param memsz  Size of the virtual memory in GB
1117215990Sjmallett * @return       Return 0 on success or -1 on failure.
1118215990Sjmallett */
1119215990Sjmallettint cvmx_l2c_vrt_set_max_memsz(int memsz)
1120215990Sjmallett{
1121215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1122215990Sjmallett    {
1123215990Sjmallett        cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1124215990Sjmallett        int decode = 0;
1125215990Sjmallett
1126215990Sjmallett        l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1127215990Sjmallett
1128215990Sjmallett        if (l2c_vrt_ctl.s.enable)
1129215990Sjmallett        {
1130215990Sjmallett            cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Changing the size of the memory after Virtualization is enabled is not allowed.\n");
1131215990Sjmallett            return -1;
1132215990Sjmallett        }
1133215990Sjmallett
1134215990Sjmallett        if (memsz >= (int)(cvmx_sysinfo_get()->system_dram_size / 1000000))
1135215990Sjmallett        {
1136215990Sjmallett            cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Invalid memory size (%d GB), greater than available on the chip\n", memsz);
1137215990Sjmallett            return -1;
1138215990Sjmallett        }
1139215990Sjmallett
1140215990Sjmallett        decode = __cvmx_l2c_vrt_decode_memsize(memsz);
1141215990Sjmallett        if (decode == -1)
1142215990Sjmallett        {
1143215990Sjmallett            cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Invalid memory size (%d GB), refer to L2C_VRT_CTL[MEMSZ] for more information\n", memsz);
1144215990Sjmallett            return -1;
1145215990Sjmallett        }
1146215990Sjmallett
1147215990Sjmallett        l2c_vrt_ctl.s.memsz = decode;
1148215990Sjmallett        cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1149215990Sjmallett    }
1150215990Sjmallett    return 0;
1151215990Sjmallett}
1152215990Sjmallett
/**
 * Set a Virtual ID to a set of cores.
 *
 * Writes L2C_VIRTID_PP(core) for every core selected by coremask and
 * puts the matching L2C_VIRTID_IOB registers into normal mode.  If a
 * core already carries a non-zero virtid, the whole L2 is flushed
 * before the id is overwritten, to avoid write errors.
 *
 * @param virtid    Assign virtid to a set of cores.
 * @param coremask  The group of cores to assign a unique virtual id.
 * @return          Return 0 on success, otherwise -1.
 */
int cvmx_l2c_vrt_assign_virtid(int virtid, uint32_t coremask)
{
    uint32_t core = 0;

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
    {
        int found = 0;   /* at least one bit in coremask matched a core */
        int max_virtid = cvmx_l2c_vrt_get_max_virtids();

        if (virtid > max_virtid)
        {
            cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Max %d number of virtids are allowed, passed %d.\n", max_virtid, virtid);
            return -1;
        }

        /* Walk every present core and update the ones in coremask. */
        while (core < cvmx_octeon_num_cores())
        {
            if ((coremask >> core) & 1)
            {
                cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
                cvmx_l2c_virtid_iobx_t l2c_virtid_iobx;
                l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));

                /* Check if the core already has a virtid assigned. */
                if (l2c_virtid_ppx.s.id)
                {
                    cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Changing virtid of core #%d to %d from %d.\n",
                        (unsigned int)core, virtid, l2c_virtid_ppx.s.id);

                    /* Flush L2 cache to avoid write errors */
                    cvmx_l2c_flush();
                }
                /* Only the low 6 bits of the id are significant. */
                cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), virtid & 0x3f);

                /* Set the IOB to normal mode. */
                l2c_virtid_iobx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_IOBX(core));
                l2c_virtid_iobx.s.id = 1;
                l2c_virtid_iobx.s.dwbid = 0;
                cvmx_write_csr(CVMX_L2C_VIRTID_IOBX(core), l2c_virtid_iobx.u64);
                found = 1;
            }
            core++;
        }

        /* Invalid coremask passed. */
        if (!found)
        {
           cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Invalid coremask(0x%x) passed\n", (unsigned int)coremask);
           return -1;
        }
    }
    return 0;
}
1213215990Sjmallett
1214215990Sjmallett/**
1215215990Sjmallett * Remove a virt id assigned to a set of cores. Update the virtid mask and
1216215990Sjmallett * virtid stored for each core.
1217215990Sjmallett *
1218215990Sjmallett * @param virtid  Remove the specified Virtualization machine ID.
1219215990Sjmallett */
1220215990Sjmallettvoid cvmx_l2c_vrt_remove_virtid(int virtid)
1221215990Sjmallett{
1222215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1223215990Sjmallett    {
1224215990Sjmallett        uint32_t core;
1225215990Sjmallett        cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
1226215990Sjmallett
1227215990Sjmallett        for (core = 0; core < cvmx_octeon_num_cores(); core++)
1228215990Sjmallett        {
1229215990Sjmallett            l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
1230215990Sjmallett            if (virtid == l2c_virtid_ppx.s.id)
1231215990Sjmallett                cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), 0);
1232215990Sjmallett        }
1233215990Sjmallett    }
1234215990Sjmallett}
1235215990Sjmallett
1236215990Sjmallett/**
1237215990Sjmallett * Helper function to protect the memory region based on the granularity.
1238215990Sjmallett */
1239215990Sjmallettstatic uint64_t __cvmx_l2c_vrt_get_granularity(void)
1240215990Sjmallett{
1241215990Sjmallett    uint64_t gran = 0;
1242215990Sjmallett
1243215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1244215990Sjmallett    {
1245215990Sjmallett        int nvid;
1246215990Sjmallett        uint64_t szd;
1247215990Sjmallett        cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1248215990Sjmallett
1249215990Sjmallett        l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1250215990Sjmallett        nvid = cvmx_l2c_vrt_get_max_virtids();
1251215990Sjmallett        szd = (1ull << l2c_vrt_ctl.s.memsz) * 1024 * 1024 * 1024;
1252215990Sjmallett        gran = (unsigned long long)(szd * nvid)/(32ull * 1024);
1253215990Sjmallett    }
1254215990Sjmallett    return gran;
1255215990Sjmallett}
1256215990Sjmallett
1257215990Sjmallett/**
1258215990Sjmallett * Block a memory region to be updated for a given virtual id.
1259215990Sjmallett *
1260215990Sjmallett * @param start_addr   Starting address of memory region
1261215990Sjmallett * @param size         Size of the memory to protect
1262215990Sjmallett * @param virtid       Virtual ID to use
1263215990Sjmallett * @param mode         Allow/Disallow write access
1264215990Sjmallett *                        = 0,  Allow write access by virtid
1265215990Sjmallett *                        = 1,  Disallow write access by virtid
1266215990Sjmallett */
1267215990Sjmallettint cvmx_l2c_vrt_memprotect(uint64_t start_addr, int size, int virtid, int mode)
1268215990Sjmallett{
1269215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1270215990Sjmallett    {
1271215990Sjmallett        /* Check the alignment of start address, should be aligned to the
1272215990Sjmallett           granularity. */
1273215990Sjmallett        uint64_t gran = __cvmx_l2c_vrt_get_granularity();
1274215990Sjmallett        uint64_t end_addr = start_addr + size;
1275215990Sjmallett        int byte_offset, virtid_offset;
1276215990Sjmallett        cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1277215990Sjmallett        cvmx_l2c_vrt_memx_t l2c_vrt_mem;
1278215990Sjmallett
1279215990Sjmallett        l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1280215990Sjmallett
1281215990Sjmallett        /* No need to protect if virtualization is not enabled */
1282215990Sjmallett        if (!l2c_vrt_ctl.s.enable)
1283215990Sjmallett        {
1284215990Sjmallett            cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization is not enabled.\n");
1285215990Sjmallett            return -1;
1286215990Sjmallett        }
1287215990Sjmallett
1288215990Sjmallett        if (virtid > cvmx_l2c_vrt_get_max_virtids())
1289215990Sjmallett        {
1290215990Sjmallett            cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id is greater than max allowed\n");
1291215990Sjmallett            return -1;
1292215990Sjmallett        }
1293215990Sjmallett
1294215990Sjmallett        /* No need to protect if virtid is not assigned to a core */
1295215990Sjmallett        {
1296215990Sjmallett            cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
1297215990Sjmallett            int found = 0;
1298215990Sjmallett            uint32_t core;
1299215990Sjmallett
1300215990Sjmallett            for (core = 0; core < cvmx_octeon_num_cores(); core++)
1301215990Sjmallett            {
1302215990Sjmallett                l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
1303215990Sjmallett                if (l2c_virtid_ppx.s.id == virtid)
1304215990Sjmallett                {
1305215990Sjmallett                    found = 1;
1306215990Sjmallett                    break;
1307215990Sjmallett                }
1308215990Sjmallett            }
1309215990Sjmallett            if (found == 0)
1310215990Sjmallett            {
1311215990Sjmallett                cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id (%d) is not assigned to any core.\n", virtid);
1312215990Sjmallett                return -1;
1313215990Sjmallett            }
1314215990Sjmallett        }
1315215990Sjmallett
1316215990Sjmallett        /* Make sure previous stores are through before protecting the memory. */
1317215990Sjmallett        CVMX_SYNCW;
1318215990Sjmallett
1319215990Sjmallett        /* If the L2/DRAM physical address is >= 512 MB, subtract 256 MB
1320215990Sjmallett           to get the address to use. This is because L2C removes the 256MB
1321215990Sjmallett           "hole" between DR0 and DR1. */
1322215990Sjmallett        if (start_addr >= (512 * 1024 * 1024))
1323215990Sjmallett            start_addr -= 256 * 1024 * 1024;
1324215990Sjmallett
1325215990Sjmallett        if (start_addr != ((start_addr + (gran - 1)) & ~(gran - 1)))
1326215990Sjmallett        {
1327215990Sjmallett            cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Start address is not aligned\n");
1328215990Sjmallett            return -1;
1329215990Sjmallett        }
1330215990Sjmallett
1331215990Sjmallett        /* Check the size of the memory to protect, should be aligned to the
1332215990Sjmallett           granularity. */
1333215990Sjmallett        if (end_addr != ((end_addr + (gran - 1)) & ~(gran - 1)))
1334215990Sjmallett        {
1335215990Sjmallett            end_addr = (start_addr + (gran - 1)) & ~(gran - 1);
1336215990Sjmallett            size = start_addr - end_addr;
1337215990Sjmallett        }
1338215990Sjmallett
1339215990Sjmallett        byte_offset = l2c_vrt_ctl.s.memsz + l2c_vrt_ctl.s.numid + 16;
1340215990Sjmallett        virtid_offset = 14 - l2c_vrt_ctl.s.numid;
1341215990Sjmallett
1342215990Sjmallett        cvmx_spinlock_lock(&cvmx_l2c_vrt_spinlock);
1343215990Sjmallett
1344215990Sjmallett        /* Enable memory protection for each virtid for the specified range. */
1345215990Sjmallett        while (start_addr < end_addr)
1346215990Sjmallett        {
1347215990Sjmallett            /* When L2C virtualization is enabled and a bit is set in
1348215990Sjmallett               L2C_VRT_MEM(0..1023), then L2C prevents the selected virtual
1349215990Sjmallett               machine from storing to the selected L2C/DRAM region. */
1350215990Sjmallett            int offset, position, i;
1351215990Sjmallett            int l2c_vrt_mem_bit_index = start_addr >> byte_offset;
1352215990Sjmallett            l2c_vrt_mem_bit_index |= (virtid << virtid_offset);
1353215990Sjmallett
1354215990Sjmallett            offset = l2c_vrt_mem_bit_index >> 5;
1355215990Sjmallett            position = l2c_vrt_mem_bit_index & 0x1f;
1356215990Sjmallett
1357215990Sjmallett            l2c_vrt_mem.u64 = cvmx_read_csr(CVMX_L2C_VRT_MEMX(offset));
1358215990Sjmallett            /* Allow/Disallow write access to memory. */
1359215990Sjmallett            if (mode == 0)
1360215990Sjmallett                l2c_vrt_mem.s.data &= ~(1 << position);
1361215990Sjmallett            else
1362215990Sjmallett                l2c_vrt_mem.s.data |= 1 << position;
1363215990Sjmallett            l2c_vrt_mem.s.parity = 0;
1364215990Sjmallett            /* PARITY<i> is the even parity of DATA<i*8+7:i*8>, which means
1365215990Sjmallett               that each bit<i> in PARITY[0..3], is the XOR of all the bits
1366215990Sjmallett               in the corresponding byte in DATA. */
1367215990Sjmallett            for (i = 0; i <= 4; i++)
1368215990Sjmallett            {
1369215990Sjmallett               uint64_t mask = 0xffull << (i*8);
1370215990Sjmallett               if ((cvmx_pop(l2c_vrt_mem.s.data & mask) & 0x1))
1371215990Sjmallett                   l2c_vrt_mem.s.parity |= (1ull << i);
1372215990Sjmallett            }
1373215990Sjmallett            cvmx_write_csr(CVMX_L2C_VRT_MEMX(offset), l2c_vrt_mem.u64);
1374215990Sjmallett            start_addr += gran;
1375215990Sjmallett        }
1376215990Sjmallett
1377215990Sjmallett        cvmx_spinlock_unlock(&cvmx_l2c_vrt_spinlock);
1378215990Sjmallett    }
1379215990Sjmallett    return 0;
1380215990Sjmallett}
1381215990Sjmallett#endif
1382215990Sjmallett
1383215990Sjmallett/**
1384215990Sjmallett * Enable virtualization.
1385215990Sjmallett *
1386215990Sjmallett * @param mode   Whether out of bound writes are an error.
1387215990Sjmallett */
1388215990Sjmallettvoid cvmx_l2c_vrt_enable(int mode)
1389215990Sjmallett{
1390215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1391215990Sjmallett    {
1392215990Sjmallett        cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1393215990Sjmallett
1394215990Sjmallett        /* Enable global virtualization */
1395215990Sjmallett        l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1396215990Sjmallett        l2c_vrt_ctl.s.ooberr = mode;
1397215990Sjmallett        l2c_vrt_ctl.s.enable = 1;
1398215990Sjmallett        cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1399215990Sjmallett    }
1400215990Sjmallett}
1401215990Sjmallett
1402215990Sjmallett/**
1403215990Sjmallett * Disable virtualization.
1404215990Sjmallett */
1405215990Sjmallettvoid cvmx_l2c_vrt_disable(void)
1406215990Sjmallett{
1407215990Sjmallett    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1408215990Sjmallett    {
1409215990Sjmallett        cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1410215990Sjmallett
1411215990Sjmallett        /* Disable global virtualization */
1412215990Sjmallett        l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1413215990Sjmallett        l2c_vrt_ctl.s.enable = 0;
1414215990Sjmallett        cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1415215990Sjmallett    }
1416215990Sjmallett}
1417