path: root/sys/contrib/octeon-sdk/cvmx-l2c.c
author    jmallett <jmallett@FreeBSD.org>  2010-11-28 08:18:16 +0000
committer jmallett <jmallett@FreeBSD.org>  2010-11-28 08:18:16 +0000
commit    cdfefa0ba06939d897cb0d1f5987a4d0996ea3ef (patch)
tree      0a50a5816f02b42087de787ad200f1eb12f29144 /sys/contrib/octeon-sdk/cvmx-l2c.c
parent    4b7c147940d7db81a4434262cf5cb2f5cd0102f2 (diff)
parent    76ef03b9cb287a0817808454c8b27cbcce5243d3 (diff)
download  FreeBSD-src-cdfefa0ba06939d897cb0d1f5987a4d0996ea3ef.zip
          FreeBSD-src-cdfefa0ba06939d897cb0d1f5987a4d0996ea3ef.tar.gz
Merge Cavium Octeon SDK 2.0 Simple Executive; this brings some fixes and new
facilities as well as support for the Octeon 2 family of SoCs. XXX Note that with our antediluvian assembler, we can't support some Octeon 2 instructions and fall back to using the old ones instead.
Diffstat (limited to 'sys/contrib/octeon-sdk/cvmx-l2c.c')
-rw-r--r--   sys/contrib/octeon-sdk/cvmx-l2c.c   1181
1 file changed, 925 insertions(+), 256 deletions(-)
diff --git a/sys/contrib/octeon-sdk/cvmx-l2c.c b/sys/contrib/octeon-sdk/cvmx-l2c.c
index c1a3320..fab7141 100644
--- a/sys/contrib/octeon-sdk/cvmx-l2c.c
+++ b/sys/contrib/octeon-sdk/cvmx-l2c.c
@@ -1,39 +1,40 @@
/***********************license start***************
- * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
- * reserved.
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
*
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
- * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
- * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
- * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
- * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- *
- *
- * For any questions regarding licensing please contact marketing@caviumnetworks.com
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
*
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
@@ -41,21 +42,29 @@
+
/**
* @file
*
* Implementation of the Level 2 Cache (L2C) control,
* measurement, and debugging facilities.
*
- * <hr>$Revision: 41586 $<hr>
+ * <hr>$Revision: 52004 $<hr>
*
*/
+#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-l2c.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#else
+#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "cvmx-config.h"
+#endif
#include "cvmx.h"
#include "cvmx-l2c.h"
#include "cvmx-spinlock.h"
#include "cvmx-interrupt.h"
-
+#endif
#ifndef CVMX_BUILD_FOR_LINUX_HOST
/* This spinlock is used internally to ensure that only one core is performing
@@ -67,11 +76,8 @@
CVMX_SHARED cvmx_spinlock_t cvmx_l2c_spinlock;
#endif
-static inline int l2_size_half(void)
-{
- uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
- return !!(val & (1ull << 34));
-}
+CVMX_SHARED cvmx_spinlock_t cvmx_l2c_vrt_spinlock;
+
int cvmx_l2c_get_core_way_partition(uint32_t core)
{
uint32_t field;
@@ -80,6 +86,9 @@ int cvmx_l2c_get_core_way_partition(uint32_t core)
if (core >= cvmx_octeon_num_cores())
return -1;
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return (cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff);
+
/* Use the lower two bits of the coreNumber to determine the bit offset
* of the UMSK[] field in the L2C_SPAR register.
*/
@@ -112,18 +121,19 @@ int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
mask &= valid_mask;
- /* A UMSK setting which blocks all L2C Ways is an error. */
- if (mask == valid_mask)
+ /* A UMSK setting which blocks all L2C Ways is an error on some chips */
+ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
return -1;
/* Validate the core number */
if (core >= cvmx_octeon_num_cores())
return -1;
- /* Check to make sure current mask & new mask don't block all ways */
- if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) == valid_mask)
- return -1;
-
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
+ return 0;
+ }
/* Use the lower two bits of core to determine the bit offset of the
* UMSK[] field in the L2C_SPAR register.
@@ -168,59 +178,92 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask)
valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
mask &= valid_mask;
- /* A UMSK setting which blocks all L2C Ways is an error. */
- if (mask == valid_mask)
- return -1;
- /* Check to make sure current mask & new mask don't block all ways */
- if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) == valid_mask)
+ /* A UMSK setting which blocks all L2C Ways is an error on some chips */
+ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
return -1;
- cvmx_write_csr(CVMX_L2C_SPAR4, (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
+ else
+ cvmx_write_csr(CVMX_L2C_SPAR4, (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
return 0;
}
int cvmx_l2c_get_hw_way_partition(void)
{
- return(cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF));
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return(cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff);
+ else
+ return(cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF));
}
-
void cvmx_l2c_config_perf(uint32_t counter, cvmx_l2c_event_t event,
uint32_t clear_on_read)
-{ cvmx_l2c_pfctl_t pfctl;
-
- pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
+{
- switch (counter)
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
{
- case 0:
- pfctl.s.cnt0sel = event;
- pfctl.s.cnt0ena = 1;
- if (!cvmx_octeon_is_pass1())
+ cvmx_l2c_pfctl_t pfctl;
+
+ pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
+
+ switch (counter)
+ {
+ case 0:
+ pfctl.s.cnt0sel = event;
+ pfctl.s.cnt0ena = 1;
pfctl.s.cnt0rdclr = clear_on_read;
- break;
- case 1:
- pfctl.s.cnt1sel = event;
- pfctl.s.cnt1ena = 1;
- if (!cvmx_octeon_is_pass1())
+ break;
+ case 1:
+ pfctl.s.cnt1sel = event;
+ pfctl.s.cnt1ena = 1;
pfctl.s.cnt1rdclr = clear_on_read;
- break;
- case 2:
- pfctl.s.cnt2sel = event;
- pfctl.s.cnt2ena = 1;
- if (!cvmx_octeon_is_pass1())
+ break;
+ case 2:
+ pfctl.s.cnt2sel = event;
+ pfctl.s.cnt2ena = 1;
pfctl.s.cnt2rdclr = clear_on_read;
- break;
- case 3:
- default:
- pfctl.s.cnt3sel = event;
- pfctl.s.cnt3ena = 1;
- if (!cvmx_octeon_is_pass1())
+ break;
+ case 3:
+ default:
+ pfctl.s.cnt3sel = event;
+ pfctl.s.cnt3ena = 1;
pfctl.s.cnt3rdclr = clear_on_read;
- break;
+ break;
+ }
+
+ cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
}
+ else
+ {
+ cvmx_l2c_tadx_prf_t l2c_tadx_prf;
+ int tad;
+
+ cvmx_warn("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
+
+ cvmx_warn_if(clear_on_read, "L2C counters don't support clear on read for this chip\n");
+
+ l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));
- cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+ switch (counter)
+ {
+ case 0:
+ l2c_tadx_prf.s.cnt0sel = event;
+ break;
+ case 1:
+ l2c_tadx_prf.s.cnt1sel = event;
+ break;
+ case 2:
+ l2c_tadx_prf.s.cnt2sel = event;
+ break;
+ default:
+ case 3:
+ l2c_tadx_prf.s.cnt3sel = event;
+ break;
+ }
+ for (tad=0; tad<CVMX_L2C_TADS; tad++)
+ cvmx_write_csr(CVMX_L2C_TADX_PRF(tad), l2c_tadx_prf.u64);
+ }
}
uint64_t cvmx_l2c_read_perf(uint32_t counter)
@@ -228,14 +271,50 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
switch (counter)
{
case 0:
- return(cvmx_read_csr(CVMX_L2C_PFC0));
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return(cvmx_read_csr(CVMX_L2C_PFC0));
+ else
+ {
+ uint64_t counter = 0;
+ int tad;
+ for (tad=0; tad<CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
+ return counter;
+ }
case 1:
- return(cvmx_read_csr(CVMX_L2C_PFC1));
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return(cvmx_read_csr(CVMX_L2C_PFC1));
+ else
+ {
+ uint64_t counter = 0;
+ int tad;
+ for (tad=0; tad<CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
+ return counter;
+ }
case 2:
- return(cvmx_read_csr(CVMX_L2C_PFC2));
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return(cvmx_read_csr(CVMX_L2C_PFC2));
+ else
+ {
+ uint64_t counter = 0;
+ int tad;
+ for (tad=0; tad<CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
+ return counter;
+ }
case 3:
default:
- return(cvmx_read_csr(CVMX_L2C_PFC3));
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return(cvmx_read_csr(CVMX_L2C_PFC3));
+ else
+ {
+ uint64_t counter = 0;
+ int tad;
+ for (tad=0; tad<CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
+ return counter;
+ }
}
}
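
A minimal usage sketch of the counter API above (illustrative only, not part of this commit): it assumes cvmx.h and cvmx-l2c.h are available and that 'event' is a valid cvmx_l2c_event_t; clear-on-read is left off because, as the warning in the new code notes, Octeon 2 parts do not support it.

    /* Illustrative sketch: program L2C performance counter 0 for 'event',
     * run the workload, then read the count back. */
    static uint64_t measure_l2c_event(cvmx_l2c_event_t event)
    {
        cvmx_l2c_config_perf(0, event, 0);   /* no clear-on-read (unsupported on Octeon 2) */
        /* ... run the code being measured ... */
        return cvmx_l2c_read_perf(0);        /* summed across all TADs on Octeon 2 */
    }
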
@@ -267,67 +346,107 @@ static void fault_in(uint64_t addr, int len)
int cvmx_l2c_lock_line(uint64_t addr)
{
- int retval = 0;
- cvmx_l2c_dbg_t l2cdbg;
- cvmx_l2c_lckbase_t lckbase;
- cvmx_l2c_lckoff_t lckoff;
- cvmx_l2t_err_t l2t_err;
- l2cdbg.u64 = 0;
- lckbase.u64 = 0;
- lckoff.u64 = 0;
-
- cvmx_spinlock_lock(&cvmx_l2c_spinlock);
-
- /* Clear l2t error bits if set */
- l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
- l2t_err.s.lckerr = 1;
- l2t_err.s.lckerr2 = 1;
- cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+ uint64_t assoc = cvmx_l2c_get_num_assoc();
+ uint64_t tag = addr >> shift;
+ uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT);
+ uint64_t way;
+ cvmx_l2c_tadx_tag_t l2c_tadx_tag;
- addr &= ~CVMX_CACHE_LINE_MASK;
+ CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
- /* Set this core as debug core */
- l2cdbg.s.ppnum = cvmx_get_core_num();
- CVMX_SYNC;
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
+ /* Make sure we were able to lock the line */
+ for (way = 0; way < assoc; way++)
+ {
+ CVMX_CACHE_LTGL2I(index | (way << shift), 0);
+ CVMX_SYNC; // make sure CVMX_L2C_TADX_TAG is updated
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+ if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
+ break;
+ }
- lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
- cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
- cvmx_read_csr(CVMX_L2C_LCKOFF);
+ /* Check if a valid line is found */
+ if (way >= assoc)
+ {
+ //cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr);
+ return -1;
+ }
- if (((cvmx_l2c_cfg_t)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias)
- {
- int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
- uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
- lckbase.s.lck_base = addr_tmp >> 7;
+ /* Check if lock bit is not set */
+ if (!l2c_tadx_tag.s.lock)
+ {
+ //cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr);
+ return -1;
+ }
+
+ return way;
}
else
{
- lckbase.s.lck_base = addr >> 7;
- }
+ int retval = 0;
+ cvmx_l2c_dbg_t l2cdbg;
+ cvmx_l2c_lckbase_t lckbase;
+ cvmx_l2c_lckoff_t lckoff;
+ cvmx_l2t_err_t l2t_err;
- lckbase.s.lck_ena = 1;
- cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
- cvmx_read_csr(CVMX_L2C_LCKBASE); // Make sure it gets there
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
- fault_in(addr, CVMX_CACHE_LINE_SIZE);
+ l2cdbg.u64 = 0;
+ lckbase.u64 = 0;
+ lckoff.u64 = 0;
- lckbase.s.lck_ena = 0;
- cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
- cvmx_read_csr(CVMX_L2C_LCKBASE); // Make sure it gets there
+ /* Clear l2t error bits if set */
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ l2t_err.s.lckerr = 1;
+ l2t_err.s.lckerr2 = 1;
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
- /* Stop being debug core */
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
+ addr &= ~CVMX_CACHE_LINE_MASK;
- l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
- if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
- retval = 1; /* We were unable to lock the line */
+ /* Set this core as debug core */
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
+ cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
+ cvmx_read_csr(CVMX_L2C_LCKOFF);
- return(retval);
+ if (((cvmx_l2c_cfg_t)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias)
+ {
+ int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
+ uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
+ lckbase.s.lck_base = addr_tmp >> 7;
+ }
+ else
+ {
+ lckbase.s.lck_base = addr >> 7;
+ }
+
+ lckbase.s.lck_ena = 1;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ cvmx_read_csr(CVMX_L2C_LCKBASE); // Make sure it gets there
+
+ fault_in(addr, CVMX_CACHE_LINE_SIZE);
+
+ lckbase.s.lck_ena = 0;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ cvmx_read_csr(CVMX_L2C_LCKBASE); // Make sure it gets there
+
+ /* Stop being debug core */
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
+ retval = 1; /* We were unable to lock the line */
+
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ return(retval);
+ }
}
@@ -355,77 +474,83 @@ void cvmx_l2c_flush(void)
{
uint64_t assoc, set;
uint64_t n_assoc, n_set;
- cvmx_l2c_dbg_t l2cdbg;
- cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+ n_set = cvmx_l2c_get_num_sets();
+ n_assoc = cvmx_l2c_get_num_assoc();
- l2cdbg.u64 = 0;
- if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
- l2cdbg.s.ppnum = cvmx_get_core_num();
- l2cdbg.s.finv = 1;
- n_set = CVMX_L2_SETS;
- n_assoc = l2_size_half() ? (CVMX_L2_ASSOC/2) : CVMX_L2_ASSOC ;
- for(set=0; set < n_set; set++)
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
{
- for(assoc = 0; assoc < n_assoc; assoc++)
+ uint64_t address;
+ /* These may look like constants, but they aren't... */
+ int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+ int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
+ for (set=0; set < n_set; set++)
{
- l2cdbg.s.set = assoc;
- /* Enter debug mode, and make sure all other writes complete before we
- ** enter debug mode */
- CVMX_SYNCW;
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
-
- CVMX_PREPARE_FOR_STORE (CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, set*CVMX_CACHE_LINE_SIZE), 0);
- CVMX_SYNCW; /* Push STF out to L2 */
- /* Exit debug mode */
- CVMX_SYNC;
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
+ for(assoc=0; assoc < n_assoc; assoc++)
+ {
+ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (assoc << assoc_shift) |
+ (set << set_shift));
+ CVMX_CACHE_WBIL2I(address, 0);
+ }
}
}
-
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ else
+ {
+ for (set=0; set < n_set; set++)
+ for(assoc=0; assoc < n_assoc; assoc++)
+ cvmx_l2c_flush_line(assoc, set);
+ }
}
int cvmx_l2c_unlock_line(uint64_t address)
{
- int assoc;
- cvmx_l2c_tag_t tag;
- cvmx_l2c_dbg_t l2cdbg;
- uint32_t tag_addr;
-
- uint32_t index = cvmx_l2c_address_to_index(address);
- cvmx_spinlock_lock(&cvmx_l2c_spinlock);
- /* Compute portion of address that is stored in tag */
- tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
- for(assoc = 0; assoc < CVMX_L2_ASSOC; assoc++)
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
{
- tag = cvmx_get_l2c_tag(assoc, index);
+ int assoc; cvmx_l2c_tag_t tag;
+ uint32_t tag_addr;
+ uint32_t index = cvmx_l2c_address_to_index(address);
- if (tag.s.V && (tag.s.addr == tag_addr))
+ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+
+ /* For 63XX, we can flush a line by using the physical address directly,
+ ** so finding the cache line used by the address is only required to provide
+ ** the proper return value for the function.
+ */
+ for(assoc = 0; assoc < CVMX_L2_ASSOC; assoc++)
{
- l2cdbg.u64 = 0;
- l2cdbg.s.ppnum = cvmx_get_core_num();
- l2cdbg.s.set = assoc;
- l2cdbg.s.finv = 1;
+ tag = cvmx_l2c_get_tag(assoc, index);
+
+ if (tag.s.V && (tag.s.addr == tag_addr))
+ {
+ CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
+ return tag.s.L;
+ }
+ }
+ }
+ else
+ {
+ int assoc;
+ cvmx_l2c_tag_t tag;
+ uint32_t tag_addr;
- CVMX_SYNC;
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); /* Enter debug mode */
- cvmx_read_csr(CVMX_L2C_DBG);
+ uint32_t index = cvmx_l2c_address_to_index(address);
- CVMX_PREPARE_FOR_STORE (CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
- CVMX_SYNC;
- /* Exit debug mode */
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
- return tag.s.L;
+ /* Compute portion of address that is stored in tag */
+ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+ for(assoc = 0; assoc < CVMX_L2_ASSOC; assoc++)
+ {
+ tag = cvmx_l2c_get_tag(assoc, index);
+
+ if (tag.s.V && (tag.s.addr == tag_addr))
+ {
+ cvmx_l2c_flush_line(assoc, index);
+ return tag.s.L;
+ }
}
}
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
return 0;
}
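
A hedged sketch of how the lock/unlock pair above might be used; cvmx_ptr_to_phys() is the usual Simple Executive virtual-to-physical helper, 'buf' is a hypothetical cache-line-aligned object, and the return conventions differ by model as implemented in this diff.

    /* Illustrative sketch: pin the cache line holding 'buf' in L2, use it,
     * then release it.  On CN63XX cvmx_l2c_lock_line() returns the way used
     * or -1; on older models it returns 0 on success and 1 on a lock error. */
    static int pin_hot_line(void *buf)
    {
        uint64_t pa = cvmx_ptr_to_phys(buf);
        int rc = cvmx_l2c_lock_line(pa);
        if (rc < 0 || (!OCTEON_IS_MODEL(OCTEON_CN63XX) && rc != 0))
            return -1;                 /* line could not be locked */
        /* ... latency-critical accesses to *buf ... */
        cvmx_l2c_unlock_line(pa);      /* returns the previous lock bit */
        return 0;
    }
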
@@ -519,11 +644,11 @@ typedef union
static __cvmx_l2c_tag_t __read_l2_tag(uint64_t assoc, uint64_t index)
{
- uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
+ uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
uint64_t core = cvmx_get_core_num();
__cvmx_l2c_tag_t tag_val;
uint64_t dbg_addr = CVMX_L2C_DBG;
- uint32_t flags;
+ unsigned long flags;
cvmx_l2c_dbg_t debug_val;
debug_val.u64 = 0;
@@ -536,7 +661,7 @@ static __cvmx_l2c_tag_t __read_l2_tag(uint64_t assoc, uint64_t index)
CVMX_SYNC; /* Make sure core is quiet (no prefetches, etc.) before entering debug mode */
CVMX_DCACHE_INVALIDATE; /* Flush L1 to make sure debug load misses L1 */
- flags = cvmx_interrupt_disable_save();
+ cvmx_local_irq_save(flags);
/* The following must be done in assembly as when in debug mode all data loads from
** L2 return special debug data, not normal memory contents. Also, interrupts must be disabled,
@@ -556,7 +681,7 @@ static __cvmx_l2c_tag_t __read_l2_tag(uint64_t assoc, uint64_t index)
" .set pop \n"
:[tag_val] "=r" (tag_val): [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr) : "memory");
- cvmx_interrupt_restore(flags);
+ cvmx_local_irq_restore(flags);
return(tag_val);
@@ -565,67 +690,89 @@ static __cvmx_l2c_tag_t __read_l2_tag(uint64_t assoc, uint64_t index)
cvmx_l2c_tag_t cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
- __cvmx_l2c_tag_t tmp_tag;
cvmx_l2c_tag_t tag;
tag.u64 = 0;
if ((int)association >= cvmx_l2c_get_num_assoc())
{
- cvmx_dprintf("ERROR: cvmx_get_l2c_tag association out of range\n");
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
return(tag);
}
if ((int)index >= cvmx_l2c_get_num_sets())
{
- cvmx_dprintf("ERROR: cvmx_get_l2c_tag index out of range (arg: %d, max: %d)\n", (int)index, cvmx_l2c_get_num_sets());
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n", (int)index, cvmx_l2c_get_num_sets());
return(tag);
}
- /* __read_l2_tag is intended for internal use only */
- tmp_tag = __read_l2_tag(association, index);
-
- /* Convert all tag structure types to generic version, as it can represent all models */
- if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))
- {
- tag.s.V = tmp_tag.cn58xx.V;
- tag.s.D = tmp_tag.cn58xx.D;
- tag.s.L = tmp_tag.cn58xx.L;
- tag.s.U = tmp_tag.cn58xx.U;
- tag.s.addr = tmp_tag.cn58xx.addr;
- }
- else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
- {
- tag.s.V = tmp_tag.cn38xx.V;
- tag.s.D = tmp_tag.cn38xx.D;
- tag.s.L = tmp_tag.cn38xx.L;
- tag.s.U = tmp_tag.cn38xx.U;
- tag.s.addr = tmp_tag.cn38xx.addr;
- }
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
- {
- tag.s.V = tmp_tag.cn31xx.V;
- tag.s.D = tmp_tag.cn31xx.D;
- tag.s.L = tmp_tag.cn31xx.L;
- tag.s.U = tmp_tag.cn31xx.U;
- tag.s.addr = tmp_tag.cn31xx.addr;
- }
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- {
- tag.s.V = tmp_tag.cn30xx.V;
- tag.s.D = tmp_tag.cn30xx.D;
- tag.s.L = tmp_tag.cn30xx.L;
- tag.s.U = tmp_tag.cn30xx.U;
- tag.s.addr = tmp_tag.cn30xx.addr;
- }
- else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
{
- tag.s.V = tmp_tag.cn50xx.V;
- tag.s.D = tmp_tag.cn50xx.D;
- tag.s.L = tmp_tag.cn50xx.L;
- tag.s.U = tmp_tag.cn50xx.U;
- tag.s.addr = tmp_tag.cn50xx.addr;
+ cvmx_l2c_tadx_tag_t l2c_tadx_tag;
+ uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ /* Use L2 cache Index load tag cache instruction, as hardware loads
+ the virtual tag for the L2 cache block with the contents of
+ L2C_TAD0_TAG register. */
+ CVMX_CACHE_LTGL2I(address, 0);
+ CVMX_SYNC; // make sure CVMX_L2C_TADX_TAG is updated
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+
+ tag.s.V = l2c_tadx_tag.s.valid;
+ tag.s.D = l2c_tadx_tag.s.dirty;
+ tag.s.L = l2c_tadx_tag.s.lock;
+ tag.s.U = l2c_tadx_tag.s.use;
+ tag.s.addr = l2c_tadx_tag.s.tag;
}
else
{
- cvmx_dprintf("Unsupported OCTEON Model in %s\n", __FUNCTION__);
+ __cvmx_l2c_tag_t tmp_tag;
+ /* __read_l2_tag is intended for internal use only */
+ tmp_tag = __read_l2_tag(association, index);
+
+ /* Convert all tag structure types to generic version, as it can represent all models */
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))
+ {
+ tag.s.V = tmp_tag.cn58xx.V;
+ tag.s.D = tmp_tag.cn58xx.D;
+ tag.s.L = tmp_tag.cn58xx.L;
+ tag.s.U = tmp_tag.cn58xx.U;
+ tag.s.addr = tmp_tag.cn58xx.addr;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+ {
+ tag.s.V = tmp_tag.cn38xx.V;
+ tag.s.D = tmp_tag.cn38xx.D;
+ tag.s.L = tmp_tag.cn38xx.L;
+ tag.s.U = tmp_tag.cn38xx.U;
+ tag.s.addr = tmp_tag.cn38xx.addr;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ {
+ tag.s.V = tmp_tag.cn31xx.V;
+ tag.s.D = tmp_tag.cn31xx.D;
+ tag.s.L = tmp_tag.cn31xx.L;
+ tag.s.U = tmp_tag.cn31xx.U;
+ tag.s.addr = tmp_tag.cn31xx.addr;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ {
+ tag.s.V = tmp_tag.cn30xx.V;
+ tag.s.D = tmp_tag.cn30xx.D;
+ tag.s.L = tmp_tag.cn30xx.L;
+ tag.s.U = tmp_tag.cn30xx.U;
+ tag.s.addr = tmp_tag.cn30xx.addr;
+ }
+ else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ {
+ tag.s.V = tmp_tag.cn50xx.V;
+ tag.s.D = tmp_tag.cn50xx.D;
+ tag.s.L = tmp_tag.cn50xx.L;
+ tag.s.U = tmp_tag.cn50xx.U;
+ tag.s.addr = tmp_tag.cn50xx.addr;
+ }
+ else
+ {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __FUNCTION__);
+ }
}
return tag;
@@ -636,12 +783,33 @@ cvmx_l2c_tag_t cvmx_l2c_get_tag(uint32_t association, uint32_t index)
uint32_t cvmx_l2c_address_to_index (uint64_t addr)
{
uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
- cvmx_l2c_cfg_t l2c_cfg;
- l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ int indxalias = 0;
- if (l2c_cfg.s.idxalias)
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
{
- idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+ cvmx_l2c_ctl_t l2c_ctl;
+ l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
+ indxalias = !l2c_ctl.s.disidxalias;
+ }
+ else
+ {
+ cvmx_l2c_cfg_t l2c_cfg;
+ l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ indxalias = l2c_cfg.s.idxalias;
+ }
+
+ if (indxalias)
+ {
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
+ idx ^= idx / cvmx_l2c_get_num_sets();
+ idx ^= a_14_12;
+ }
+ else
+ {
+ idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+ }
}
idx &= CVMX_L2C_IDX_MASK;
return(idx);
@@ -662,7 +830,7 @@ int cvmx_l2c_get_set_bits(void)
if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
OCTEON_IS_MODEL(OCTEON_CN58XX))
l2_set_bits = 11; /* 2048 sets */
- else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+ else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
l2_set_bits = 10; /* 1024 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
l2_set_bits = 9; /* 512 sets */
@@ -695,7 +863,9 @@ int cvmx_l2c_get_num_assoc(void)
OCTEON_IS_MODEL(OCTEON_CN50XX) ||
OCTEON_IS_MODEL(OCTEON_CN38XX))
l2_assoc = 8;
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ l2_assoc = 16;
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
OCTEON_IS_MODEL(OCTEON_CN30XX))
l2_assoc = 4;
else
@@ -705,10 +875,40 @@ int cvmx_l2c_get_num_assoc(void)
}
/* Check to see if part of the cache is disabled */
- if (cvmx_fuse_read(265))
- l2_assoc = l2_assoc >> 2;
- else if (cvmx_fuse_read(264))
- l2_assoc = l2_assoc >> 1;
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ cvmx_mio_fus_dat3_t mio_fus_dat3;
+
+ mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+ /* cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows
+ <2> will be not used for 63xx
+ <1> disables 1/2 ways
+ <0> disables 1/4 ways
+ They are cumulative, so for 63xx:
+ <1> <0>
+ 0 0 16-way 2MB cache
+ 0 1 12-way 1.5MB cache
+ 1 0 8-way 1MB cache
+ 1 1 4-way 512KB cache */
+
+ if (mio_fus_dat3.s.l2c_crip == 3)
+ l2_assoc = 4;
+ else if (mio_fus_dat3.s.l2c_crip == 2)
+ l2_assoc = 8;
+ else if (mio_fus_dat3.s.l2c_crip == 1)
+ l2_assoc = 12;
+ }
+ else
+ {
+ cvmx_l2d_fus3_t val;
+ val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ /* Using shifts here, as bit position names are different for
+ each model but they all mean the same. */
+ if ((val.u64 >> 35) & 0x1)
+ l2_assoc = l2_assoc >> 2;
+ else if ((val.u64 >> 34) & 0x1)
+ l2_assoc = l2_assoc >> 1;
+ }
return(l2_assoc);
}
@@ -725,23 +925,492 @@ int cvmx_l2c_get_num_assoc(void)
*/
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
{
- cvmx_l2c_dbg_t l2cdbg;
+ /* Check the range of the index. */
+ if (index > (uint32_t)cvmx_l2c_get_num_sets())
+ {
+ cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
+ return;
+ }
- l2cdbg.u64 = 0;
- l2cdbg.s.ppnum = cvmx_get_core_num();
- l2cdbg.s.finv = 1;
+ /* Check the range of association. */
+ if (assoc > (uint32_t)cvmx_l2c_get_num_assoc())
+ {
+ cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
+ return;
+ }
- l2cdbg.s.set = assoc;
- /* Enter debug mode, and make sure all other writes complete before we
- ** enter debug mode */
- asm volatile ("sync \n"::: "memory");
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ uint64_t address;
+ /* Create the address based on index and association.
+ Bits<20:17> select the way of the cache block involved in
+ the operation
+          Bits<16:7> of the effective address select the index */
+ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ CVMX_CACHE_WBIL2I(address, 0);
+ }
+ else
+ {
+ cvmx_l2c_dbg_t l2cdbg;
- CVMX_PREPARE_FOR_STORE (((1ULL << 63) + (index)*128), 0);
- /* Exit debug mode */
- asm volatile ("sync \n"::: "memory");
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
+ l2cdbg.u64 = 0;
+ if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.finv = 1;
+
+ l2cdbg.s.set = assoc;
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+ /* Enter debug mode, and make sure all other writes complete before we
+ ** enter debug mode */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE (CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, index*CVMX_CACHE_LINE_SIZE), 0);
+ /* Exit debug mode */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ }
}
#endif
+
+#ifndef CVMX_BUILD_FOR_LINUX_HOST
+
+/* L2C Virtualization APIs. These APIs are based on Octeon II documentation. */
+
+/**
+ * @INTERNAL
+ * Helper function to decode VALUE to the number of allowed virtualization IDs.
+ * Returns L2C_VRT_CTL[NUMID].
+ *
+ * @param nvid Number of virtual IDs.
+ * @return The decoded NUMID on success, or -1 on failure.
+ */
+static inline int __cvmx_l2c_vrt_decode_numid(int nvid)
+{
+ int bits = -1;
+ int zero_bits = -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED)
+ {
+ cvmx_dprintf("WARNING: Invalid number of virtual ids(%d) requested, should be <= 64\n", nvid);
+ return bits;
+ }
+
+ while (nvid)
+ {
+ if ((nvid & 1) == 0)
+ zero_bits++;
+
+ bits++;
+ nvid >>= 1;
+ }
+
+ if (bits == 1 || (zero_bits && ((bits - zero_bits) == 1)))
+ return zero_bits;
+ }
+ return -1;
+}
+
+/**
+ * Set the maximum number of Virtual IDs allowed in a machine.
+ *
+ * @param nvid Number of virtual IDs allowed in a machine.
+ * @return Return 0 on success or -1 on failure.
+ */
+int cvmx_l2c_vrt_set_max_virtids(int nvid)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ {
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+
+ if (l2c_vrt_ctl.s.enable)
+ {
+ cvmx_dprintf("WARNING: Changing number of Virtual Machine IDs is not allowed after Virtualization is enabled\n");
+ return -1;
+ }
+
+ if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED)
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_max_virtids: Invalid number of Virtual Machine IDs(%d) requested, max allowed %d\n", nvid, CVMX_L2C_VRT_MAX_VIRTID_ALLOWED);
+ return -1;
+ }
+
+ /* Calculate the numid based on nvid */
+ l2c_vrt_ctl.s.numid = __cvmx_l2c_vrt_decode_numid(nvid);
+ cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
+ }
+ return 0;
+}
+
+/**
+ * Get the maximum number of virtual IDs allowed in a machine.
+ *
+ * @return Return number of virtual machine IDs or -1 on failure.
+ */
+int cvmx_l2c_vrt_get_max_virtids(void)
+{
+ int virtids = -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+ virtids = 1 << (l2c_vrt_ctl.s.numid + 1);
+ if (virtids > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED)
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_get_max_virtids: Invalid number of Virtual IDs initialized (%d)\n", virtids);
+ return -1;
+ }
+ }
+ return virtids;
+}
+
+/**
+ * @INTERNAL
+ * Helper function to decode VALUE to memory space coverage of L2C_VRT_MEM.
+ * Returns L2C_VRT_CTL[MEMSZ].
+ *
+ * @param memsz Memory in GB.
+ * @return The decoded MEMSZ on success, or -1 on failure.
+ */
+static inline int __cvmx_l2c_vrt_decode_memsize(int memsz)
+{
+ int bits = 0;
+ int zero_bits = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ if (memsz == 0 || memsz > CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED)
+ {
+ cvmx_dprintf("WARNING: Invalid virtual memory size(%d) requested, should be <= %d\n", memsz, CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED);
+ return -1;
+ }
+
+ while (memsz)
+ {
+ if ((memsz & 1) == 0)
+ zero_bits++;
+
+ bits++;
+ memsz >>= 1;
+ }
+
+ if (bits == 1 || (bits - zero_bits) == 1)
+ return zero_bits;
+ }
+ return -1;
+}
+
+/**
+ * Set the maximum size of the memory space to be allocated for virtualization.
+ *
+ * @param memsz Size of the virtual memory in GB
+ * @return Return 0 on success or -1 on failure.
+ */
+int cvmx_l2c_vrt_set_max_memsz(int memsz)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+ int decode = 0;
+
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+
+ if (l2c_vrt_ctl.s.enable)
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Changing the size of the memory after Virtualization is enabled is not allowed.\n");
+ return -1;
+ }
+
+ if (memsz >= (int)(cvmx_sysinfo_get()->system_dram_size / 1000000))
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Invalid memory size (%d GB), greater than available on the chip\n", memsz);
+ return -1;
+ }
+
+ decode = __cvmx_l2c_vrt_decode_memsize(memsz);
+ if (decode == -1)
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Invalid memory size (%d GB), refer to L2C_VRT_CTL[MEMSZ] for more information\n", memsz);
+ return -1;
+ }
+
+ l2c_vrt_ctl.s.memsz = decode;
+ cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
+ }
+ return 0;
+}
+
+/**
+ * Set a Virtual ID to a set of cores.
+ *
+ * @param virtid Assign virtid to a set of cores.
+ * @param coremask The group of cores to assign a unique virtual id.
+ * @return Return 0 on success, otherwise -1.
+ */
+int cvmx_l2c_vrt_assign_virtid(int virtid, uint32_t coremask)
+{
+ uint32_t core = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ int found = 0;
+ int max_virtid = cvmx_l2c_vrt_get_max_virtids();
+
+ if (virtid > max_virtid)
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Max %d number of virtids are allowed, passed %d.\n", max_virtid, virtid);
+ return -1;
+ }
+
+ while (core < cvmx_octeon_num_cores())
+ {
+ if ((coremask >> core) & 1)
+ {
+ cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
+ cvmx_l2c_virtid_iobx_t l2c_virtid_iobx;
+ l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
+
+ /* Check if the core already has a virtid assigned. */
+ if (l2c_virtid_ppx.s.id)
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Changing virtid of core #%d to %d from %d.\n",
+ (unsigned int)core, virtid, l2c_virtid_ppx.s.id);
+
+ /* Flush L2 cache to avoid write errors */
+ cvmx_l2c_flush();
+ }
+ cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), virtid & 0x3f);
+
+ /* Set the IOB to normal mode. */
+ l2c_virtid_iobx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_IOBX(core));
+ l2c_virtid_iobx.s.id = 1;
+ l2c_virtid_iobx.s.dwbid = 0;
+ cvmx_write_csr(CVMX_L2C_VIRTID_IOBX(core), l2c_virtid_iobx.u64);
+ found = 1;
+ }
+ core++;
+ }
+
+ /* Invalid coremask passed. */
+ if (!found)
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Invalid coremask(0x%x) passed\n", (unsigned int)coremask);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Remove a virt id assigned to a set of cores. Update the virtid mask and
+ * virtid stored for each core.
+ *
+ * @param virtid Remove the specified Virtualization machine ID.
+ */
+void cvmx_l2c_vrt_remove_virtid(int virtid)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ uint32_t core;
+ cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
+
+ for (core = 0; core < cvmx_octeon_num_cores(); core++)
+ {
+ l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
+ if (virtid == l2c_virtid_ppx.s.id)
+ cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), 0);
+ }
+ }
+}
+
+/**
+ * Helper function to protect the memory region based on the granularity.
+ */
+static uint64_t __cvmx_l2c_vrt_get_granularity(void)
+{
+ uint64_t gran = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ int nvid;
+ uint64_t szd;
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+ nvid = cvmx_l2c_vrt_get_max_virtids();
+ szd = (1ull << l2c_vrt_ctl.s.memsz) * 1024 * 1024 * 1024;
+ gran = (unsigned long long)(szd * nvid)/(32ull * 1024);
+ }
+ return gran;
+}
+
+/**
+ * Block a memory region to be updated for a given virtual id.
+ *
+ * @param start_addr Starting address of memory region
+ * @param size Size of the memory to protect
+ * @param virtid Virtual ID to use
+ * @param mode Allow/Disallow write access
+ * = 0, Allow write access by virtid
+ * = 1, Disallow write access by virtid
+ */
+int cvmx_l2c_vrt_memprotect(uint64_t start_addr, int size, int virtid, int mode)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ /* Check the alignment of start address, should be aligned to the
+ granularity. */
+ uint64_t gran = __cvmx_l2c_vrt_get_granularity();
+ uint64_t end_addr = start_addr + size;
+ int byte_offset, virtid_offset;
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+ cvmx_l2c_vrt_memx_t l2c_vrt_mem;
+
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+
+ /* No need to protect if virtualization is not enabled */
+ if (!l2c_vrt_ctl.s.enable)
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization is not enabled.\n");
+ return -1;
+ }
+
+ if (virtid > cvmx_l2c_vrt_get_max_virtids())
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id is greater than max allowed\n");
+ return -1;
+ }
+
+ /* No need to protect if virtid is not assigned to a core */
+ {
+ cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
+ int found = 0;
+ uint32_t core;
+
+ for (core = 0; core < cvmx_octeon_num_cores(); core++)
+ {
+ l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
+ if (l2c_virtid_ppx.s.id == virtid)
+ {
+ found = 1;
+ break;
+ }
+ }
+ if (found == 0)
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id (%d) is not assigned to any core.\n", virtid);
+ return -1;
+ }
+ }
+
+ /* Make sure previous stores are through before protecting the memory. */
+ CVMX_SYNCW;
+
+ /* If the L2/DRAM physical address is >= 512 MB, subtract 256 MB
+ to get the address to use. This is because L2C removes the 256MB
+ "hole" between DR0 and DR1. */
+ if (start_addr >= (512 * 1024 * 1024))
+ start_addr -= 256 * 1024 * 1024;
+
+ if (start_addr != ((start_addr + (gran - 1)) & ~(gran - 1)))
+ {
+ cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Start address is not aligned\n");
+ return -1;
+ }
+
+ /* Check the size of the memory to protect, should be aligned to the
+ granularity. */
+ if (end_addr != ((end_addr + (gran - 1)) & ~(gran - 1)))
+ {
+ end_addr = (start_addr + (gran - 1)) & ~(gran - 1);
+ size = start_addr - end_addr;
+ }
+
+ byte_offset = l2c_vrt_ctl.s.memsz + l2c_vrt_ctl.s.numid + 16;
+ virtid_offset = 14 - l2c_vrt_ctl.s.numid;
+
+ cvmx_spinlock_lock(&cvmx_l2c_vrt_spinlock);
+
+ /* Enable memory protection for each virtid for the specified range. */
+ while (start_addr < end_addr)
+ {
+ /* When L2C virtualization is enabled and a bit is set in
+ L2C_VRT_MEM(0..1023), then L2C prevents the selected virtual
+ machine from storing to the selected L2C/DRAM region. */
+ int offset, position, i;
+ int l2c_vrt_mem_bit_index = start_addr >> byte_offset;
+ l2c_vrt_mem_bit_index |= (virtid << virtid_offset);
+
+ offset = l2c_vrt_mem_bit_index >> 5;
+ position = l2c_vrt_mem_bit_index & 0x1f;
+
+ l2c_vrt_mem.u64 = cvmx_read_csr(CVMX_L2C_VRT_MEMX(offset));
+ /* Allow/Disallow write access to memory. */
+ if (mode == 0)
+ l2c_vrt_mem.s.data &= ~(1 << position);
+ else
+ l2c_vrt_mem.s.data |= 1 << position;
+ l2c_vrt_mem.s.parity = 0;
+ /* PARITY<i> is the even parity of DATA<i*8+7:i*8>, which means
+ that each bit<i> in PARITY[0..3], is the XOR of all the bits
+ in the corresponding byte in DATA. */
+ for (i = 0; i <= 4; i++)
+ {
+ uint64_t mask = 0xffull << (i*8);
+ if ((cvmx_pop(l2c_vrt_mem.s.data & mask) & 0x1))
+ l2c_vrt_mem.s.parity |= (1ull << i);
+ }
+ cvmx_write_csr(CVMX_L2C_VRT_MEMX(offset), l2c_vrt_mem.u64);
+ start_addr += gran;
+ }
+
+ cvmx_spinlock_unlock(&cvmx_l2c_vrt_spinlock);
+ }
+ return 0;
+}
+#endif
+
+/**
+ * Enable virtualization.
+ *
+ * @param mode Whether out of bound writes are an error.
+ */
+void cvmx_l2c_vrt_enable(int mode)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+
+ /* Enable global virtualization */
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+ l2c_vrt_ctl.s.ooberr = mode;
+ l2c_vrt_ctl.s.enable = 1;
+ cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
+ }
+}
+
+/**
+ * Disable virtualization.
+ */
+void cvmx_l2c_vrt_disable(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ {
+ cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
+
+ /* Disable global virtualization */
+ l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
+ l2c_vrt_ctl.s.enable = 0;
+ cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
+ }
+}
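
Putting the new virtualization entry points together, a hedged bring-up sketch for a hypothetical four-core CN63XX configuration; the virtid, coremask, address, and size values are purely illustrative, and cvmx_l2c_vrt_memprotect() must run after cvmx_l2c_vrt_enable() because it checks L2C_VRT_CTL[ENABLE].

    /* Illustrative sketch: allow 4 virtual machine IDs over 1 GB of
     * protected memory, split four cores into two virtual machines, and
     * make one region read-only for the second machine. */
    static void vrt_bringup_example(void)
    {
        cvmx_l2c_vrt_set_max_virtids(4);         /* sets L2C_VRT_CTL[NUMID] */
        cvmx_l2c_vrt_set_max_memsz(1);           /* 1 GB of protectable memory */

        cvmx_l2c_vrt_assign_virtid(1, 0x3);      /* cores 0-1 -> virtid 1 */
        cvmx_l2c_vrt_assign_virtid(2, 0xc);      /* cores 2-3 -> virtid 2 */

        cvmx_l2c_vrt_enable(1);                  /* out-of-bound writes are errors */

        /* Disallow writes by virtid 2 to a 1 MB region (mode 1); the start
         * address and size must be aligned to the protection granularity. */
        cvmx_l2c_vrt_memprotect(0x20000000ull, 1 << 20, 2, 1);
    }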