author	Rob Herring <rob.herring@calxeda.com>	2011-12-09 17:58:35 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-12-11 08:35:50 +0000
commit	786a767465b12cb4c1a45421b12fbf6bff45b0ea (patch)
tree	782492ca819b0176fca17e1be242f01bdf4afd65
parent	8878a539ff19a43cf3729e7562cd528f490246ae (diff)
ARM: 7201/1: add EDAC atomic_scrub function
Add support for the architecture-specific EDAC atomic_scrub function to ARM. Only ARMv6+ is implemented, as the exclusive ldrex/strex instructions are needed. Supporting EDAC on ARMv5 or earlier is unlikely at this point anyway.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--	arch/arm/include/asm/edac.h | 48
1 file changed, 48 insertions(+), 0 deletions(-)
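For context, here is a minimal sketch of how the EDAC core might call this helper when scrubbing a corrected page. The function name scrub_chunk and its arguments are illustrative assumptions, not part of this patch; the in-tree caller of the era, edac_mc_scrub_block() in drivers/edac/edac_mc.c, follows the same map-then-scrub pattern.

#include <linux/highmem.h>
#include <asm/edac.h>

/*
 * Illustrative caller (not part of this patch): map a page and
 * scrub one corrected chunk of it in place.
 */
static void scrub_chunk(struct page *page, unsigned long offset, u32 size)
{
	void *virt = kmap_atomic(page);

	/* Read each word and write it back; hardware corrects it. */
	atomic_scrub(virt + offset, size);

	kunmap_atomic(virt);
}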
diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h
new file mode 100644
index 0000000..0df7a2c
--- /dev/null
+++ b/arch/arm/include/asm/edac.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2011 Calxeda, Inc.
+ * Based on PPC version Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch atomic_scrub() that EDAC uses for software
+ * ECC scrubbing. It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static inline void atomic_scrub(void *va, u32 size)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+ unsigned int *virt_addr = va;
+ unsigned int temp, temp2;
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+ /* Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ */
+ __asm__ __volatile__("\n"
+ "1: ldrex %0, [%2]\n"
+ " strex %1, %0, [%2]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+ : "=&r"(temp), "=&r"(temp2)
+ : "r"(virt_addr)
+ : "cc");
+ }
+#endif
+}
+
+#endif
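The ldrex/strex pair above is a load-exclusive/store-exclusive retry loop: ldrex marks the address for exclusive access, and strex performs the store only if no other observer has touched the location since, writing 0 to its result register (temp2) on success, so the loop retries until the read-and-write-back completes atomically. For comparison, below is a portable user-space sketch of the same pattern using GCC's __atomic builtins; this is an illustration only, not kernel code (x86's atomic_scrub of the era achieved the same effect with a locked add of zero).

#include <stddef.h>

/*
 * Illustrative only: an atomic fetch-and-add of zero reads each
 * word and writes it back atomically, which is all scrubbing needs.
 */
static inline void atomic_scrub_sketch(void *va, unsigned int size)
{
	unsigned int *p = va;
	size_t i;

	for (i = 0; i < size / sizeof(*p); i++, p++)
		(void)__atomic_fetch_add(p, 0U, __ATOMIC_RELAXED);
}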