author     Paul Mundt <lethal@linux-sh.org>   2007-11-28 20:19:38 +0900
committer  Paul Mundt <lethal@linux-sh.org>   2008-01-28 13:18:58 +0900
commit     a23ba43573a24c351640bc19c06c701798fe6e25 (patch)
tree       b0d1481379ebe4d4345b7358ddffe16cf6d5c6f6 /arch/sh/mm
parent     4b27c47cf8eddb4153a026e89c7b092598c98b12 (diff)
sh: comment tidying for sh64->sh migration.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/cache-sh5.c   | 15
-rw-r--r--  arch/sh/mm/extable_64.c  | 28
-rw-r--r--  arch/sh/mm/tlb-sh5.c     |  4
3 files changed, 22 insertions, 25 deletions
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 421487c..4617e3a 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -1,18 +1,15 @@
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * arch/sh64/mm/cache.c
+ * arch/sh/mm/cache-sh5.c
*
* Original version Copyright (C) 2000, 2001 Paolo Alberelli
* Second version Copyright (C) benedict.gaster@superh.com 2002
* Third version Copyright Richard.Curnow@superh.com 2003
* Hacks to third version Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
-
-/****************************************************************************/
-
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
@@ -146,7 +143,7 @@ int __init sh64_cache_init(void)
/* The following group of functions deal with mapping and unmapping a temporary
page into the DTLB slot that have been set aside for our exclusive use. */
/* In order to accomplish this, we use the generic interface for adding and
- removing a wired slot entry as defined in arch/sh64/mm/tlb.c */
+ removing a wired slot entry as defined in arch/sh/mm/tlb-sh5.c */
/****************************************************************************/
static unsigned long slot_own_flags;
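The pattern the comment above refers to boils down to: claim the reserved wired DTLB slot with interrupts off, program it to map the page being operated on, do the cache work through that mapping, then tear the slot down again. A rough sketch of that pattern follows; the dtlb_cache_slot variable and the exact sh64_setup_tlb_slot() argument list are assumptions for illustration, not taken from this diff.

/*
 * Sketch only: the wired-slot map/unmap pattern referenced above.
 * dtlb_cache_slot and the sh64_setup_tlb_slot() parameters are
 * illustrative assumptions, not copied from the kernel sources.
 */
static unsigned long long dtlb_cache_slot;	/* reserved at init time */

static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr,
					       unsigned long asid,
					       unsigned long paddr)
{
	/*
	 * The slot is a single global resource, so keep interrupts off
	 * for as long as we own it.
	 */
	local_irq_save(slot_own_flags);
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_restore(slot_own_flags);
}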
diff --git a/arch/sh/mm/extable_64.c b/arch/sh/mm/extable_64.c
index a2e6e05..f054996 100644
--- a/arch/sh/mm/extable_64.c
+++ b/arch/sh/mm/extable_64.c
@@ -1,14 +1,14 @@
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * arch/sh64/mm/extable.c
+ * arch/sh/mm/extable_64.c
*
* Copyright (C) 2003 Richard Curnow
* Copyright (C) 2003, 2004 Paul Mundt
*
* Cloned from the 2.5 SH version..
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
#include <linux/rwsem.h>
#include <linux/module.h>
@@ -21,13 +21,16 @@ static const struct exception_table_entry __copy_user_fixup_ex = {
.fixup = (unsigned long)&__copy_user_fixup,
};
-/* Some functions that may trap due to a bad user-mode address have too many loads
- and stores in them to make it at all practical to label each one and put them all in
- the main exception table.
-
- In particular, the fast memcpy routine is like this. It's fix-up is just to fall back
- to a slow byte-at-a-time copy, which is handled the conventional way. So it's functionally
- OK to just handle any trap occurring in the fast memcpy with that fixup. */
+/*
+ * Some functions that may trap due to a bad user-mode address have too
+ * many loads and stores in them to make it at all practical to label
+ * each one and put them all in the main exception table.
+ *
+ * In particular, the fast memcpy routine is like this. It's fix-up is
+ * just to fall back to a slow byte-at-a-time copy, which is handled the
+ * conventional way. So it's functionally OK to just handle any trap
+ * occurring in the fast memcpy with that fixup.
+ */
static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
{
if ((addr >= (unsigned long)&copy_user_memcpy) &&
@@ -77,4 +80,3 @@ int fixup_exception(struct pt_regs *regs)
return 0;
}
-
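For reference, the mechanism the rewritten comment describes works roughly like this: fixup_exception() first consults the regular exception table, and only if that finds nothing does it fall back to the address-range check, so a fault anywhere inside the fast memcpy resolves to the single byte-at-a-time fallback. A hedged sketch of that flow, not the verbatim kernel code (copy_user_memcpy_end is assumed to be a symbol marking the end of the fast path):

/*
 * Sketch only: range-based fixup as described in the comment above.
 * copy_user_memcpy_end is an assumed end-of-routine marker; the real
 * file may structure this differently.
 */
extern void copy_user_memcpy(void);
extern void copy_user_memcpy_end(void);

static const struct exception_table_entry *
check_exception_ranges(unsigned long addr)
{
	if (addr >= (unsigned long)&copy_user_memcpy &&
	    addr <= (unsigned long)&copy_user_memcpy_end)
		return &__copy_user_fixup_ex;	/* one shared fixup entry */

	return NULL;
}

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Regular per-instruction entries first ... */
	fixup = search_exception_tables(regs->pc);
	/* ... then the coarse range check for the fast memcpy. */
	if (!fixup)
		fixup = check_exception_ranges(regs->pc);
	if (!fixup)
		return 0;

	regs->pc = fixup->fixup;
	return 1;
}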
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index d517e7d..f34274a 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -1,5 +1,5 @@
/*
- * arch/sh64/mm/tlb.c
+ * arch/sh/mm/tlb-sh5.c
*
* Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
* Copyright (C) 2003 Richard Curnow <richard.curnow@superh.com>
@@ -7,7 +7,6 @@
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
- *
*/
#include <linux/mm.h>
#include <linux/init.h>
@@ -163,4 +162,3 @@ inline void sh64_setup_tlb_slot(unsigned long long config_addr,
*/
inline void sh64_teardown_tlb_slot(unsigned long long config_addr)
__attribute__ ((alias("__flush_tlb_slot")));
-
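Worth noting in the hunk above: sh64_teardown_tlb_slot() is not a wrapper around __flush_tlb_slot(); it is declared with GCC's alias attribute, so both names resolve to the same code with no extra call. A small standalone illustration of the same idiom, with function names invented for the example:

/* Standalone illustration of __attribute__((alias)); names are made up. */
#include <stdio.h>

static void __flush_slot_impl(void)
{
	puts("flush the wired slot");
}

/*
 * teardown_slot is literally another name for __flush_slot_impl: both
 * symbols point at the same address, no wrapper call involved.
 */
void teardown_slot(void) __attribute__ ((alias("__flush_slot_impl")));

int main(void)
{
	teardown_slot();
	return 0;
}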