From f7fcec93b619337feb9da829b8a9ab6ba86393bc Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Thu, 14 Oct 2010 03:49:15 +0900
Subject: sh: Fix up PMB locking.

This first converts the PMB locking over to raw spinlocks, and then
fixes up a nested locking issue that was triggering lockdep early on:

 swapper/0 is trying to acquire lock:
  (&pmbe->lock){......}, at: [<806be9bc>] pmb_init+0xf4/0x4dc

 but task is already holding lock:
  (&pmbe->lock){......}, at: [<806be98e>] pmb_init+0xc6/0x4dc

 other info that might help us debug this:
 1 lock held by swapper/0:
  #0:  (&pmbe->lock){......}, at: [<806be98e>] pmb_init+0xc6/0x4dc
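
The underlying issue is that pmb_bolt_mapping() and pmb_synchronize()
take the lock of the entry being set up and then, while still holding
it, take the lock of the previous entry in order to link the two.  Both
locks belong to the same lock class (they are embedded in struct
pmb_entry), so lockdep flags the inner acquisition as a potential
recursive deadlock.  Annotating it with raw_spin_lock_nested() and
SINGLE_DEPTH_NESTING tells lockdep that this is an intentional
one-level nesting.  A condensed sketch of the pattern (struct entry and
link_entries() are illustrative stand-ins; the locking calls mirror the
pmb_bolt_mapping() hunk below):

  #include <linux/spinlock.h>
  #include <linux/lockdep.h>

  struct entry {
          raw_spinlock_t lock;
          struct entry *link;
  };

  static void link_entries(struct entry *pmbp, struct entry *pmbe)
  {
          unsigned long flags;

          raw_spin_lock_irqsave(&pmbe->lock, flags);

          /*
           * Same lock class as pmbe->lock, so a plain raw_spin_lock()
           * here would trip lockdep's recursive locking check even
           * though pmbp and pmbe are always distinct entries.
           */
          raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
          pmbp->link = pmbe;
          raw_spin_unlock(&pmbp->lock);

          raw_spin_unlock_irqrestore(&pmbe->lock, flags);
  }

The raw spinlock conversion itself is presumably so that these
early-boot, IRQs-off paths keep a genuinely spinning lock even on
configurations where spinlock_t can become preemptible.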

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/mm/pmb.c | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 233c011..b20b1b3 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -40,7 +40,7 @@ struct pmb_entry {
 	unsigned long flags;
 	unsigned long size;
 
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -265,7 +265,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	memset(pmbe, 0, sizeof(struct pmb_entry));
 
-	spin_lock_init(&pmbe->lock);
+	raw_spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn	= vpn;
 	pmbe->ppn	= ppn;
@@ -327,9 +327,9 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmbe->lock, flags);
+	raw_spin_lock_irqsave(&pmbe->lock, flags);
 	__set_pmb_entry(pmbe);
-	spin_unlock_irqrestore(&pmbe->lock, flags);
+	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 #endif /* CONFIG_PM */
 
@@ -368,7 +368,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 				return PTR_ERR(pmbe);
 			}
 
-			spin_lock_irqsave(&pmbe->lock, flags);
+			raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 			pmbe->size = pmb_sizes[i].size;
 
@@ -383,9 +383,10 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			 * entries for easier tear-down.
 			 */
 			if (likely(pmbp)) {
-				spin_lock(&pmbp->lock);
+				raw_spin_lock_nested(&pmbp->lock,
+						     SINGLE_DEPTH_NESTING);
 				pmbp->link = pmbe;
-				spin_unlock(&pmbp->lock);
+				raw_spin_unlock(&pmbp->lock);
 			}
 
 			pmbp = pmbe;
@@ -398,7 +399,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 			i--;
 			mapped++;
 
-			spin_unlock_irqrestore(&pmbe->lock, flags);
+			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 		}
 	} while (size >= SZ_16M);
 
@@ -627,15 +628,14 @@ static void __init pmb_synchronize(void)
 			continue;
 		}
 
-		spin_lock_irqsave(&pmbe->lock, irqflags);
+		raw_spin_lock_irqsave(&pmbe->lock, irqflags);
 
 		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
 			if (pmb_sizes[j].flag == size)
 				pmbe->size = pmb_sizes[j].size;
 
 		if (pmbp) {
-			spin_lock(&pmbp->lock);
-
+			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
 			/*
 			 * Compare the previous entry against the current one to
 			 * see if the entries span a contiguous mapping. If so,
@@ -644,13 +644,12 @@ static void __init pmb_synchronize(void)
 			 */
 			if (pmb_can_merge(pmbp, pmbe))
 				pmbp->link = pmbe;
-
-			spin_unlock(&pmbp->lock);
+			raw_spin_unlock(&pmbp->lock);
 		}
 
 		pmbp = pmbe;
 
-		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
 	}
 }
 
@@ -757,7 +756,7 @@ static void __init pmb_resize(void)
 		/*
 		 * Found it, now resize it.
 		 */
-		spin_lock_irqsave(&pmbe->lock, flags);
+		raw_spin_lock_irqsave(&pmbe->lock, flags);
 
 		pmbe->size = SZ_16M;
 		pmbe->flags &= ~PMB_SZ_MASK;
@@ -767,7 +766,7 @@ static void __init pmb_resize(void)
 
 		__set_pmb_entry(pmbe);
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
+		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
 	read_unlock(&pmb_rwlock);