author    David Woodhouse <David.Woodhouse@intel.com>  2012-11-21 10:38:13 +0000
committer David Woodhouse <David.Woodhouse@intel.com>  2012-11-21 10:38:13 +0000
commit    851462444d421c223965b12b836bef63da61b57f (patch)
tree      495baa14e638817941496c36e1443aed7dae0ea0 /drivers/rapidio/rio-scan.c
parent    5a6ea4af0907f995dc06df21a9c9ef764c7cd3bc (diff)
parent    6924d99fcdf1a688538a3cdebd1f135c22eec191 (diff)
Merge branch 'for-3.7' of git://git.infradead.org/users/dedekind/l2-mtd
Conflicts: drivers/mtd/nand/nand_base.c
Diffstat (limited to 'drivers/rapidio/rio-scan.c')
-rw-r--r--  drivers/rapidio/rio-scan.c  |  40
1 file changed, 17 insertions(+), 23 deletions(-)
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 48e9041..07da58b 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -55,9 +55,9 @@ static int rio_mport_phys_table[] = {
};
-/*
+/**
* rio_destid_alloc - Allocate next available destID for given network
- * net: RIO network
+ * @net: RIO network
*
* Returns next available device destination ID for the specified RIO network.
* Marks allocated ID as one in use.
@@ -69,14 +69,9 @@ static u16 rio_destid_alloc(struct rio_net *net)
struct rio_id_table *idtab = &net->destid_table;
spin_lock(&idtab->lock);
- destid = find_next_zero_bit(idtab->table, idtab->max, idtab->next);
- if (destid >= idtab->max)
- destid = find_first_zero_bit(idtab->table, idtab->max);
+ destid = find_first_zero_bit(idtab->table, idtab->max);
if (destid < idtab->max) {
- idtab->next = destid + 1;
- if (idtab->next >= idtab->max)
- idtab->next = 0;
set_bit(destid, idtab->table);
destid += idtab->start;
} else
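For reference, a condensed, hedged sketch of the post-patch allocation path: a single find_first_zero_bit() scan under the table spinlock, with no round-robin "next" cursor left to maintain. The struct below and the 0xffff failure sentinel are illustrative stand-ins, not the exact rio_id_table definitions.

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct id_table {                  /* illustrative stand-in for struct rio_id_table */
	u16 start;                 /* first destID handled by this table */
	u32 max;                   /* number of IDs tracked in the bitmap */
	unsigned long *table;      /* one bit per destID, set = in use */
	spinlock_t lock;
};

static u16 id_alloc(struct id_table *idtab)
{
	int destid;

	spin_lock(&idtab->lock);
	/* Lowest free bit wins, so freed IDs are reused immediately. */
	destid = find_first_zero_bit(idtab->table, idtab->max);
	if (destid < idtab->max) {
		set_bit(destid, idtab->table);
		destid += idtab->start;
	} else {
		destid = 0xffff;   /* assumed "no ID available" sentinel */
	}
	spin_unlock(&idtab->lock);

	return (u16)destid;
}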
@@ -86,10 +81,10 @@ static u16 rio_destid_alloc(struct rio_net *net)
return (u16)destid;
}
-/*
+/**
* rio_destid_reserve - Reserve the specivied destID
- * net: RIO network
- * destid: destID to reserve
+ * @net: RIO network
+ * @destid: destID to reserve
*
* Tries to reserve the specified destID.
* Returns 0 if successfull.
@@ -106,10 +101,10 @@ static int rio_destid_reserve(struct rio_net *net, u16 destid)
return oldbit;
}
-/*
+/**
* rio_destid_free - free a previously allocated destID
- * net: RIO network
- * destid: destID to free
+ * @net: RIO network
+ * @destid: destID to free
*
* Makes the specified destID available for use.
*/
@@ -123,9 +118,9 @@ static void rio_destid_free(struct rio_net *net, u16 destid)
spin_unlock(&idtab->lock);
}
-/*
+/**
* rio_destid_first - return first destID in use
- * net: RIO network
+ * @net: RIO network
*/
static u16 rio_destid_first(struct rio_net *net)
{
@@ -142,10 +137,10 @@ static u16 rio_destid_first(struct rio_net *net)
return (u16)destid;
}
-/*
+/**
* rio_destid_next - return next destID in use
- * net: RIO network
- * from: destination ID from which search shall continue
+ * @net: RIO network
+ * @from: destination ID from which search shall continue
*/
static u16 rio_destid_next(struct rio_net *net, u16 from)
{
@@ -1163,8 +1158,8 @@ static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port,
net = kzalloc(sizeof(struct rio_net), GFP_KERNEL);
if (net && do_enum) {
- net->destid_table.table = kzalloc(
- BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(port->sys_size)) *
+ net->destid_table.table = kcalloc(
+ BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(port->sys_size)),
sizeof(long),
GFP_KERNEL);
@@ -1174,7 +1169,6 @@ static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port,
net = NULL;
} else {
net->destid_table.start = start;
- net->destid_table.next = 0;
net->destid_table.max =
RIO_MAX_ROUTE_ENTRIES(port->sys_size);
spin_lock_init(&net->destid_table.lock);
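The kcalloc() conversion in the hunk above can be summarised with a small, hedged sketch; "entries" stands in for RIO_MAX_ROUTE_ENTRIES(port->sys_size), and the helper name is invented for illustration.

#include <linux/bitops.h>
#include <linux/slab.h>

/* Illustrative helper: allocate a zeroed destID bitmap covering 'entries' IDs. */
static unsigned long *alloc_destid_bitmap(unsigned int entries)
{
	/*
	 * kcalloc() performs the BITS_TO_LONGS(entries) * sizeof(long)
	 * multiplication with overflow checking before allocating zeroed
	 * memory; the previous open-coded kzalloc(n * size, ...) relied on
	 * the caller to guarantee the product cannot wrap.
	 */
	return kcalloc(BITS_TO_LONGS(entries), sizeof(long), GFP_KERNEL);
}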
@@ -1391,7 +1385,7 @@ int __devinit rio_disc_mport(struct rio_mport *mport)
while (time_before(jiffies, to_end)) {
if (rio_enum_complete(mport))
goto enum_done;
- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
+ msleep(10);
}
pr_debug("RIO: discovery timeout on mport %d %s\n",