summaryrefslogtreecommitdiffstats
path: root/sys/kern/subr_rman.c
diff options
context:
space:
mode:
authortruckman <truckman@FreeBSD.org>2014-07-02 17:32:43 +0000
committertruckman <truckman@FreeBSD.org>2014-07-02 17:32:43 +0000
commiteca942517b0f7ca74b31bad5ebc3c7d26f34804a (patch)
treeda1d32ed7a26c796b24cdb659ae6d5b5d78ebb17 /sys/kern/subr_rman.c
parenta33844e926a330bdb5c1966b112da53d2735b82a (diff)
downloadFreeBSD-src-eca942517b0f7ca74b31bad5ebc3c7d26f34804a.zip
FreeBSD-src-eca942517b0f7ca74b31bad5ebc3c7d26f34804a.tar.gz
MFC r266814
Initialize r_flags the same way in all cases using a sanitized copy of flags that has several bits cleared. The RF_WANTED and RF_FIRSTSHARE bits are invalid in this context, and we want to defer setting RF_ACTIVE in r_flags until later. This should make rman_get_flags() return the correct answer in all cases. Add a KASSERT() to catch callers which incorrectly pass the RF_WANTED or RF_FIRSTSHARE flags. Do a strict equality check on the share type bits of flags. In particular, do an equality check on RF_PREFETCHABLE. The previous code would allow one type of mismatch of RF_PREFETCHABLE but disallow the other type of mismatch. Also, ignore the RF_ALIGNMENT_MASK bits since alignment validity should be handled by the amask check. This field contains an integer value, but previous code did a strange bitwise comparison on it. Leave the original value of flags unmolested as a minor debug aid. Change the start+amask overflow check to a KASSERT() since it is just meant to catch a highly unlikely programming error in the caller. Reviewed by: jhb
Diffstat (limited to 'sys/kern/subr_rman.c')
-rw-r--r--sys/kern/subr_rman.c30
1 files changed, 15 insertions, 15 deletions
diff --git a/sys/kern/subr_rman.c b/sys/kern/subr_rman.c
index 5ae96e9..c466b44 100644
--- a/sys/kern/subr_rman.c
+++ b/sys/kern/subr_rman.c
@@ -435,12 +435,14 @@ rman_adjust_resource(struct resource *rr, u_long start, u_long end)
return (0);
}
+#define SHARE_TYPE(f) (f & (RF_SHAREABLE | RF_TIMESHARE | RF_PREFETCHABLE))
+
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
u_long count, u_long bound, u_int flags,
struct device *dev)
{
- u_int want_activate;
+ u_int new_rflags;
struct resource_i *r, *s, *rv;
u_long rstart, rend, amask, bmask;
@@ -450,8 +452,10 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
"length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
count, flags,
dev == NULL ? "<null>" : device_get_nameunit(dev)));
- want_activate = (flags & RF_ACTIVE);
- flags &= ~RF_ACTIVE;
+ KASSERT((flags & (RF_WANTED | RF_FIRSTSHARE)) == 0,
+ ("invalid flags %#x", flags));
+ new_rflags = (flags & ~(RF_ACTIVE | RF_WANTED | RF_FIRSTSHARE)) |
+ RF_ALLOCATED;
mtx_lock(rm->rm_mtx);
@@ -466,10 +470,8 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
}
amask = (1ul << RF_ALIGNMENT(flags)) - 1;
- if (start > ULONG_MAX - amask) {
- DPRINTF(("start+amask would wrap around\n"));
- goto out;
- }
+ KASSERT(start <= ULONG_MAX - amask,
+ ("start (%#lx) + amask (%#lx) would wrap around", start, amask));
/* If bound is 0, bmask will also be 0 */
bmask = ~(bound - 1);
@@ -522,7 +524,7 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
if ((s->r_end - s->r_start + 1) == count) {
DPRINTF(("candidate region is entire chunk\n"));
rv = s;
- rv->r_flags |= RF_ALLOCATED | flags;
+ rv->r_flags = new_rflags;
rv->r_dev = dev;
goto out;
}
@@ -542,7 +544,7 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
goto out;
rv->r_start = rstart;
rv->r_end = rstart + count - 1;
- rv->r_flags = flags | RF_ALLOCATED;
+ rv->r_flags = new_rflags;
rv->r_dev = dev;
rv->r_rm = rm;
@@ -603,7 +605,7 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
goto out;
for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
- if ((s->r_flags & flags) == flags &&
+ if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
s->r_start >= start &&
(s->r_end - s->r_start + 1) == count &&
(s->r_start & amask) == 0 &&
@@ -613,8 +615,7 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
goto out;
rv->r_start = s->r_start;
rv->r_end = s->r_end;
- rv->r_flags = s->r_flags &
- (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
+ rv->r_flags = new_rflags;
rv->r_dev = dev;
rv->r_rm = rm;
if (s->r_sharehead == NULL) {
@@ -641,13 +642,12 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
*/
out:
/*
- * If the user specified RF_ACTIVE in the initial flags,
- * which is reflected in `want_activate', we attempt to atomically
+ * If the user specified RF_ACTIVE in flags, we attempt to atomically
* activate the resource. If this fails, we release the resource
* and indicate overall failure. (This behavior probably doesn't
* make sense for RF_TIMESHARE-type resources.)
*/
- if (rv && want_activate) {
+ if (rv && (flags & RF_ACTIVE) != 0) {
struct resource_i *whohas;
if (int_rman_activate_resource(rm, rv, &whohas)) {
int_rman_release_resource(rm, rv);
OpenPOWER on IntegriCloud