author     jhb <jhb@FreeBSD.org>    2011-04-29 20:05:19 +0000
committer  jhb <jhb@FreeBSD.org>    2011-04-29 20:05:19 +0000
commit     22b381b7217c2644a04f90b803d07661633ac04c (patch)
tree       68553f76dd359b9f2d5933c94bc812021fc6bf4c /sys/kern/subr_rman.c
parent     08955ceac0e6a5e070d655c0f47e170747a075d7 (diff)
Extend the rman(9) API to support altering an existing resource.
Specifically, these changes allow a resource to back a relocatable and resizable resource, such as the I/O window decoders in PCI-PCI bridges.

- rman_adjust_resource() can adjust the start and end address of an existing resource. It only succeeds if the newly requested address space is already free. It also supports shrinking a resource, in which case the freed space is marked unallocated in the rman.

- rman_first_free_region() and rman_last_free_region() return the start and end addresses of the first or last unallocated region in an rman, respectively. This can be used to determine by how much the resource backing an rman must be adjusted to accommodate an allocation request that does not fit into the existing rman.

While here, document the rm_start and rm_end fields in struct rman, rman_is_region_manager(), the bound argument to rman_reserve_resource_bound(), and rman_init_from_resource().
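The following is an illustrative sketch, not part of this commit: it builds a private rman, reserves a range from it, and then exercises the new rman_adjust_resource() and rman_first_free_region() calls described above. The function name rman_adjust_example() and the address values are invented for the example, and error handling is abbreviated; in a real consumer such as the PCI-PCI bridge case mentioned above, the resource backing the rman would be resized first.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>

/*
 * Illustrative only: exercise the new rman(9) calls on a private rman.
 * The region boundaries are arbitrary example values.
 */
static void
rman_adjust_example(void)
{
	static struct rman rm;
	struct resource *r;
	u_long start, end;

	rm.rm_type = RMAN_ARRAY;
	rm.rm_descr = "example window";
	if (rman_init(&rm) != 0 ||
	    rman_manage_region(&rm, 0x1000, 0x1fff) != 0)
		return;

	/* Reserve the low half of the managed region. */
	r = rman_reserve_resource(&rm, 0x1000, 0x17ff, 0x800, RF_ACTIVE,
	    NULL);
	if (r == NULL)
		return;

	/* Grow the allocation upward into the adjacent free space. */
	if (rman_adjust_resource(r, 0x1000, 0x1bff) == 0) {
		/* Only 0x1c00-0x1fff is still unallocated. */
		rman_first_free_region(&rm, &start, &end);
	}

	/* Shrink from the front; 0x1000-0x13ff becomes free again. */
	rman_adjust_resource(r, 0x1400, 0x1bff);

	rman_release_resource(r);
	rman_fini(&rm);
}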
Diffstat (limited to 'sys/kern/subr_rman.c')
-rw-r--r--   sys/kern/subr_rman.c   158
1 file changed, 158 insertions, 0 deletions
diff --git a/sys/kern/subr_rman.c b/sys/kern/subr_rman.c
index 4352278..3014b19 100644
--- a/sys/kern/subr_rman.c
+++ b/sys/kern/subr_rman.c
@@ -272,6 +272,164 @@ rman_fini(struct rman *rm)
return 0;
}
+int
+rman_first_free_region(struct rman *rm, u_long *start, u_long *end)
+{
+	struct resource_i *r;
+
+	mtx_lock(rm->rm_mtx);
+	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
+		if (!(r->r_flags & RF_ALLOCATED)) {
+			*start = r->r_start;
+			*end = r->r_end;
+			mtx_unlock(rm->rm_mtx);
+			return (0);
+		}
+	}
+	mtx_unlock(rm->rm_mtx);
+	return (ENOENT);
+}
+
+int
+rman_last_free_region(struct rman *rm, u_long *start, u_long *end)
+{
+	struct resource_i *r;
+
+	mtx_lock(rm->rm_mtx);
+	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
+		if (!(r->r_flags & RF_ALLOCATED)) {
+			*start = r->r_start;
+			*end = r->r_end;
+			mtx_unlock(rm->rm_mtx);
+			return (0);
+		}
+	}
+	mtx_unlock(rm->rm_mtx);
+	return (ENOENT);
+}
+
+/* Shrink or extend one or both ends of an allocated resource. */
+int
+rman_adjust_resource(struct resource *rr, u_long start, u_long end)
+{
+	struct resource_i *r, *s, *t, *new;
+	struct rman *rm;
+
+	/* Not supported for shared resources. */
+	r = rr->__r_i;
+	if (r->r_flags & (RF_TIMESHARE | RF_SHAREABLE))
+		return (EINVAL);
+
+	/*
+	 * This does not support wholesale moving of a resource.  At
+	 * least part of the desired new range must overlap with the
+	 * existing resource.
+	 */
+	if (end < r->r_start || r->r_end < start)
+		return (EINVAL);
+
+	/*
+	 * Find the two resource regions immediately adjacent to the
+	 * allocated resource.
+	 */
+	rm = r->r_rm;
+	mtx_lock(rm->rm_mtx);
+#ifdef INVARIANTS
+	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
+		if (s == r)
+			break;
+	}
+	if (s == NULL)
+		panic("resource not in list");
+#endif
+	s = TAILQ_PREV(r, resource_head, r_link);
+	t = TAILQ_NEXT(r, r_link);
+	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
+	    ("prev resource mismatch"));
+	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
+	    ("next resource mismatch"));
+
+	/*
+	 * See if the changes are permitted.  Shrinking is always allowed,
+	 * but growing requires sufficient room in the adjacent region.
+	 */
+	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
+	    s->r_start > start)) {
+		mtx_unlock(rm->rm_mtx);
+		return (EBUSY);
+	}
+	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
+	    t->r_end < end)) {
+		mtx_unlock(rm->rm_mtx);
+		return (EBUSY);
+	}
+
+	/*
+	 * While holding the lock, grow either end of the resource as
+	 * needed and shrink either end if the shrinking does not require
+	 * allocating a new resource.  We can safely drop the lock and then
+	 * insert a new range to handle the shrinking case afterwards.
+	 */
+	if (start < r->r_start ||
+	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
+		KASSERT(s->r_flags == 0, ("prev is busy"));
+		r->r_start = start;
+		if (s->r_start == start) {
+			TAILQ_REMOVE(&rm->rm_list, s, r_link);
+			free(s, M_RMAN);
+		} else
+			s->r_end = start - 1;
+	}
+	if (end > r->r_end ||
+	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
+		KASSERT(t->r_flags == 0, ("next is busy"));
+		r->r_end = end;
+		if (t->r_end == end) {
+			TAILQ_REMOVE(&rm->rm_list, t, r_link);
+			free(t, M_RMAN);
+		} else
+			t->r_start = end + 1;
+	}
+	mtx_unlock(rm->rm_mtx);
+
+	/*
+	 * Handle the shrinking cases that require allocating a new
+	 * resource to hold the newly-free region.  We have to recheck
+	 * if we still need this new region after acquiring the lock.
+	 */
+	if (start > r->r_start) {
+		new = int_alloc_resource(M_WAITOK);
+		new->r_start = r->r_start;
+		new->r_end = start - 1;
+		new->r_rm = rm;
+		mtx_lock(rm->rm_mtx);
+		r->r_start = start;
+		s = TAILQ_PREV(r, resource_head, r_link);
+		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
+			s->r_end = start - 1;
+			free(new, M_RMAN);
+		} else
+			TAILQ_INSERT_BEFORE(r, new, r_link);
+		mtx_unlock(rm->rm_mtx);
+	}
+	if (end < r->r_end) {
+		new = int_alloc_resource(M_WAITOK);
+		new->r_start = end + 1;
+		new->r_end = r->r_end;
+		new->r_rm = rm;
+		mtx_lock(rm->rm_mtx);
+		r->r_end = end;
+		t = TAILQ_NEXT(r, r_link);
+		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
+			t->r_start = end + 1;
+			free(new, M_RMAN);
+		} else
+			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
+		mtx_unlock(rm->rm_mtx);
+	}
+	return (0);
+}
+
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
u_long count, u_long bound, u_int flags,