author     trasz <trasz@FreeBSD.org>    2010-12-02 17:37:16 +0000
committer  trasz <trasz@FreeBSD.org>    2010-12-02 17:37:16 +0000
commit     e5fb69509c12904dfc9fb137ed1dbcafc2e86e29 (patch)
tree       ea2ce0208bf11af89bb5734c7f42ac4669677e31
parent     789892d7ed5efe3e04bd964f26650d2043686819 (diff)
Replace pointer to "struct uidinfo" with pointer to "struct ucred"
in "struct vm_object".  This is required to make it possible to
account for per-jail swap usage.

Reviewed by:	kib@
Tested by:	pho@
Sponsored by:	FreeBSD Foundation
-rw-r--r--  sys/fs/procfs/procfs_map.c |   6
-rw-r--r--  sys/kern/uipc_shm.c        |   4
-rw-r--r--  sys/vm/default_pager.c     |   8
-rw-r--r--  sys/vm/swap_pager.c        |  34
-rw-r--r--  sys/vm/vm.h                |   6
-rw-r--r--  sys/vm/vm_fault.c          |  10
-rw-r--r--  sys/vm/vm_map.c            | 212
-rw-r--r--  sys/vm/vm_map.h            |   2
-rw-r--r--  sys/vm/vm_object.c         |  32
-rw-r--r--  sys/vm/vm_object.h         |   2
10 files changed, 158 insertions, 158 deletions
diff --git a/sys/fs/procfs/procfs_map.c b/sys/fs/procfs/procfs_map.c
index 2622d1e..5b322eb 100644
--- a/sys/fs/procfs/procfs_map.c
+++ b/sys/fs/procfs/procfs_map.c
@@ -83,7 +83,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
vm_map_entry_t entry, tmp_entry;
struct vnode *vp;
char *fullpath, *freepath;
- struct uidinfo *uip;
+ struct ucred *cred;
int error, vfslocked;
unsigned int last_timestamp;
#ifdef COMPAT_FREEBSD32
@@ -136,7 +136,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
if (obj->shadow_count == 1)
privateresident = obj->resident_page_count;
}
- uip = (entry->uip) ? entry->uip : (obj ? obj->uip : NULL);
+ cred = (entry->cred) ? entry->cred : (obj ? obj->cred : NULL);
resident = 0;
addr = entry->start;
@@ -221,7 +221,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
(e_eflags & MAP_ENTRY_COW)?"COW":"NCOW",
(e_eflags & MAP_ENTRY_NEEDS_COPY)?"NC":"NNC",
type, fullpath,
- uip ? "CH":"NCH", uip ? uip->ui_uid : -1);
+ cred ? "CH":"NCH", cred ? cred->cr_ruid : -1);
if (freepath != NULL)
free(freepath, M_TEMP);
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 2e37463..cef8317 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -271,7 +271,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
swap_pager_freespace(object, nobjsize, delta);
/* Free the swap accounted for shm */
- swap_release_by_uid(delta, object->uip);
+ swap_release_by_cred(delta, object->cred);
object->charge -= delta;
/*
@@ -314,7 +314,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
/* Attempt to reserve the swap */
delta = ptoa(nobjsize - object->size);
- if (!swap_reserve_by_uid(delta, object->uip)) {
+ if (!swap_reserve_by_cred(delta, object->cred)) {
VM_OBJECT_UNLOCK(object);
return (ENOMEM);
}
diff --git a/sys/vm/default_pager.c b/sys/vm/default_pager.c
index ceb2c77..12dc823 100644
--- a/sys/vm/default_pager.c
+++ b/sys/vm/default_pager.c
@@ -80,21 +80,19 @@ default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t offset, struct ucred *cred)
{
vm_object_t object;
- struct uidinfo *uip;
if (handle != NULL)
panic("default_pager_alloc: handle specified");
if (cred != NULL) {
- uip = cred->cr_ruidinfo;
- if (!swap_reserve_by_uid(size, uip))
+ if (!swap_reserve_by_cred(size, cred))
return (NULL);
- uihold(uip);
+ crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(round_page(offset + size)));
if (cred != NULL) {
VM_OBJECT_LOCK(object);
- object->uip = uip;
+ object->cred = cred;
object->charge = size;
VM_OBJECT_UNLOCK(object);
}
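
The default_pager.c hunk above shows the pattern that every object-allocating
path in this commit now follows: reserve swap against the credential, take a
reference with crhold(), and record the owner and charge on the new object.
Condensed into one illustrative helper (pager_alloc_charged is not a real
function, and error handling is trimmed):

static vm_object_t
pager_alloc_charged(vm_ooffset_t size, vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;

	if (cred != NULL) {
		/* Fail the allocation if the swap reservation fails. */
		if (!swap_reserve_by_cred(size, cred))
			return (NULL);
		crhold(cred);		/* object keeps a cred reference */
	}
	object = vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(round_page(offset + size)));
	if (cred != NULL) {
		VM_OBJECT_LOCK(object);
		object->cred = cred;	/* accounting owner */
		object->charge = size;	/* bytes of swap reserved */
		VM_OBJECT_UNLOCK(object);
	}
	return (object);
}

The matching release happens at object teardown: the sys/vm/vm_object.c hunk
further down calls swap_release_by_cred() and crfree() in vm_object_destroy().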
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index bea235a..35a6df5 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -174,16 +174,19 @@ int
swap_reserve(vm_ooffset_t incr)
{
- return (swap_reserve_by_uid(incr, curthread->td_ucred->cr_ruidinfo));
+ return (swap_reserve_by_cred(incr, curthread->td_ucred));
}
int
-swap_reserve_by_uid(vm_ooffset_t incr, struct uidinfo *uip)
+swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
{
vm_ooffset_t r, s;
int res, error;
static int curfail;
static struct timeval lastfail;
+ struct uidinfo *uip;
+
+ uip = cred->cr_ruidinfo;
if (incr & PAGE_MASK)
panic("swap_reserve: & PAGE_MASK");
@@ -249,17 +252,20 @@ swap_reserve_force(vm_ooffset_t incr)
void
swap_release(vm_ooffset_t decr)
{
- struct uidinfo *uip;
+ struct ucred *cred;
PROC_LOCK(curproc);
- uip = curthread->td_ucred->cr_ruidinfo;
- swap_release_by_uid(decr, uip);
+ cred = curthread->td_ucred;
+ swap_release_by_cred(decr, cred);
PROC_UNLOCK(curproc);
}
void
-swap_release_by_uid(vm_ooffset_t decr, struct uidinfo *uip)
+swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
{
+ struct uidinfo *uip;
+
+ uip = cred->cr_ruidinfo;
if (decr & PAGE_MASK)
panic("swap_release: & PAGE_MASK");
@@ -579,9 +585,7 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
{
vm_object_t object;
vm_pindex_t pindex;
- struct uidinfo *uip;
- uip = NULL;
pindex = OFF_TO_IDX(offset + PAGE_MASK + size);
if (handle) {
mtx_lock(&Giant);
@@ -595,19 +599,18 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
object = vm_pager_object_lookup(NOBJLIST(handle), handle);
if (object == NULL) {
if (cred != NULL) {
- uip = cred->cr_ruidinfo;
- if (!swap_reserve_by_uid(size, uip)) {
+ if (!swap_reserve_by_cred(size, cred)) {
sx_xunlock(&sw_alloc_sx);
mtx_unlock(&Giant);
return (NULL);
}
- uihold(uip);
+ crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
VM_OBJECT_LOCK(object);
object->handle = handle;
if (cred != NULL) {
- object->uip = uip;
+ object->cred = cred;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
@@ -617,15 +620,14 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
mtx_unlock(&Giant);
} else {
if (cred != NULL) {
- uip = cred->cr_ruidinfo;
- if (!swap_reserve_by_uid(size, uip))
+ if (!swap_reserve_by_cred(size, cred))
return (NULL);
- uihold(uip);
+ crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
VM_OBJECT_LOCK(object);
if (cred != NULL) {
- object->uip = uip;
+ object->cred = cred;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 6dd3cbe..67cc922 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -141,12 +141,12 @@ struct kva_md_info {
extern struct kva_md_info kmi;
extern void vm_ksubmap_init(struct kva_md_info *);
-struct uidinfo;
+struct ucred;
int swap_reserve(vm_ooffset_t incr);
-int swap_reserve_by_uid(vm_ooffset_t incr, struct uidinfo *uip);
+int swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred);
void swap_reserve_force(vm_ooffset_t incr);
void swap_release(vm_ooffset_t decr);
-void swap_release_by_uid(vm_ooffset_t decr, struct uidinfo *uip);
+void swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred);
#endif /* VM_H */
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 463bd1f..11489a8 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1166,14 +1166,14 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
dst_entry->offset = 0;
dst_object->charge = dst_entry->end - dst_entry->start;
if (fork_charge != NULL) {
- KASSERT(dst_entry->uip == NULL,
+ KASSERT(dst_entry->cred == NULL,
("vm_fault_copy_entry: leaked swp charge"));
- dst_object->uip = curthread->td_ucred->cr_ruidinfo;
- uihold(dst_object->uip);
+ dst_object->cred = curthread->td_ucred;
+ crhold(dst_object->cred);
*fork_charge += dst_object->charge;
} else {
- dst_object->uip = dst_entry->uip;
- dst_entry->uip = NULL;
+ dst_object->cred = dst_entry->cred;
+ dst_entry->cred = NULL;
}
access = prot = dst_entry->protection;
/*
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index ad71323..35552a6 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -137,8 +137,8 @@ static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
-#define ENTRY_CHARGED(e) ((e)->uip != NULL || \
- ((e)->object.vm_object != NULL && (e)->object.vm_object->uip != NULL && \
+#define ENTRY_CHARGED(e) ((e)->cred != NULL || \
+ ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
!((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
/*
@@ -1095,7 +1095,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_map_entry_t prev_entry;
vm_map_entry_t temp_entry;
vm_eflags_t protoeflags;
- struct uidinfo *uip;
+ struct ucred *cred;
boolean_t charge_prev_obj;
VM_MAP_ASSERT_LOCKED(map);
@@ -1140,7 +1140,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
if (cow & MAP_DISABLE_COREDUMP)
protoeflags |= MAP_ENTRY_NOCOREDUMP;
- uip = NULL;
+ cred = NULL;
KASSERT((object != kmem_object && object != kernel_object) ||
((object == kmem_object || object == kernel_object) &&
!(protoeflags & MAP_ENTRY_NEEDS_COPY)),
@@ -1152,10 +1152,10 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
return (KERN_RESOURCE_SHORTAGE);
KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
- object->uip == NULL,
+ object->cred == NULL,
("OVERCOMMIT: vm_map_insert o %p", object));
- uip = curthread->td_ucred->cr_ruidinfo;
- uihold(uip);
+ cred = curthread->td_ucred;
+ crhold(cred);
if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
charge_prev_obj = TRUE;
}
@@ -1181,9 +1181,9 @@ charged:
(prev_entry->eflags == protoeflags) &&
(prev_entry->end == start) &&
(prev_entry->wired_count == 0) &&
- (prev_entry->uip == uip ||
+ (prev_entry->cred == cred ||
(prev_entry->object.vm_object != NULL &&
- (prev_entry->object.vm_object->uip == uip))) &&
+ (prev_entry->object.vm_object->cred == cred))) &&
vm_object_coalesce(prev_entry->object.vm_object,
prev_entry->offset,
(vm_size_t)(prev_entry->end - prev_entry->start),
@@ -1200,8 +1200,8 @@ charged:
prev_entry->end = end;
vm_map_entry_resize_free(map, prev_entry);
vm_map_simplify_entry(map, prev_entry);
- if (uip != NULL)
- uifree(uip);
+ if (cred != NULL)
+ crfree(cred);
return (KERN_SUCCESS);
}
@@ -1215,11 +1215,11 @@ charged:
offset = prev_entry->offset +
(prev_entry->end - prev_entry->start);
vm_object_reference(object);
- if (uip != NULL && object != NULL && object->uip != NULL &&
+ if (cred != NULL && object != NULL && object->cred != NULL &&
!(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
/* Object already accounts for this uid. */
- uifree(uip);
- uip = NULL;
+ crfree(cred);
+ cred = NULL;
}
}
@@ -1235,7 +1235,7 @@ charged:
new_entry = vm_map_entry_create(map);
new_entry->start = start;
new_entry->end = end;
- new_entry->uip = NULL;
+ new_entry->cred = NULL;
new_entry->eflags = protoeflags;
new_entry->object.vm_object = object;
@@ -1247,9 +1247,9 @@ charged:
new_entry->max_protection = max;
new_entry->wired_count = 0;
- KASSERT(uip == NULL || !ENTRY_CHARGED(new_entry),
+ KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
- new_entry->uip = uip;
+ new_entry->cred = cred;
/*
* Insert the new entry into the list
@@ -1466,7 +1466,7 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
(prev->max_protection == entry->max_protection) &&
(prev->inheritance == entry->inheritance) &&
(prev->wired_count == entry->wired_count) &&
- (prev->uip == entry->uip)) {
+ (prev->cred == entry->cred)) {
vm_map_entry_unlink(map, prev);
entry->start = prev->start;
entry->offset = prev->offset;
@@ -1484,8 +1484,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
*/
if (prev->object.vm_object)
vm_object_deallocate(prev->object.vm_object);
- if (prev->uip != NULL)
- uifree(prev->uip);
+ if (prev->cred != NULL)
+ crfree(prev->cred);
vm_map_entry_dispose(map, prev);
}
}
@@ -1502,7 +1502,7 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
(next->max_protection == entry->max_protection) &&
(next->inheritance == entry->inheritance) &&
(next->wired_count == entry->wired_count) &&
- (next->uip == entry->uip)) {
+ (next->cred == entry->cred)) {
vm_map_entry_unlink(map, next);
entry->end = next->end;
vm_map_entry_resize_free(map, entry);
@@ -1512,8 +1512,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
*/
if (next->object.vm_object)
vm_object_deallocate(next->object.vm_object);
- if (next->uip != NULL)
- uifree(next->uip);
+ if (next->cred != NULL)
+ crfree(next->cred);
vm_map_entry_dispose(map, next);
}
}
@@ -1562,21 +1562,21 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
atop(entry->end - entry->start));
entry->object.vm_object = object;
entry->offset = 0;
- if (entry->uip != NULL) {
- object->uip = entry->uip;
+ if (entry->cred != NULL) {
+ object->cred = entry->cred;
object->charge = entry->end - entry->start;
- entry->uip = NULL;
+ entry->cred = NULL;
}
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
- entry->uip != NULL) {
+ entry->cred != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
- KASSERT(entry->object.vm_object->uip == NULL,
- ("OVERCOMMIT: vm_entry_clip_start: both uip e %p", entry));
- entry->object.vm_object->uip = entry->uip;
+ KASSERT(entry->object.vm_object->cred == NULL,
+ ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
+ entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = entry->end - entry->start;
VM_OBJECT_UNLOCK(entry->object.vm_object);
- entry->uip = NULL;
+ entry->cred = NULL;
}
new_entry = vm_map_entry_create(map);
@@ -1585,8 +1585,8 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
new_entry->end = start;
entry->offset += (start - entry->start);
entry->start = start;
- if (new_entry->uip != NULL)
- uihold(entry->uip);
+ if (new_entry->cred != NULL)
+ crhold(entry->cred);
vm_map_entry_link(map, entry->prev, new_entry);
@@ -1632,21 +1632,21 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
atop(entry->end - entry->start));
entry->object.vm_object = object;
entry->offset = 0;
- if (entry->uip != NULL) {
- object->uip = entry->uip;
+ if (entry->cred != NULL) {
+ object->cred = entry->cred;
object->charge = entry->end - entry->start;
- entry->uip = NULL;
+ entry->cred = NULL;
}
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
- entry->uip != NULL) {
+ entry->cred != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
- KASSERT(entry->object.vm_object->uip == NULL,
- ("OVERCOMMIT: vm_entry_clip_end: both uip e %p", entry));
- entry->object.vm_object->uip = entry->uip;
+ KASSERT(entry->object.vm_object->cred == NULL,
+ ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
+ entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = entry->end - entry->start;
VM_OBJECT_UNLOCK(entry->object.vm_object);
- entry->uip = NULL;
+ entry->cred = NULL;
}
/*
@@ -1657,8 +1657,8 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
new_entry->start = entry->end = end;
new_entry->offset += (end - entry->start);
- if (new_entry->uip != NULL)
- uihold(entry->uip);
+ if (new_entry->cred != NULL)
+ crhold(entry->cred);
vm_map_entry_link(map, entry, new_entry);
@@ -1812,7 +1812,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
{
vm_map_entry_t current, entry;
vm_object_t obj;
- struct uidinfo *uip;
+ struct ucred *cred;
vm_prot_t old_prot;
vm_map_lock(map);
@@ -1858,7 +1858,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
continue;
}
- uip = curthread->td_ucred->cr_ruidinfo;
+ cred = curthread->td_ucred;
obj = current->object.vm_object;
if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
@@ -1866,8 +1866,8 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_map_unlock(map);
return (KERN_RESOURCE_SHORTAGE);
}
- uihold(uip);
- current->uip = uip;
+ crhold(cred);
+ current->cred = cred;
continue;
}
@@ -1890,8 +1890,8 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
return (KERN_RESOURCE_SHORTAGE);
}
- uihold(uip);
- obj->uip = uip;
+ crhold(cred);
+ obj->cred = cred;
obj->charge = ptoa(obj->size);
VM_OBJECT_UNLOCK(obj);
}
@@ -2640,16 +2640,16 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
size = entry->end - entry->start;
map->size -= size;
- if (entry->uip != NULL) {
- swap_release_by_uid(size, entry->uip);
- uifree(entry->uip);
+ if (entry->cred != NULL) {
+ swap_release_by_cred(size, entry->cred);
+ crfree(entry->cred);
}
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
(object != NULL)) {
- KASSERT(entry->uip == NULL || object->uip == NULL ||
+ KASSERT(entry->cred == NULL || object->cred == NULL ||
(entry->eflags & MAP_ENTRY_NEEDS_COPY),
- ("OVERCOMMIT vm_map_entry_delete: both uip %p", entry));
+ ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
count = OFF_TO_IDX(size);
offidxstart = OFF_TO_IDX(entry->offset);
offidxend = offidxstart + count;
@@ -2665,11 +2665,11 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
offidxstart < object->size) {
size1 = object->size;
object->size = offidxstart;
- if (object->uip != NULL) {
+ if (object->cred != NULL) {
size1 -= object->size;
KASSERT(object->charge >= ptoa(size1),
("vm_map_entry_delete: object->charge < 0"));
- swap_release_by_uid(ptoa(size1), object->uip);
+ swap_release_by_cred(ptoa(size1), object->cred);
object->charge -= ptoa(size1);
}
}
@@ -2855,7 +2855,7 @@ vm_map_copy_entry(
{
vm_object_t src_object;
vm_offset_t size;
- struct uidinfo *uip;
+ struct ucred *cred;
int charged;
VM_MAP_ASSERT_LOCKED(dst_map);
@@ -2894,25 +2894,25 @@ vm_map_copy_entry(
}
vm_object_reference_locked(src_object);
vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
- if (src_entry->uip != NULL &&
+ if (src_entry->cred != NULL &&
!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
- KASSERT(src_object->uip == NULL,
- ("OVERCOMMIT: vm_map_copy_entry: uip %p",
+ KASSERT(src_object->cred == NULL,
+ ("OVERCOMMIT: vm_map_copy_entry: cred %p",
src_object));
- src_object->uip = src_entry->uip;
+ src_object->cred = src_entry->cred;
src_object->charge = size;
}
VM_OBJECT_UNLOCK(src_object);
dst_entry->object.vm_object = src_object;
if (charged) {
- uip = curthread->td_ucred->cr_ruidinfo;
- uihold(uip);
- dst_entry->uip = uip;
+ cred = curthread->td_ucred;
+ crhold(cred);
+ dst_entry->cred = cred;
*fork_charge += size;
if (!(src_entry->eflags &
MAP_ENTRY_NEEDS_COPY)) {
- uihold(uip);
- src_entry->uip = uip;
+ crhold(cred);
+ src_entry->cred = cred;
*fork_charge += size;
}
}
@@ -2922,9 +2922,9 @@ vm_map_copy_entry(
} else {
dst_entry->object.vm_object = NULL;
dst_entry->offset = 0;
- if (src_entry->uip != NULL) {
- dst_entry->uip = curthread->td_ucred->cr_ruidinfo;
- uihold(dst_entry->uip);
+ if (src_entry->cred != NULL) {
+ dst_entry->cred = curthread->td_ucred;
+ crhold(dst_entry->cred);
*fork_charge += size;
}
}
@@ -3026,11 +3026,11 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
atop(old_entry->end - old_entry->start));
old_entry->object.vm_object = object;
old_entry->offset = 0;
- if (old_entry->uip != NULL) {
- object->uip = old_entry->uip;
+ if (old_entry->cred != NULL) {
+ object->cred = old_entry->cred;
object->charge = old_entry->end -
old_entry->start;
- old_entry->uip = NULL;
+ old_entry->cred = NULL;
}
}
@@ -3058,11 +3058,11 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
}
VM_OBJECT_LOCK(object);
vm_object_clear_flag(object, OBJ_ONEMAPPING);
- if (old_entry->uip != NULL) {
- KASSERT(object->uip == NULL, ("vmspace_fork both uip"));
- object->uip = old_entry->uip;
+ if (old_entry->cred != NULL) {
+ KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
+ object->cred = old_entry->cred;
object->charge = old_entry->end - old_entry->start;
- old_entry->uip = NULL;
+ old_entry->cred = NULL;
}
VM_OBJECT_UNLOCK(object);
@@ -3102,7 +3102,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
MAP_ENTRY_IN_TRANSITION);
new_entry->wired_count = 0;
new_entry->object.vm_object = NULL;
- new_entry->uip = NULL;
+ new_entry->cred = NULL;
vm_map_entry_link(new_map, new_map->header.prev,
new_entry);
vmspace_map_entry_forked(vm1, vm2, new_entry);
@@ -3241,7 +3241,7 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
size_t grow_amount, max_grow;
rlim_t stacklim, vmemlim;
int is_procstack, rv;
- struct uidinfo *uip;
+ struct ucred *cred;
Retry:
PROC_LOCK(p);
@@ -3412,17 +3412,17 @@ Retry:
}
grow_amount = addr - stack_entry->end;
- uip = stack_entry->uip;
- if (uip == NULL && stack_entry->object.vm_object != NULL)
- uip = stack_entry->object.vm_object->uip;
- if (uip != NULL && !swap_reserve_by_uid(grow_amount, uip))
+ cred = stack_entry->cred;
+ if (cred == NULL && stack_entry->object.vm_object != NULL)
+ cred = stack_entry->object.vm_object->cred;
+ if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
rv = KERN_NO_SPACE;
/* Grow the underlying object if applicable. */
else if (stack_entry->object.vm_object == NULL ||
vm_object_coalesce(stack_entry->object.vm_object,
stack_entry->offset,
(vm_size_t)(stack_entry->end - stack_entry->start),
- (vm_size_t)grow_amount, uip != NULL)) {
+ (vm_size_t)grow_amount, cred != NULL)) {
map->size += (addr - stack_entry->end);
/* Update the current entry. */
stack_entry->end = addr;
@@ -3503,7 +3503,7 @@ vmspace_unshare(struct proc *p)
newvmspace = vmspace_fork(oldvmspace, &fork_charge);
if (newvmspace == NULL)
return (ENOMEM);
- if (!swap_reserve_by_uid(fork_charge, p->p_ucred->cr_ruidinfo)) {
+ if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
vmspace_free(newvmspace);
return (ENOMEM);
}
@@ -3553,7 +3553,7 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
vm_prot_t prot;
vm_prot_t fault_type = fault_typea;
vm_object_t eobject;
- struct uidinfo *uip;
+ struct ucred *cred;
vm_ooffset_t size;
RetryLookup:;
@@ -3627,19 +3627,19 @@ RetryLookup:;
if (vm_map_lock_upgrade(map))
goto RetryLookup;
- if (entry->uip == NULL) {
+ if (entry->cred == NULL) {
/*
* The debugger owner is charged for
* the memory.
*/
- uip = curthread->td_ucred->cr_ruidinfo;
- uihold(uip);
- if (!swap_reserve_by_uid(size, uip)) {
- uifree(uip);
+ cred = curthread->td_ucred;
+ crhold(cred);
+ if (!swap_reserve_by_cred(size, cred)) {
+ crfree(cred);
vm_map_unlock(map);
return (KERN_RESOURCE_SHORTAGE);
}
- entry->uip = uip;
+ entry->cred = cred;
}
vm_object_shadow(
&entry->object.vm_object,
@@ -3647,19 +3647,19 @@ RetryLookup:;
atop(size));
entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
eobject = entry->object.vm_object;
- if (eobject->uip != NULL) {
+ if (eobject->cred != NULL) {
/*
* The object was not shadowed.
*/
- swap_release_by_uid(size, entry->uip);
- uifree(entry->uip);
- entry->uip = NULL;
- } else if (entry->uip != NULL) {
+ swap_release_by_cred(size, entry->cred);
+ crfree(entry->cred);
+ entry->cred = NULL;
+ } else if (entry->cred != NULL) {
VM_OBJECT_LOCK(eobject);
- eobject->uip = entry->uip;
+ eobject->cred = entry->cred;
eobject->charge = size;
VM_OBJECT_UNLOCK(eobject);
- entry->uip = NULL;
+ entry->cred = NULL;
}
vm_map_lock_downgrade(map);
@@ -3682,12 +3682,12 @@ RetryLookup:;
entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
atop(size));
entry->offset = 0;
- if (entry->uip != NULL) {
+ if (entry->cred != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
- entry->object.vm_object->uip = entry->uip;
+ entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = size;
VM_OBJECT_UNLOCK(entry->object.vm_object);
- entry->uip = NULL;
+ entry->cred = NULL;
}
vm_map_lock_downgrade(map);
}
@@ -3861,14 +3861,14 @@ DB_SHOW_COMMAND(map, vm_map_print)
db_indent -= 2;
}
} else {
- if (entry->uip != NULL)
- db_printf(", uip %d", entry->uip->ui_uid);
+ if (entry->cred != NULL)
+ db_printf(", ruid %d", entry->cred->cr_ruid);
db_printf(", object=%p, offset=0x%jx",
(void *)entry->object.vm_object,
(uintmax_t)entry->offset);
- if (entry->object.vm_object && entry->object.vm_object->uip)
- db_printf(", obj uip %d charge %jx",
- entry->object.vm_object->uip->ui_uid,
+ if (entry->object.vm_object && entry->object.vm_object->cred)
+ db_printf(", obj ruid %d charge %jx",
+ entry->object.vm_object->cred->cr_ruid,
(uintmax_t)entry->object.vm_object->charge);
if (entry->eflags & MAP_ENTRY_COW)
db_printf(", copy (%s)",
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 8715b41..f7fc5f5 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -114,7 +114,7 @@ struct vm_map_entry {
vm_inherit_t inheritance; /* inheritance */
int wired_count; /* can be paged if = 0 */
vm_pindex_t lastr; /* last read */
- struct uidinfo *uip; /* tmp storage for creator ref */
+ struct ucred *cred; /* tmp storage for creator ref */
};
#define MAP_ENTRY_NOSYNC 0x0001
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 94a96bd..9e5d159 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -213,7 +213,7 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
object->ref_count = 1;
object->memattr = VM_MEMATTR_DEFAULT;
object->flags = 0;
- object->uip = NULL;
+ object->cred = NULL;
object->charge = 0;
if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
object->flags = OBJ_ONEMAPPING;
@@ -634,15 +634,15 @@ vm_object_destroy(vm_object_t object)
/*
* Release the allocation charge.
*/
- if (object->uip != NULL) {
+ if (object->cred != NULL) {
KASSERT(object->type == OBJT_DEFAULT ||
object->type == OBJT_SWAP,
- ("vm_object_terminate: non-swap obj %p has uip",
+ ("vm_object_terminate: non-swap obj %p has cred",
object));
- swap_release_by_uid(object->charge, object->uip);
+ swap_release_by_cred(object->charge, object->cred);
object->charge = 0;
- uifree(object->uip);
- object->uip = NULL;
+ crfree(object->cred);
+ object->cred = NULL;
}
/*
@@ -1247,9 +1247,9 @@ vm_object_split(vm_map_entry_t entry)
orig_object->backing_object_offset + entry->offset;
new_object->backing_object = source;
}
- if (orig_object->uip != NULL) {
- new_object->uip = orig_object->uip;
- uihold(orig_object->uip);
+ if (orig_object->cred != NULL) {
+ new_object->cred = orig_object->cred;
+ crhold(orig_object->cred);
new_object->charge = ptoa(size);
KASSERT(orig_object->charge >= ptoa(size),
("orig_object->charge < 0"));
@@ -1928,20 +1928,20 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
/*
* Account for the charge.
*/
- if (prev_object->uip != NULL) {
+ if (prev_object->cred != NULL) {
/*
* If prev_object was charged, then this mapping,
* althought not charged now, may become writable
- * later. Non-NULL uip in the object would prevent
+ * later. Non-NULL cred in the object would prevent
* swap reservation during enabling of the write
* access, so reserve swap now. Failed reservation
* cause allocation of the separate object for the map
* entry, and swap reservation for this entry is
* managed in appropriate time.
*/
- if (!reserved && !swap_reserve_by_uid(ptoa(next_size),
- prev_object->uip)) {
+ if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
+ prev_object->cred)) {
return (FALSE);
}
prev_object->charge += ptoa(next_size);
@@ -1959,7 +1959,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
swap_pager_freespace(prev_object,
next_pindex, next_size);
#if 0
- if (prev_object->uip != NULL) {
+ if (prev_object->cred != NULL) {
KASSERT(prev_object->charge >=
ptoa(prev_object->size - next_pindex),
("object %p overcharged 1 %jx %jx", prev_object,
@@ -2111,10 +2111,10 @@ DB_SHOW_COMMAND(object, vm_object_print_static)
return;
db_iprintf(
- "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x uip %d charge %jx\n",
+ "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
object, (int)object->type, (uintmax_t)object->size,
object->resident_page_count, object->ref_count, object->flags,
- object->uip ? object->uip->ui_uid : -1, (uintmax_t)object->charge);
+ object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
object->shadow_count,
object->backing_object ? object->backing_object->ref_count : 0,
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 6a9f129..568a0d5 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -143,7 +143,7 @@ struct vm_object {
int swp_bcount;
} swp;
} un_pager;
- struct uidinfo *uip;
+ struct ucred *cred;
vm_ooffset_t charge;
};