diff options
author | kib <kib@FreeBSD.org> | 2009-03-02 18:53:30 +0000 |
---|---|---|
committer | kib <kib@FreeBSD.org> | 2009-03-02 18:53:30 +0000 |
commit | 453adb14fb8994d00a14b341bcf886ca9aad1d9e (patch) | |
tree | 126cfd157183d3118e00026a6f63a55085a27d45 /sys/kern/sysv_shm.c | |
parent | c672211541895fab644fcd12bb285ea5294f85f5 (diff) | |
download | FreeBSD-src-453adb14fb8994d00a14b341bcf886ca9aad1d9e.zip FreeBSD-src-453adb14fb8994d00a14b341bcf886ca9aad1d9e.tar.gz |
Correct types of variables used to track amount of allocated SysV shared
memory from int to size_t. Implement a workaround for current ABI not
allowing to properly save size for and report more than 2Gb sized segment
of shared memory.
This makes it possible to use > 2 Gb shared memory segments on 64bit
architectures. Please note the new BUGS section in shmctl(2) and
UPDATING note for limitations of this temporary solution.
Reviewed by: csjp
Tested by: Nikolay Dzham <i levsha org ua>
MFC after: 2 weeks
Diffstat (limited to 'sys/kern/sysv_shm.c')
-rw-r--r-- | sys/kern/sysv_shm.c | 18 |
1 files changed, 11 insertions, 7 deletions
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c index 4e9854d..a945523 100644 --- a/sys/kern/sysv_shm.c +++ b/sys/kern/sysv_shm.c @@ -121,7 +121,8 @@ static sy_call_t *shmcalls[] = { #define SHMSEG_ALLOCATED 0x0800 #define SHMSEG_WANTED 0x1000 -static int shm_last_free, shm_nused, shm_committed, shmalloced; +static int shm_last_free, shm_nused, shmalloced; +size_t shm_committed; static struct shmid_kernel *shmsegs; struct shmmap_state { @@ -250,7 +251,7 @@ shm_deallocate_segment(shmseg) vm_object_deallocate(shmseg->u.shm_internal); shmseg->u.shm_internal = NULL; - size = round_page(shmseg->u.shm_segsz); + size = round_page(shmseg->shm_bsegsz); shm_committed -= btoc(size); shm_nused--; shmseg->u.shm_perm.mode = SHMSEG_FREE; @@ -270,7 +271,7 @@ shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s) segnum = IPCID_TO_IX(shmmap_s->shmid); shmseg = &shmsegs[segnum]; - size = round_page(shmseg->u.shm_segsz); + size = round_page(shmseg->shm_bsegsz); result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size); if (result != KERN_SUCCESS) return (EINVAL); @@ -390,7 +391,7 @@ kern_shmat(td, shmid, shmaddr, shmflg) error = EMFILE; goto done2; } - size = round_page(shmseg->u.shm_segsz); + size = round_page(shmseg->shm_bsegsz); #ifdef VM_PROT_READ_IS_EXEC prot = VM_PROT_READ | VM_PROT_EXECUTE; #else @@ -422,7 +423,8 @@ kern_shmat(td, shmid, shmaddr, shmflg) vm_object_reference(shmseg->u.shm_internal); rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->u.shm_internal, - 0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0); + 0, &attach_va, size, (flags & MAP_FIXED) ? 
VMFS_NO_SPACE : + VMFS_ANY_SPACE, prot, prot, 0); if (rv != KERN_SUCCESS) { vm_object_deallocate(shmseg->u.shm_internal); error = ENOMEM; @@ -720,7 +722,7 @@ shmget_existing(td, uap, mode, segnum) if (error != 0) return (error); #endif - if (uap->size && uap->size > shmseg->u.shm_segsz) + if (uap->size && uap->size > shmseg->shm_bsegsz) return (EINVAL); td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm); return (0); @@ -732,7 +734,8 @@ shmget_allocate_segment(td, uap, mode) struct shmget_args *uap; int mode; { - int i, segnum, shmid, size; + int i, segnum, shmid; + size_t size; struct ucred *cred = td->td_ucred; struct shmid_kernel *shmseg; vm_object_t shm_object; @@ -790,6 +793,7 @@ shmget_allocate_segment(td, uap, mode) shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) | (mode & ACCESSPERMS) | SHMSEG_ALLOCATED; shmseg->u.shm_segsz = uap->size; + shmseg->shm_bsegsz = uap->size; shmseg->u.shm_cpid = td->td_proc->p_pid; shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0; shmseg->u.shm_atime = shmseg->u.shm_dtime = 0; |