Macros for VMBase etc.

Copied from Perforce
 Change: 186626
 ServerID: perforce.ravenbrook.com
Gareth Rees 2014-06-14 15:50:13 +01:00
parent 0eb6c63a05
commit bcbaa67af9
4 changed files with 96 additions and 88 deletions


@@ -1,4 +1,4 @@
/* vm.h: VIRTUAL MEMOEY INTERFACE
/* vm.h: VIRTUAL MEMORY INTERFACE
*
* $Id: //info.ravenbrook.com/project/mps/branch/2014-06-14/vm/code/sa.h#2 $
* Copyright (c) 2014 Ravenbrook Limited. See end of file for license.
@@ -23,17 +23,22 @@ typedef struct VMStruct {
} VMStruct;
#define VMBase(vm) RVALUE((vm)->base)
#define VMLimit(vm) RVALUE((vm)->limit)
#define VMReserved(vm) RVALUE((vm)->reserved)
#define VMMapped(vm) RVALUE((vm)->mapped)
extern Size VMPageSize(void);
extern Bool VMCheck(VM vm);
extern Res VMParamFromArgs(void *params, size_t paramSize, ArgList args);
extern Res VMCreate(VM vmReturn, Size size, Size grainSize, void *params);
extern void VMDestroy(VM vm);
extern Addr VMBase(VM vm);
extern Addr VMLimit(VM vm);
extern Addr (VMBase)(VM vm);
extern Addr (VMLimit)(VM vm);
extern Res VMMap(VM vm, Addr base, Addr limit);
extern void VMUnmap(VM vm, Addr base, Addr limit);
extern Size VMReserved(VM vm);
extern Size VMMapped(VM vm);
extern Size (VMReserved)(VM vm);
extern Size (VMMapped)(VM vm);
#endif /* vm_h */
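
The vm.h change above is the heart of this commit: each accessor becomes a macro over the structure field, while the declaration keeps a function of the same name by wrapping the name in parentheses, which stops the function-like macro from expanding at that point (the RVALUE wrapper in the real macros presumably keeps the result from being used as an lvalue). A minimal sketch of the same pattern with a hypothetical Counter type, not taken from the MPS sources:

/* Hypothetical illustration of the macro-plus-function pattern used
 * for VMBase, VMLimit, VMReserved and VMMapped above. */

typedef struct CounterStruct {
    int value;
} CounterStruct, *Counter;

/* Fast path: a function-like macro that reads the field directly. */
#define CounterValue(counter) ((counter)->value)

/* Parenthesising the name prevents macro expansion, so this declares
 * a genuine function with the same name and type. */
extern int (CounterValue)(Counter counter);

/* Out-of-line definition; inside the body the macro does expand. */
int (CounterValue)(Counter counter)
{
    return CounterValue(counter);
}

Clients that include the header get the macro; anything that needs a real function (a function pointer, or a translation unit compiled against the declaration only) still links against the definition, exactly as the (VMBase), (VMLimit), (VMReserved) and (VMMapped) definitions in the files below do.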


@@ -8,7 +8,6 @@
#include "vm.h"
#include <stdlib.h> /* for malloc and free */
#include <string.h> /* for memset */
SRCID(vman, "$Id$");
@@ -52,6 +51,7 @@ Res VMParamFromArgs(void *params, size_t paramSize, ArgList args)
Res VMCreate(VM vm, Size size, Size grainSize, void *params)
{
void *vbase;
Size pageSize, reserved;
AVER(vm != NULL);
@@ -76,84 +76,84 @@ Res VMCreate(VM vm, Size size, Size grainSize, void *params)
if (reserved < grainSize || reserved > (Size)(size_t)-1)
return ResRESOURCE;
vm->block = malloc((size_t)reserved);
if (vm->block == NULL) {
vbase = malloc((size_t)reserved);
if (vbase == NULL)
return ResMEMORY;
}
(void)mps_lib_memset(vbase, VMJunkBYTE, reserved);
vm->base = AddrAlignUp((Addr)vm->block, grainSize);
vm->block = vbase;
vm->base = AddrAlignUp(vbase, grainSize);
vm->limit = AddrAdd(vm->base, size);
AVER(vm->base < vm->limit); /* can't overflow, as discussed above */
AVER(vm->limit < AddrAdd((Addr)vm->block, reserved));
memset((void *)vm->block, VMJunkBYTE, reserved);
vm->reserved = reserved;
vm->mapped = (Size)0;
vm->sig = VMSig;
AVERT(VM, vm);
EVENT3(VMCreate, vm, vm->base, vm->limit);
EVENT3(VMCreate, vm, VMBase(vm), VMLimit(vm));
return ResOK;
}
/* VMDestroy -- destroy the VM structure */
/* VMDestroy -- release all address space and finish VM structure */
void VMDestroy(VM vm)
{
/* All vm areas should have been unmapped. */
AVERT(VM, vm);
AVER(vm->mapped == (Size)0);
/* Descriptor must not be stored inside its own VM at this point. */
AVER(PointerAdd(vm, sizeof *vm) <= vm->block
|| PointerAdd(vm->block, VMReserved(vm)) <= (Pointer)vm);
/* All address space must have been unmapped. */
AVER(VMMapped(vm) == (Size)0);
EVENT1(VMDestroy, vm);
memset((void *)vm->base, VMJunkBYTE, AddrOffset(vm->base, vm->limit));
free(vm->block);
vm->sig = SigInvalid;
free(vm);
(void)mps_lib_memset(vm->block, VMJunkBYTE, vm->reserved);
free(vm->block);
}
/* VMBase -- return the base address of the memory reserved */
Addr VMBase(VM vm)
Addr (VMBase)(VM vm)
{
AVERT(VM, vm);
return vm->base;
return VMBase(vm);
}
/* VMLimit -- return the limit address of the memory reserved */
Addr VMLimit(VM vm)
Addr (VMLimit)(VM vm)
{
AVERT(VM, vm);
return vm->limit;
return VMLimit(vm);
}
/* VMReserved -- return the amount of address space reserved */
Size VMReserved(VM vm)
Size (VMReserved)(VM vm)
{
AVERT(VM, vm);
return vm->reserved;
return VMReserved(vm);
}
/* VMMapped -- return the amount of memory actually mapped */
Size VMMapped(VM vm)
Size (VMMapped)(VM vm)
{
AVERT(VM, vm);
return vm->mapped;
return VMMapped(vm);
}
@@ -164,17 +164,17 @@ Res VMMap(VM vm, Addr base, Addr limit)
Size size;
AVER(base != (Addr)0);
AVER(vm->base <= base);
AVER(VMBase(vm) <= base);
AVER(base < limit);
AVER(limit <= vm->limit);
AVER(limit <= VMLimit(vm));
AVER(AddrIsAligned(base, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(limit, VMAN_PAGE_SIZE));
size = AddrOffset(base, limit);
memset((void *)base, (int)0, size);
(void)mps_lib_memset((void *)base, VMJunkBYTE, size);
vm->mapped += size;
AVER(vm->mapped <= vm->reserved);
AVER(VMMapped(vm) <= VMReserved(vm));
EVENT3(VMMap, vm, base, limit);
return ResOK;
@@ -188,16 +188,16 @@ void VMUnmap(VM vm, Addr base, Addr limit)
Size size;
AVER(base != (Addr)0);
AVER(vm->base <= base);
AVER(VMBase(vm) <= base);
AVER(base < limit);
AVER(limit <= vm->limit);
AVER(limit <= VMLimit(vm));
AVER(AddrIsAligned(base, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(limit, VMAN_PAGE_SIZE));
size = AddrOffset(base, limit);
memset((void *)base, 0xCD, size);
AVER(VMMapped(vm) >= size);
AVER(vm->mapped >= size);
(void)mps_lib_memset((void *)base, VMJunkBYTE, size);
vm->mapped -= size;
EVENT3(VMUnmap, vm, base, limit);
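
That is the whole of the ANSI (malloc-based) implementation's change: VMCreate mallocs a reserved block large enough that a grain-aligned base plus the requested size always fits (as the assertions check), junk-fills it, and aligns the base up; VMMap and VMUnmap only re-fill the range and adjust the mapped accounting, since malloc gives no real protection. The new VMDestroy assertion also checks that the descriptor interval [vm, vm + sizeof *vm) lies outside the block it is about to free. A rough, self-contained sketch of the reserve-and-align arithmetic, using plain C types instead of the MPS Addr/Size abstractions and assuming reserved = size + one grain:

/* Rough sketch of the malloc-based reservation in vman.c above:
 * over-allocate by one grain so an aligned base always fits.
 * Illustrative only; not the MPS source. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define JUNK_BYTE 0xA9  /* arbitrary junk-fill value, standing in for VMJunkBYTE */

int main(void)
{
    size_t size = 64 * 1024;        /* address space the client asked for */
    size_t grain = 4096;            /* arena grain size (a power of two) */
    size_t reserved = size + grain; /* slack for alignment */

    char *block = malloc(reserved);
    if (block == NULL)
        return EXIT_FAILURE;
    memset(block, JUNK_BYTE, reserved);

    /* Align the base up to the next grain boundary. */
    uintptr_t p = (uintptr_t)block;
    char *base = (char *)((p + grain - 1) & ~(uintptr_t)(grain - 1));
    char *limit = base + size;

    printf("block=%p base=%p limit=%p (fits: %d)\n",
           (void *)block, (void *)base, (void *)limit,
           limit <= block + reserved);

    free(block);
    return EXIT_SUCCESS;
}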


@@ -105,7 +105,7 @@ Res VMParamFromArgs(void *params, size_t paramSize, ArgList args)
Res VMCreate(VM vm, Size size, Size grainSize, void *params)
{
Size pageSize, reserved;
void *addr;
void *vbase;
AVER(vm != NULL);
AVERT(ArenaGrainSize, grainSize);
@@ -126,20 +126,20 @@ Res VMCreate(VM vm, Size size, Size grainSize, void *params)
return ResRESOURCE;
/* See .assume.not-last. */
addr = mmap(0, reserved,
PROT_NONE, MAP_ANON | MAP_PRIVATE,
-1, 0);
vbase = mmap(0, reserved,
PROT_NONE, MAP_ANON | MAP_PRIVATE,
-1, 0);
/* On Darwin the MAP_FAILED return value is not documented, but does
* work. MAP_FAILED _is_ documented by POSIX.
*/
if(addr == MAP_FAILED) {
if (vbase == MAP_FAILED) {
int e = errno;
AVER(e == ENOMEM); /* .assume.mmap.err */
return ResRESOURCE;
}
vm->block = addr;
vm->base = AddrAlignUp(addr, grainSize);
vm->block = vbase;
vm->base = AddrAlignUp(vbase, grainSize);
vm->limit = AddrAdd(vm->base, size);
AVER(vm->base < vm->limit); /* .assume.not-last */
AVER(vm->limit <= AddrAdd((Addr)vm->block, reserved));
@@ -149,26 +149,26 @@ Res VMCreate(VM vm, Size size, Size grainSize, void *params)
vm->sig = VMSig;
AVERT(VM, vm);
EVENT3(VMCreate, vm, vm->base, vm->limit);
EVENT3(VMCreate, vm, VMBase(vm), VMLimit(vm));
return ResOK;
}
/* VMDestroy -- release all address space and destroy VM structure */
/* VMDestroy -- release all address space and finish VM structure */
void VMDestroy(VM vm)
{
int r;
AVERT(VM, vm);
AVER(vm->mapped == (Size)0);
/* Descriptor must not be stored inside its own VM at this point. */
AVER(PointerAdd(vm, sizeof *vm) <= vm->block
|| PointerAdd(vm->block, VMReserved(vm)) <= (Pointer)vm);
/* All address space must have been unmapped. */
AVER(VMMapped(vm) == (Size)0);
EVENT1(VMDestroy, vm);
/* This appears to be pretty pointless, since the VM is about to
* vanish completely. However, munmap might fail for some reason,
* and this would ensure that it was still discovered if sigs are
* being checked. */
vm->sig = SigInvalid;
r = munmap(vm->block, vm->reserved);
@@ -178,41 +178,41 @@ void VMDestroy(VM vm)
/* VMBase -- return the base address of the memory reserved */
Addr VMBase(VM vm)
Addr (VMBase)(VM vm)
{
AVERT(VM, vm);
return vm->base;
return VMBase(vm);
}
/* VMLimit -- return the limit address of the memory reserved */
Addr VMLimit(VM vm)
Addr (VMLimit)(VM vm)
{
AVERT(VM, vm);
return vm->limit;
return VMLimit(vm);
}
/* VMReserved -- return the amount of memory reserved */
Size VMReserved(VM vm)
Size (VMReserved)(VM vm)
{
AVERT(VM, vm);
return vm->reserved;
return VMReserved(vm);
}
/* VMMapped -- return the amount of memory actually mapped */
Size VMMapped(VM vm)
Size (VMMapped)(VM vm)
{
AVERT(VM, vm);
return vm->mapped;
return VMMapped(vm);
}
@@ -225,8 +225,8 @@ Res VMMap(VM vm, Addr base, Addr limit)
AVERT(VM, vm);
AVER(sizeof(void *) == sizeof(Addr));
AVER(base < limit);
AVER(base >= vm->base);
AVER(limit <= vm->limit);
AVER(base >= VMBase(vm));
AVER(limit <= VMLimit(vm));
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
@@ -242,7 +242,7 @@ Res VMMap(VM vm, Addr base, Addr limit)
}
vm->mapped += size;
AVER(vm->mapped <= vm->reserved);
AVER(VMMapped(vm) <= VMReserved(vm));
EVENT3(VMMap, vm, base, limit);
return ResOK;
@@ -258,12 +258,13 @@ void VMUnmap(VM vm, Addr base, Addr limit)
AVERT(VM, vm);
AVER(base < limit);
AVER(base >= vm->base);
AVER(limit <= vm->limit);
AVER(base >= VMBase(vm));
AVER(limit <= VMLimit(vm));
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
size = AddrOffset(base, limit);
AVER(size <= VMMapped(vm));
/* see <design/vmo1/#fun.unmap.offset> */
addr = mmap((void *)base, (size_t)size,

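The POSIX implementation above gets the reserve/commit split for real: the mmap call with PROT_NONE reserves address space without committing memory, and VMMap later makes pages accessible. The MPS does that with further mmap calls over the range, as the truncated VMUnmap hunk suggests; the sketch below uses mprotect instead, purely to illustrate PROT_NONE reservation and is not the MPS code:

/* Minimal sketch of address-space reservation with PROT_NONE, as in
 * the mmap call above, then enabling access to one page. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t reserved = 256 * page;

    /* Reserve: no access, so no memory need be committed yet.
     * MAP_ANON is spelled MAP_ANONYMOUS on some systems. */
    void *base = mmap(NULL, reserved, PROT_NONE,
                      MAP_ANON | MAP_PRIVATE, -1, 0);
    if (base == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    /* "Map" the first page by granting read/write access. */
    if (mprotect(base, page, PROT_READ | PROT_WRITE) != 0) {
        perror("mprotect");
        return EXIT_FAILURE;
    }
    ((char *)base)[0] = 1;   /* now safe to touch */

    munmap(base, reserved);
    return EXIT_SUCCESS;
}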

@@ -159,72 +159,70 @@ Res VMCreate(VM vm, Size size, Size grainSize, void *params)
vm->sig = VMSig;
AVERT(VM, vm);
EVENT3(VMCreate, vm, vm->base, vm->limit);
EVENT3(VMCreate, vm, VMBase(vm), VMLimit(vm));
return ResOK;
}
/* VMDestroy -- destroy the VM structure */
/* VMDestroy -- release address space and finish the VM structure */
void VMDestroy(VM vm)
{
BOOL b;
AVERT(VM, vm);
AVER(vm->mapped == 0);
/* Descriptor must not be stored inside its own VM at this point. */
AVER(PointerAdd(vm, sizeof *vm) <= vm->block
|| PointerAdd(vm->block, VMReserved(vm)) <= (Pointer)vm);
/* All address space must have been unmapped. */
AVER(VMMapped(vm) == (Size)0);
EVENT1(VMDestroy, vm);
/* This appears to be pretty pointless, since the vm descriptor page
* is about to vanish completely. However, the VirtualFree might
* fail and it would be nice to have a dead sig there. */
vm->sig = SigInvalid;
b = VirtualFree((LPVOID)vm->block, (SIZE_T)0, MEM_RELEASE);
AVER(b != 0);
b = VirtualFree((LPVOID)vm, (SIZE_T)0, MEM_RELEASE);
AVER(b != 0);
}
/* VMBase -- return the base address of the memory reserved */
Addr VMBase(VM vm)
Addr (VMBase)(VM vm)
{
AVERT(VM, vm);
return vm->base;
return VMBase(vm);
}
/* VMLimit -- return the limit address of the memory reserved */
Addr VMLimit(VM vm)
Addr (VMLimit)(VM vm)
{
AVERT(VM, vm);
return vm->limit;
return VMLimit(vm);
}
/* VMReserved -- return the amount of address space reserved */
Size VMReserved(VM vm)
Size (VMReserved)(VM vm)
{
AVERT(VM, vm);
return vm->reserved;
return VMReserved(vm);
}
/* VMMapped -- return the amount of memory actually mapped */
Size VMMapped(VM vm)
Size (VMMapped)(VM vm)
{
AVERT(VM, vm);
return vm->mapped;
return VMMapped(vm);
}
@@ -237,9 +235,9 @@ Res VMMap(VM vm, Addr base, Addr limit)
AVERT(VM, vm);
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
AVER(vm->base <= base);
AVER(VMBase(vm) <= base);
AVER(base < limit);
AVER(limit <= vm->limit);
AVER(limit <= VMLimit(vm));
/* .improve.query-map: We could check that the pages we are about to
* map are unmapped using VirtualQuery. */
@@ -251,7 +249,7 @@ Res VMMap(VM vm, Addr base, Addr limit)
AVER((Addr)b == base); /* base should've been aligned */
vm->mapped += AddrOffset(base, limit);
AVER(vm->mapped <= vm->reserved);
AVER(VMMapped(vm) <= VMReserved(vm));
EVENT3(VMMap, vm, base, limit);
return ResOK;
@@ -263,19 +261,23 @@ Res VMMap(VM vm, Addr base, Addr limit)
void VMUnmap(VM vm, Addr base, Addr limit)
{
BOOL b;
Size size;
AVERT(VM, vm);
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
AVER(vm->base <= base);
AVER(VMBase(vm) <= base);
AVER(base < limit);
AVER(limit <= vm->limit);
AVER(limit <= VMLimit(vm));
size = AddrOffset(base, limit);
AVER(size <= VMMapped(vm));
/* .improve.query-unmap: Could check that the pages we are about */
/* to unmap are mapped, using VirtualQuery. */
b = VirtualFree((LPVOID)base, (SIZE_T)AddrOffset(base, limit), MEM_DECOMMIT);
b = VirtualFree((LPVOID)base, (SIZE_T)size, MEM_DECOMMIT);
AVER(b != 0); /* .assume.free.success */
vm->mapped -= AddrOffset(base, limit);
vm->mapped -= size;
EVENT3(VMUnmap, vm, base, limit);
}
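
On Windows the same split maps onto VirtualAlloc and VirtualFree: reserve the whole range with MEM_RESERVE, commit pages on VMMap with MEM_COMMIT, decommit them on VMUnmap, and release the reservation in VMDestroy. A small illustrative sketch of that call sequence, not the MPS code and with minimal error handling:

/* Sketch of the reserve/commit/decommit/release sequence behind the
 * vmw3.c diff above. */
#include <windows.h>

int main(void)
{
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    SIZE_T page = si.dwPageSize;
    SIZE_T reserved = 256 * page;

    /* Reserve address space only; no pages are committed yet. */
    LPVOID block = VirtualAlloc(NULL, reserved, MEM_RESERVE, PAGE_NOACCESS);
    if (block == NULL)
        return 1;

    /* Commit (map) the first page with read/write access. */
    LPVOID base = VirtualAlloc(block, page, MEM_COMMIT, PAGE_READWRITE);
    if (base == NULL)
        return 1;
    ((char *)base)[0] = 1;   /* now safe to touch */

    /* Decommit (unmap) the page, then release the whole reservation. */
    VirtualFree(base, page, MEM_DECOMMIT);
    VirtualFree(block, 0, MEM_RELEASE);
    return 0;
}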