Rename vmgrainsize to vmpagesize, because it is in fact the operating system page size, and to avoid confusion with the arena grain size.

Copied from Perforce
 Change: 186543
 ServerID: perforce.ravenbrook.com
This commit is contained in:
Gareth Rees 2014-06-12 17:18:50 +01:00
parent e50888e0c5
commit 5aba28db8d
8 changed files with 74 additions and 74 deletions

View file

@@ -102,7 +102,7 @@ static Bool VMChunkCheck(VMChunk vmchunk)
chunk = VMChunk2Chunk(vmchunk);
CHECKD(Chunk, chunk);
CHECKD_NOSIG(VM, vmchunk->vm); /* <design/check/#hidden-type> */
CHECKL(VMGrainSize(vmchunk->vm) == ChunkPageSize(chunk));
CHECKL(VMPageSize(vmchunk->vm) == ChunkPageSize(chunk));
CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
CHECKD(SparseArray, &vmchunk->pages);
/* SparseArrayCheck is agnostic about where the BTs live, so VMChunkCheck
@@ -292,7 +292,7 @@ static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
if (res != ResOK)
goto failVMCreate;
pageSize = VMGrainSize(vm);
pageSize = VMPageSize(vm);
/* The VM will have aligned the userSize; pick up the actual size. */
base = VMBase(vm);
limit = VMLimit(vm);
@@ -521,7 +521,7 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args)
arena = VMArena2Arena(vmArena);
/* <code/arena.c#init.caller> */
res = ArenaInit(arena, class, VMGrainSize(arenaVM), args);
res = ArenaInit(arena, class, VMPageSize(arenaVM), args);
if (res != ResOK)
goto failArenaInit;
arena->committed = VMMapped(arenaVM);

View file

@@ -450,7 +450,7 @@
/* VM Configuration -- see <code/vm*.c> */
#define VMAN_GRAIN_SIZE ((Align)4096)
#define VMAN_PAGE_SIZE ((Align)4096)
#define VMJunkBYTE ((unsigned char)0xA9)
#define VMParamSize (sizeof(Word))

View file

@@ -997,7 +997,7 @@ extern Res RootsIterate(Globals arena, RootIterateFn f, void *p);
/* VM Interface -- see <code/vm.c>* */
extern Size VMGrainSize(VM vm);
extern Size VMPageSize(VM vm);
extern Bool VMCheck(VM vm);
extern Res VMParamFromArgs(void *params, size_t paramSize, ArgList args);
extern Res VMCreate(VM *VMReturn, Size size, void *params);

View file

@@ -10,7 +10,7 @@
static Index pagesLength(SparseArray sa)
{
return (sa->length * sa->elementSize + VMGrainSize(sa->vm) - 1) >> sa->shift;
return (sa->length * sa->elementSize + VMPageSize(sa->vm) - 1) >> sa->shift;
}
void SparseArrayInit(SparseArray sa,
@@ -25,8 +25,8 @@ void SparseArrayInit(SparseArray sa,
sa->mapped = mapped;
sa->pages = pages;
sa->vm = vm;
AVER(SizeIsP2(VMGrainSize(vm)));
sa->shift = SizeLog2(VMGrainSize(vm));
AVER(SizeIsP2(VMPageSize(vm)));
sa->shift = SizeLog2(VMPageSize(vm));
BTResRange(mapped, 0, length);
BTResRange(pages, 0, pagesLength(sa));
@@ -49,11 +49,11 @@ Bool SparseArrayCheck(SparseArray sa)
CHECKL(sa->base != NULL);
CHECKL(sa->elementSize >= 1);
CHECKD_NOSIG(VM, sa->vm); /* <design/check/#hidden-type> */
CHECKL(sa->elementSize <= VMGrainSize(sa->vm));
CHECKL(sa->elementSize <= VMPageSize(sa->vm));
CHECKL(sa->length > 0);
CHECKD_NOSIG(BT, sa->mapped);
CHECKD_NOSIG(BT, sa->pages);
CHECKL(sa->shift == SizeLog2(VMGrainSize(sa->vm)));
CHECKL(sa->shift == SizeLog2(VMPageSize(sa->vm)));
return TRUE;
}
@@ -139,7 +139,7 @@ void SparseArrayUnmap(SparseArray sa, Index baseEI, Index limitEI)
the page on which the base element resides. If any elements between
there and baseMI are defined, we can't unmap that page, so bump up. */
baseMI = (baseEI * sa->elementSize) >> sa->shift;
i = SizeAlignDown(baseEI * sa->elementSize, VMGrainSize(sa->vm)) / sa->elementSize;
i = SizeAlignDown(baseEI * sa->elementSize, VMPageSize(sa->vm)) / sa->elementSize;
if (i < baseEI && !BTIsResRange(sa->mapped, i, baseEI))
++baseMI;
@@ -147,7 +147,7 @@ void SparseArrayUnmap(SparseArray sa, Index baseEI, Index limitEI)
the page on which the last element resides. If any elements between
limitMI and there are defined, we can't unmap that page, so bump down. */
limitMI = ((limitEI * sa->elementSize - 1) >> sa->shift) + 1;
i = (SizeAlignUp(limitEI * sa->elementSize, VMGrainSize(sa->vm)) +
i = (SizeAlignUp(limitEI * sa->elementSize, VMPageSize(sa->vm)) +
sa->elementSize - 1) / sa->elementSize;
if (i > sa->length)
i = sa->length;

View file

@@ -31,7 +31,7 @@ typedef struct SparseArrayStruct {
BT mapped; /* whether elements exist in the array */
BT pages; /* whether underlying pages are mapped */
VM vm; /* where pages are mapped from */
Shift shift; /* SizeLog2(VMGrainSize(vm)) TODO: VMShift(vm) */
Shift shift; /* SizeLog2(VMPageSize(vm)) TODO: VMShift(vm) */
} SparseArrayStruct;
extern void SparseArrayInit(SparseArray sa,

View file

@@ -34,9 +34,9 @@ Bool VMCheck(VM vm)
CHECKL(vm->base != (Addr)0);
CHECKL(vm->limit != (Addr)0);
CHECKL(vm->base < vm->limit);
CHECKL(ArenaGrainSizeCheck(VMAN_GRAIN_SIZE));
CHECKL(AddrIsAligned(vm->base, VMAN_GRAIN_SIZE));
CHECKL(AddrIsAligned(vm->limit, VMAN_GRAIN_SIZE));
CHECKL(ArenaGrainSizeCheck(VMAN_PAGE_SIZE));
CHECKL(AddrIsAligned(vm->base, VMAN_PAGE_SIZE));
CHECKL(AddrIsAligned(vm->limit, VMAN_PAGE_SIZE));
CHECKL(vm->block != NULL);
CHECKL((Addr)vm->block <= vm->base);
CHECKL(vm->mapped <= vm->reserved);
@@ -44,12 +44,12 @@ Bool VMCheck(VM vm)
}
/* VMGrainSize -- return the grain size */
/* VMPageSize -- return the page size */
Size VMGrainSize(VM vm)
Size VMPageSize(VM vm)
{
UNUSED(vm);
return VMAN_GRAIN_SIZE;
return VMAN_PAGE_SIZE;
}
@@ -71,12 +71,12 @@ Res VMCreate(VM *vmReturn, Size size, void *params)
AVER(vmReturn != NULL);
AVER(params != NULL);
/* Note that because we add VMAN_GRAIN_SIZE rather than */
/* VMAN_GRAIN_SIZE-1 we are not in danger of overflowing */
/* Note that because we add VMAN_PAGE_SIZE rather than */
/* VMAN_PAGE_SIZE-1 we are not in danger of overflowing */
/* vm->limit even if malloc were perverse enough to give us */
/* a block at the end of memory. */
size = SizeRoundUp(size, VMAN_GRAIN_SIZE) + VMAN_GRAIN_SIZE;
if ((size < VMAN_GRAIN_SIZE) || (size > (Size)(size_t)-1))
size = SizeRoundUp(size, VMAN_PAGE_SIZE) + VMAN_PAGE_SIZE;
if ((size < VMAN_PAGE_SIZE) || (size > (Size)(size_t)-1))
return ResRESOURCE;
vm = (VM)malloc(sizeof(VMStruct));
@@ -89,15 +89,15 @@ Res VMCreate(VM *vmReturn, Size size, void *params)
return ResMEMORY;
}
vm->base = AddrAlignUp((Addr)vm->block, VMAN_GRAIN_SIZE);
vm->limit = AddrAdd(vm->base, size - VMAN_GRAIN_SIZE);
vm->base = AddrAlignUp((Addr)vm->block, VMAN_PAGE_SIZE);
vm->limit = AddrAdd(vm->base, size - VMAN_PAGE_SIZE);
AVER(vm->limit < AddrAdd((Addr)vm->block, size));
memset((void *)vm->block, VMJunkBYTE, size);
/* Lie about the reserved address space, to simulate real */
/* virtual memory. */
vm->reserved = size - VMAN_GRAIN_SIZE;
vm->reserved = size - VMAN_PAGE_SIZE;
vm->mapped = (Size)0;
vm->sig = VMSig;
@@ -179,8 +179,8 @@ Res VMMap(VM vm, Addr base, Addr limit)
AVER(vm->base <= base);
AVER(base < limit);
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, VMAN_GRAIN_SIZE));
AVER(AddrIsAligned(limit, VMAN_GRAIN_SIZE));
AVER(AddrIsAligned(base, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(limit, VMAN_PAGE_SIZE));
size = AddrOffset(base, limit);
memset((void *)base, (int)0, size);
@@ -202,8 +202,8 @@ void VMUnmap(VM vm, Addr base, Addr limit)
AVER(vm->base <= base);
AVER(base < limit);
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, VMAN_GRAIN_SIZE));
AVER(AddrIsAligned(limit, VMAN_GRAIN_SIZE));
AVER(AddrIsAligned(base, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(limit, VMAN_PAGE_SIZE));
size = AddrOffset(base, limit);
memset((void *)base, 0xCD, size);

View file

@@ -64,18 +64,18 @@ SRCID(vmix, "$Id$");
typedef struct VMStruct {
Sig sig; /* <design/sig/> */
Size grainSize; /* grain size */
Size pageSize; /* page size */
Addr base, limit; /* boundaries of reserved space */
Size reserved; /* total reserved address space */
Size mapped; /* total mapped memory */
} VMStruct;
/* VMGrainSize -- return grain size */
/* VMPageSize -- return page size */
Size VMGrainSize(VM vm)
Size VMPageSize(VM vm)
{
return vm->grainSize;
return vm->pageSize;
}
@@ -88,9 +88,9 @@ Bool VMCheck(VM vm)
CHECKL(vm->limit != 0);
CHECKL(vm->base < vm->limit);
CHECKL(vm->mapped <= vm->reserved);
CHECKL(ArenaGrainSizeCheck(vm->grainSize));
CHECKL(AddrIsAligned(vm->base, vm->grainSize));
CHECKL(AddrIsAligned(vm->limit, vm->grainSize));
CHECKL(ArenaGrainSizeCheck(vm->pageSize));
CHECKL(AddrIsAligned(vm->base, vm->pageSize));
CHECKL(AddrIsAligned(vm->limit, vm->pageSize));
return TRUE;
}
@@ -109,31 +109,31 @@ Res VMParamFromArgs(void *params, size_t paramSize, ArgList args)
Res VMCreate(VM *vmReturn, Size size, void *params)
{
VM vm;
int pagesize;
Size grainSize;
int ospagesize;
Size pageSize;
void *addr;
Res res;
AVER(vmReturn != NULL);
AVER(params != NULL);
/* Find out the page size from the OS */
pagesize = getpagesize();
/* Find out the operating system page size */
ospagesize = getpagesize();
/* Check the page size will fit in a Size. */
AVER((unsigned long)pagesize <= (unsigned long)(Size)-1);
AVER((unsigned long)ospagesize <= (unsigned long)(Size)-1);
/* Check that the page size is valid for use as an arena grain size. */
grainSize = (Size)pagesize;
AVERT(ArenaGrainSize, grainSize);
pageSize = (Size)ospagesize;
AVERT(ArenaGrainSize, pageSize);
/* Check that the rounded-up size will fit in a Size. */
size = SizeRoundUp(size, grainSize);
if (size < grainSize || size > (Size)(size_t)-1)
size = SizeRoundUp(size, pageSize);
if (size < pageSize || size > (Size)(size_t)-1)
return ResRESOURCE;
/* Map in a page to store the descriptor on. */
addr = mmap(0, (size_t)SizeAlignUp(sizeof(VMStruct), grainSize),
addr = mmap(0, (size_t)SizeAlignUp(sizeof(VMStruct), pageSize),
PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE,
-1, 0);
@@ -147,7 +147,7 @@ Res VMCreate(VM *vmReturn, Size size, void *params)
}
vm = (VM)addr;
vm->grainSize = grainSize;
vm->pageSize = pageSize;
/* See .assume.not-last. */
addr = mmap(0, (size_t)size,
@@ -175,7 +175,7 @@ Res VMCreate(VM *vmReturn, Size size, void *params)
return ResOK;
failReserve:
(void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), grainSize));
(void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), pageSize));
return res;
}
@@ -200,7 +200,7 @@ void VMDestroy(VM vm)
r = munmap((void *)vm->base, (size_t)AddrOffset(vm->base, vm->limit));
AVER(r == 0);
r = munmap((void *)vm,
(size_t)SizeAlignUp(sizeof(VMStruct), vm->grainSize));
(size_t)SizeAlignUp(sizeof(VMStruct), vm->pageSize));
AVER(r == 0);
}
@@ -256,8 +256,8 @@ Res VMMap(VM vm, Addr base, Addr limit)
AVER(base < limit);
AVER(base >= vm->base);
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, vm->grainSize));
AVER(AddrIsAligned(limit, vm->grainSize));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
size = AddrOffset(base, limit);
@@ -288,8 +288,8 @@ void VMUnmap(VM vm, Addr base, Addr limit)
AVER(base < limit);
AVER(base >= vm->base);
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, vm->grainSize));
AVER(AddrIsAligned(limit, vm->grainSize));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
size = AddrOffset(base, limit);

View file

@@ -58,20 +58,20 @@ SRCID(vmw3, "$Id$");
typedef struct VMStruct {
Sig sig; /* <design/sig/> */
Size grainSize; /* grain size */
Size pageSize; /* page size */
Addr base, limit; /* boundaries of reserved space */
Size reserved; /* total reserved address space */
Size mapped; /* total mapped memory */
} VMStruct;
/* VMGrainSize -- return the page size */
/* VMPageSize -- return the page size */
Size VMGrainSize(VM vm)
Size VMPageSize(VM vm)
{
AVERT(VM, vm);
return vm->grainSize;
return vm->pageSize;
}
@@ -84,9 +84,9 @@ Bool VMCheck(VM vm)
CHECKL(vm->limit != 0);
CHECKL(vm->base < vm->limit);
CHECKL(vm->mapped <= vm->reserved);
CHECKL(ArenaGrainSizeCheck(vm->grainSize));
CHECKL(AddrIsAligned(vm->base, vm->grainSize));
CHECKL(AddrIsAligned(vm->limit, vm->grainSize));
CHECKL(ArenaGrainSizeCheck(vm->pageSize));
CHECKL(AddrIsAligned(vm->base, vm->pageSize));
CHECKL(AddrIsAligned(vm->limit, vm->pageSize));
return TRUE;
}
@@ -122,7 +122,7 @@ Res VMCreate(VM *vmReturn, Size size, void *params)
LPVOID vbase;
SYSTEM_INFO si;
VM vm;
Size grainSize;
Size pageSize;
Res res;
BOOL b;
VMParams vmParams = params;
@@ -137,19 +137,19 @@ Res VMCreate(VM *vmReturn, Size size, void *params)
GetSystemInfo(&si);
/* Check the page size will fit in a Size. */
grainSize = (Size)si.dwPageSize;
AVER((DWORD)grainSize == si.dwPageSize);
AVER((unsigned long)si.dwPageSize <= (unsigned long)(Size)-1);
/* Check that the page size is valid for use as an arena grain size. */
AVERT(ArenaGrainSize, grainSize);
pageSize = (Size)si.dwPageSize;
AVERT(ArenaGrainSize, pageSize);
/* Check that the rounded-up size will fit in a Size. */
size = SizeRoundUp(size, grainSize);
if (size < grainSize || size > (Size)(SIZE_T)-1)
size = SizeRoundUp(size, pageSize);
if (size < pageSize || size > (Size)(SIZE_T)-1)
return ResRESOURCE;
/* Allocate the vm descriptor. This is likely to be wasteful. */
vbase = VirtualAlloc(NULL, SizeAlignUp(sizeof(VMStruct), grainSize),
vbase = VirtualAlloc(NULL, SizeAlignUp(sizeof(VMStruct), pageSize),
MEM_COMMIT, PAGE_READWRITE);
if (vbase == NULL)
return ResMEMORY;
@@ -167,9 +167,9 @@ Res VMCreate(VM *vmReturn, Size size, void *params)
goto failReserve;
}
AVER(AddrIsAligned(vbase, grainSize));
AVER(AddrIsAligned(vbase, pageSize));
vm->grainSize = grainSize;
vm->pageSize = pageSize;
vm->base = (Addr)vbase;
vm->limit = AddrAdd(vbase, size);
vm->reserved = size;
@@ -261,8 +261,8 @@ Res VMMap(VM vm, Addr base, Addr limit)
LPVOID b;
AVERT(VM, vm);
AVER(AddrIsAligned(base, vm->grainSize));
AVER(AddrIsAligned(limit, vm->grainSize));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
AVER(vm->base <= base);
AVER(base < limit);
AVER(limit <= vm->limit);
@@ -290,8 +290,8 @@ void VMUnmap(VM vm, Addr base, Addr limit)
BOOL b;
AVERT(VM, vm);
AVER(AddrIsAligned(base, vm->grainSize));
AVER(AddrIsAligned(limit, vm->grainSize));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
AVER(vm->base <= base);
AVER(base < limit);
AVER(limit <= vm->limit);