From ec9a315e34b1a2c46f5287e8639fd04858a69195 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Sat, 17 May 2014 10:02:47 +0100 Subject: [PATCH 01/24] Branching master to branch/2014-05-17/chunk-tree. Copied from Perforce Change: 186152 ServerID: perforce.ravenbrook.com From 0b0a46567404ece617d03a5d10ad7837ade8705b Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Sat, 17 May 2014 17:05:42 +0100 Subject: [PATCH 02/24] Replace the chunk ring with a chunk tree. Fix bug in SplayFindNext (LESS and GREATER the wrong way round). Copied from Perforce Change: 186157 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 88 ++++++++++++-------- mps/code/arenacl.c | 37 +++++---- mps/code/arenavm.c | 68 ++++++++------- mps/code/mpm.h | 1 + mps/code/mpmst.h | 17 +--- mps/code/splay.c | 44 ++++++---- mps/code/splay.h | 2 + mps/code/tract.c | 201 +++++++++++++++++---------------------------- mps/code/tract.h | 20 +++-- mps/code/tree.h | 11 +++ 10 files changed, 247 insertions(+), 242 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 30f7687ce81..8a70303386f 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -147,9 +147,9 @@ Bool ArenaCheck(Arena arena) if (arena->primary != NULL) { CHECKD(Chunk, arena->primary); } - CHECKD_NOSIG(Ring, &arena->chunkRing); + /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. 
*/ + CHECKL(SplayTreeCheck(ArenaChunkTree(arena))); /* nothing to check for chunkSerial */ - CHECKD(ChunkCacheEntry, &arena->chunkCache); CHECKL(LocusCheck(arena)); @@ -205,9 +205,8 @@ Res ArenaInit(Arena arena, ArenaClass class, Align alignment, ArgList args) arena->zoned = zoned; arena->primary = NULL; - RingInit(&arena->chunkRing); + SplayTreeInit(ArenaChunkTree(arena), ChunkCompare, ChunkKey, SplayTrivUpdate); arena->chunkSerial = (Serial)0; - ChunkCacheEntryInit(&arena->chunkCache); LocusInit(arena); @@ -349,7 +348,8 @@ void ArenaFinish(Arena arena) arena->sig = SigInvalid; GlobalsFinish(ArenaGlobals(arena)); LocusFinish(arena); - RingFinish(&arena->chunkRing); + AVER(SplayTreeIsEmpty(ArenaChunkTree(arena))); + SplayTreeFinish(ArenaChunkTree(arena)); } @@ -606,52 +606,74 @@ Res ControlDescribe(Arena arena, mps_lib_FILE *stream) * CBS or any pool, because it is used as part of the bootstrap. */ -static Res arenaAllocPageInChunk(Addr *baseReturn, Chunk chunk, Pool pool) -{ - Res res; - Index basePageIndex, limitPageIndex; +typedef struct ArenaAllocPageClosureStruct { Arena arena; + Pool pool; + Chunk avoid; + Addr base; +} ArenaAllocPageClosureStruct, *ArenaAllocPageClosure; - AVER(baseReturn != NULL); +static Bool arenaAllocPageInChunk(Tree tree, void *closureP, Size closureS) +{ + ArenaAllocPageClosure cl; + Chunk chunk; + Index basePageIndex, limitPageIndex; + Res res; + + AVERT(Tree, tree); + chunk = ChunkOfTree(tree); AVERT(Chunk, chunk); - AVERT(Pool, pool); - arena = ChunkArena(chunk); + AVER(closureP != NULL); + cl = closureP; + AVER(cl->arena == ChunkArena(chunk)); + UNUSED(closureS); + + if (chunk == cl->avoid) + return TRUE; if (!BTFindShortResRange(&basePageIndex, &limitPageIndex, chunk->allocTable, chunk->allocBase, chunk->pages, 1)) - return ResRESOURCE; + return TRUE; - res = (*arena->class->pagesMarkAllocated)(arena, chunk, - basePageIndex, 1, - pool); + res = (*cl->arena->class->pagesMarkAllocated)(cl->arena, chunk, + basePageIndex, 1, 
cl->pool); if (res != ResOK) - return res; + return TRUE; - *baseReturn = PageIndexBase(chunk, basePageIndex); - return ResOK; + cl->base = PageIndexBase(chunk, basePageIndex); + return FALSE; } static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool) { - Res res; + ArenaAllocPageClosureStruct closure; + AVER(baseReturn != NULL); + AVERT(Arena, arena); + AVERT(Pool, pool); + + closure.arena = arena; + closure.pool = pool; + closure.base = NULL; + /* Favour the primary chunk, because pages allocated this way aren't currently freed, and we don't want to prevent chunks being destroyed. */ /* TODO: Consider how the ArenaCBSBlockPool might free pages. */ - res = arenaAllocPageInChunk(baseReturn, arena->primary, pool); - if (res != ResOK) { - Ring node, next; - RING_FOR(node, &arena->chunkRing, next) { - Chunk chunk = RING_ELT(Chunk, chunkRing, node); - if (chunk != arena->primary) { - res = arenaAllocPageInChunk(baseReturn, chunk, pool); - if (res == ResOK) - break; - } - } - } - return res; + if (arenaAllocPageInChunk(&arena->primary->chunkTree, &closure, 0) == FALSE) + goto found; + + closure.avoid = arena->primary; + if (SplayTreeTraverse(ArenaChunkTree(arena), arenaAllocPageInChunk, &closure, 0) + == FALSE) + goto found; + + return ResRESOURCE; + +found: + AVER(closure.base != NULL); + *baseReturn = closure.base; + return ResOK; } diff --git a/mps/code/arenacl.c b/mps/code/arenacl.c index 1a0fcf3c6e3..1a2c52f30e0 100644 --- a/mps/code/arenacl.c +++ b/mps/code/arenacl.c @@ -290,15 +290,14 @@ static Res ClientArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args) static void ClientArenaFinish(Arena arena) { ClientArena clientArena; - Ring node, next; clientArena = Arena2ClientArena(arena); AVERT(ClientArena, clientArena); - /* destroy all chunks */ - RING_FOR(node, &arena->chunkRing, next) { - Chunk chunk = RING_ELT(Chunk, chunkRing, node); - clientChunkDestroy(chunk); + /* destroy all chunks, including the primary */ + arena->primary = NULL; + while 
(!SplayTreeIsEmpty(ArenaChunkTree(arena))) { + clientChunkDestroy(ChunkOfTree(SplayTreeRoot(ArenaChunkTree(arena)))); } clientArena->sig = SigInvalid; @@ -329,20 +328,30 @@ static Res ClientArenaExtend(Arena arena, Addr base, Size size) /* ClientArenaReserved -- return the amount of reserved address space */ +static Bool clientArenaReservedVisitor(Tree tree, void *closureP, Size closureS) +{ + Size *size; + Chunk chunk; + + AVERT(Tree, tree); + chunk = ChunkOfTree(tree); + AVERT(Chunk, chunk); + AVER(closureP != 0); + size = closureP; + UNUSED(closureS); + + *size += ChunkSize(chunk); + return TRUE; +} + static Size ClientArenaReserved(Arena arena) { - Size size; - Ring node, nextNode; + Size size = 0; AVERT(Arena, arena); - size = 0; - /* .req.extend.slow */ - RING_FOR(node, &arena->chunkRing, nextNode) { - Chunk chunk = RING_ELT(Chunk, chunkRing, node); - AVERT(Chunk, chunk); - size += AddrOffset(chunk->base, chunk->limit); - } + TreeTraverse(SplayTreeRoot(ArenaChunkTree(arena)), ChunkCompare, ChunkKey, + clientArenaReservedVisitor, &size, 0); return size; } diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c index f878092bd01..e5343c1329a 100644 --- a/mps/code/arenavm.c +++ b/mps/code/arenavm.c @@ -557,7 +557,7 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args) /* bits in a word). Fail if the chunk is so small stripes are smaller */ /* than pages. Note that some zones are discontiguous in the chunk if */ /* the size is not a power of 2. See . 
*/ - chunkSize = AddrOffset(chunk->base, chunk->limit); + chunkSize = ChunkSize(chunk); arena->zoneShift = SizeFloorLog2(chunkSize >> MPS_WORD_SHIFT); AVER(chunk->pageSize == arena->alignment); @@ -588,7 +588,6 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args) static void VMArenaFinish(Arena arena) { VMArena vmArena; - Ring node, next; VM arenaVM; vmArena = Arena2VMArena(arena); @@ -599,9 +598,8 @@ static void VMArenaFinish(Arena arena) /* destroy all chunks, including the primary */ arena->primary = NULL; - RING_FOR(node, &arena->chunkRing, next) { - Chunk chunk = RING_ELT(Chunk, chunkRing, node); - vmChunkDestroy(chunk); + while (!SplayTreeIsEmpty(ArenaChunkTree(arena))) { + vmChunkDestroy(ChunkOfTree(SplayTreeRoot(ArenaChunkTree(arena)))); } /* Destroying the chunks should have purged and removed all spare pages. */ @@ -623,17 +621,34 @@ static void VMArenaFinish(Arena arena) * * Add up the reserved space from all the chunks. */ + +static Bool vmArenaReservedVisitor(Tree tree, void *closureP, Size closureS) +{ + Size *size; + Chunk chunk; + + AVERT(Tree, tree); + chunk = ChunkOfTree(tree); + AVERT(Chunk, chunk); + AVER(closureP != 0); + size = closureP; + UNUSED(closureS); + + *size += VMReserved(Chunk2VMChunk(chunk)->vm); + return TRUE; +} + + static Size VMArenaReserved(Arena arena) { - Size reserved; - Ring node, next; + Size size = 0; - reserved = 0; - RING_FOR(node, &arena->chunkRing, next) { - VMChunk vmChunk = Chunk2VMChunk(RING_ELT(Chunk, chunkRing, node)); - reserved += VMReserved(vmChunk->vm); - } - return reserved; + AVERT(Arena, arena); + + TreeTraverse(SplayTreeRoot(ArenaChunkTree(arena)), ChunkCompare, ChunkKey, + vmArenaReservedVisitor, &size, 0); + + return size; } @@ -996,9 +1011,7 @@ static Size VMPurgeSpare(Arena arena, Size size) static void chunkUnmapSpare(Chunk chunk) { AVERT(Chunk, chunk); - (void)arenaUnmapSpare(ChunkArena(chunk), - AddrOffset(chunk->base, chunk->limit), - chunk); + 
(void)arenaUnmapSpare(ChunkArena(chunk), ChunkSize(chunk), chunk); } @@ -1071,8 +1084,8 @@ static void VMFree(Addr base, Size size, Pool pool) static void VMCompact(Arena arena, Trace trace) { VMArena vmArena; - Ring node, next; Size vmem1; + Tree tree; vmArena = Arena2VMArena(arena); AVERT(VMArena, vmArena); @@ -1080,24 +1093,21 @@ static void VMCompact(Arena arena, Trace trace) vmem1 = VMArenaReserved(arena); - RING_FOR(node, &arena->chunkRing, next) { - Chunk chunk = RING_ELT(Chunk, chunkRing, node); + tree = SplayTreeFirst(ArenaChunkTree(arena)); + while (tree != TreeEMPTY) { + Chunk chunk = ChunkOfTree(tree); + TreeKey key = ChunkKey(tree); + AVERT(Chunk, chunk); if(chunk != arena->primary - && BTIsResRange(chunk->allocTable, 0, chunk->pages)) { + && BTIsResRange(chunk->allocTable, 0, chunk->pages)) + { Addr base = chunk->base; - Size size = AddrOffset(chunk->base, chunk->limit); - - /* Ensure there are no spare (mapped) pages left in the chunk. - This could be short-cut if we're about to destroy the chunk, - provided we can do the correct accounting in the arena. 
*/ - chunkUnmapSpare(chunk); - + Size size = ChunkSize(chunk); vmChunkDestroy(chunk); - vmArena->contracted(arena, base, size); } + tree = SplayTreeNext(ArenaChunkTree(arena), key); } - { Size vmem0 = trace->preTraceArenaReserved; Size vmem2 = VMArenaReserved(arena); diff --git a/mps/code/mpm.h b/mps/code/mpm.h index e6a0da4e834..33c994c1236 100644 --- a/mps/code/mpm.h +++ b/mps/code/mpm.h @@ -518,6 +518,7 @@ extern Ring GlobalsRememberedSummaryRing(Globals); #define ArenaAlign(arena) ((arena)->alignment) #define ArenaGreyRing(arena, rank) (&(arena)->greyRing[rank]) #define ArenaPoolRing(arena) (&ArenaGlobals(arena)->poolRing) +#define ArenaChunkTree(arena) (&(arena)->chunkTree) extern void ArenaEnterLock(Arena arena, Bool recursive); extern void ArenaLeaveLock(Arena arena, Bool recursive); diff --git a/mps/code/mpmst.h b/mps/code/mpmst.h index 5d38f3a3319..7334ab56fa4 100644 --- a/mps/code/mpmst.h +++ b/mps/code/mpmst.h @@ -515,18 +515,6 @@ typedef struct TraceStruct { } TraceStruct; -/* ChunkCacheEntryStruct -- cache entry in the chunk cache */ - -#define ChunkCacheEntrySig ((Sig)0x519C80CE) /* SIGnature CHUnk Cache Entry */ - -typedef struct ChunkCacheEntryStruct { - Sig sig; - Chunk chunk; - Addr base; - Addr limit; -} ChunkCacheEntryStruct; - - /* ArenaClassStruct -- generic arena class interface */ #define ArenaClassSig ((Sig)0x519A6C1A) /* SIGnature ARena CLAss */ @@ -657,10 +645,9 @@ typedef struct mps_arena_s { Addr lastTractBase; /* base address of lastTract */ Chunk primary; /* the primary chunk */ - RingStruct chunkRing; /* all the chunks */ + SplayTreeStruct chunkTree; /* all the chunks */ Serial chunkSerial; /* next chunk number */ - ChunkCacheEntryStruct chunkCache; /* just one entry */ - + Bool hasFreeCBS; /* Is freeCBS available? 
*/ MFSStruct freeCBSBlockPoolStruct; CBSStruct freeCBSStruct; diff --git a/mps/code/splay.c b/mps/code/splay.c index 7e061cd14f4..e3d15e85037 100644 --- a/mps/code/splay.c +++ b/mps/code/splay.c @@ -679,7 +679,7 @@ static Compare SplaySplay(SplayTree splay, TreeKey key, TreeCompare compare) SplayStateStruct stateStruct; #ifdef SPLAY_DEBUG - Count count = TreeDebugCount(SplayTreeRoot(tree), tree->compare, tree->nodeKey); + Count count = TreeDebugCount(SplayTreeRoot(splay), splay->compare, splay->nodeKey); #endif /* Short-circuit common cases. Splay trees often bring recently @@ -699,7 +699,7 @@ static Compare SplaySplay(SplayTree splay, TreeKey key, TreeCompare compare) SplayTreeSetRoot(splay, stateStruct.middle); #ifdef SPLAY_DEBUG - AVER(count == TreeDebugCount(SplayTreeRoot(tree), tree->compare, tree->nodeKey)); + AVER(count == TreeDebugCount(SplayTreeRoot(splay), splay->compare, splay->nodeKey)); #endif return cmp; @@ -894,7 +894,7 @@ Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, Bool found; Compare cmp; #ifdef SPLAY_DEBUG - Count count = TreeDebugCount(SplayTreeRoot(tree), tree->compare, tree->nodeKey); + Count count = TreeDebugCount(SplayTreeRoot(splay), splay->compare, splay->nodeKey); #endif @@ -936,7 +936,7 @@ Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, SplayTreeSetRoot(splay, stateStruct.middle); #ifdef SPLAY_DEBUG - AVER(count == TreeDebugCount(SplayTreeRoot(tree), tree->compare, tree->nodeKey)); + AVER(count == TreeDebugCount(SplayTreeRoot(splay), splay->compare, splay->nodeKey)); #endif return found; @@ -945,10 +945,8 @@ Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, /* SplayTreeFirst, SplayTreeNext -- iterators * - * SplayTreeFirst receives a key that must precede all - * nodes in the tree. It returns TreeEMPTY if the tree is empty. - * Otherwise, it splays the tree to the first node, and returns the - * new root. + * SplayTreeFirst returns TreeEMPTY if the tree is empty. 
Otherwise, + * it splays the tree to the first node, and returns the new root. * * SplayTreeNext takes a tree and splays it to the successor of a key * and returns the new root. Returns TreeEMPTY is there are no successors. @@ -957,8 +955,9 @@ Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, * unmodified. * * IMPORTANT: Iterating over the tree using these functions will leave - * the tree totally unbalanced, throwing away optimisations of the tree - * shape caused by previous splays. Consider using TreeTraverse instead. + * the tree totally unbalanced, throwing away optimisations of the + * tree shape caused by previous splays. Consider using + * SplayTreeTraverse instead. */ Tree SplayTreeFirst(SplayTree splay) { @@ -989,16 +988,26 @@ Tree SplayTreeNext(SplayTree splay, TreeKey oldKey) { default: NOTREACHED; /* defensive fall-through */ - case CompareGREATER: + case CompareLESS: return SplayTreeRoot(splay); - case CompareLESS: + case CompareGREATER: case CompareEQUAL: return SplayTreeSuccessor(splay); } } +/* SplayTreeTraverse -- iterate over splay tree without splaying it */ + +Bool SplayTreeTraverse(SplayTree splay, TreeVisitor visitor, + void *closureP, Size closureS) +{ + return TreeTraverse(splay->root, splay->compare, splay->nodeKey, + visitor, closureP, closureS); +} + + /* SplayNodeDescribe -- Describe a node in the splay tree * * Note that this breaks the restriction of .note.stack. 
@@ -1009,10 +1018,9 @@ static Res SplayNodeDescribe(Tree node, mps_lib_FILE *stream, SplayNodeDescribeMethod nodeDescribe) { Res res; -#if defined(AVER_AND_CHECK) if (!TreeCheck(node)) return ResFAIL; - /* stream and nodeDescribe checked by SplayTreeDescribe */ -#endif + if (stream == NULL) return ResFAIL; + if (!FUNCHECK(nodeDescribe)) return ResFAIL; res = WriteF(stream, "( ", NULL); if (res != ResOK) return res; @@ -1327,15 +1335,15 @@ Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, SplayNodeDescribeMethod nodeDescribe) { Res res; -#if defined(AVER_AND_CHECK) - if (!SplayTreeCheck(splay)) return ResFAIL; + if (!TESTT(SplayTree, splay)) return ResFAIL; if (stream == NULL) return ResFAIL; if (!FUNCHECK(nodeDescribe)) return ResFAIL; -#endif res = WriteF(stream, "Splay $P {\n", (WriteFP)splay, " compare $F\n", (WriteFF)splay->compare, + " nodeKey $F\n", (WriteFF)splay->nodeKey, + " updateNode $F\n", (WriteFF)splay->updateNode, NULL); if (res != ResOK) return res; diff --git a/mps/code/splay.h b/mps/code/splay.h index 86f7f470482..5103a2991e6 100644 --- a/mps/code/splay.h +++ b/mps/code/splay.h @@ -55,6 +55,8 @@ extern Bool SplayTreeNeighbours(Tree *leftReturn, extern Tree SplayTreeFirst(SplayTree splay); extern Tree SplayTreeNext(SplayTree splay, TreeKey oldKey); +extern Bool SplayTreeTraverse(SplayTree splay, TreeVisitor visitor, + void *closureP, Size closureS); typedef Bool (*SplayFindMethod)(Tree *nodeReturn, SplayTree splay, SplayTestNodeMethod testNode, diff --git a/mps/code/tract.c b/mps/code/tract.c index 2468887bc17..dd2c2d6cd57 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -17,9 +17,6 @@ SRCID(tract, "$Id$"); -static void ChunkDecache(Arena arena, Chunk chunk); - - /* TractArena -- get the arena of a tract */ #define TractArena(tract) PoolArena(TractPool(tract)) @@ -113,17 +110,17 @@ Bool ChunkCheck(Chunk chunk) CHECKS(Chunk, chunk); CHECKU(Arena, chunk->arena); CHECKL(chunk->serial < chunk->arena->chunkSerial); - CHECKD_NOSIG(Ring, 
&chunk->chunkRing); + /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. */ + CHECKL(TreeCheck(&chunk->chunkTree)); CHECKL(ChunkPagesToSize(chunk, 1) == ChunkPageSize(chunk)); CHECKL(ShiftCheck(ChunkPageShift(chunk))); CHECKL(chunk->base != (Addr)0); CHECKL(chunk->base < chunk->limit); - /* check chunk is in itself */ - CHECKL(chunk->base <= (Addr)chunk); + /* .chunk.at.base: check chunk structure is at its own base */ + CHECKL(chunk->base == (Addr)chunk); CHECKL((Addr)(chunk+1) <= chunk->limit); - CHECKL(ChunkSizeToPages(chunk, AddrOffset(chunk->base, chunk->limit)) - == chunk->pages); + CHECKL(ChunkSizeToPages(chunk, ChunkSize(chunk)) == chunk->pages); /* check that the tables fit in the chunk */ CHECKL(chunk->allocBase <= chunk->pages); CHECKL(chunk->allocBase >= chunk->pageTablePages); @@ -176,14 +173,12 @@ Res ChunkInit(Chunk chunk, Arena arena, chunk->serial = (arena->chunkSerial)++; chunk->arena = arena; - RingInit(&chunk->chunkRing); - RingAppend(&arena->chunkRing, &chunk->chunkRing); chunk->pageSize = pageSize; chunk->pageShift = pageShift = SizeLog2(pageSize); chunk->base = base; chunk->limit = limit; - size = AddrOffset(base, limit); + size = ChunkSize(chunk); chunk->pages = pages = size >> pageShift; res = BootAlloc(&p, boot, (size_t)BTSize(pages), MPS_PF_ALIGN); @@ -218,6 +213,9 @@ Res ChunkInit(Chunk chunk, Arena arena, goto failCBSInsert; } + TreeInit(&chunk->chunkTree); + SplayTreeInsert(ArenaChunkTree(arena), &chunk->chunkTree); + chunk->sig = ChunkSig; AVERT(Chunk, chunk); @@ -242,11 +240,17 @@ Res ChunkInit(Chunk chunk, Arena arena, void ChunkFinish(Chunk chunk) { + Bool res; + AVERT(Chunk, chunk); AVER(BTIsResRange(chunk->allocTable, 0, chunk->pages)); - ChunkDecache(chunk->arena, chunk); + + res = SplayTreeDelete(ArenaChunkTree(ChunkArena(chunk)), &chunk->chunkTree); + AVER(res); + chunk->sig = SigInvalid; - RingRemove(&chunk->chunkRing); + + TreeFinish(&chunk->chunkTree); if (ChunkArena(chunk)->hasFreeCBS) 
ArenaFreeCBSDelete(ChunkArena(chunk), @@ -262,92 +266,39 @@ void ChunkFinish(Chunk chunk) } -/* Chunk Cache - * - * Functions for manipulating the chunk cache in the arena. - */ +/* ChunkCompare -- Compare key to [base,limit) */ - -/* ChunkCacheEntryCheck -- check a chunk cache entry - * - * The cache is EITHER empty: - * - chunk is null; AND - * - base & limit are both null - * OR full: - * - chunk is non-null, points to a ChunkStruct; AND - * - base & limit are not both null; - * - * .chunk.empty.fields: Fields of an empty cache are nonetheless read, - * and must be correct. - */ - -Bool ChunkCacheEntryCheck(ChunkCacheEntry entry) +Compare ChunkCompare(Tree tree, TreeKey key) { - CHECKS(ChunkCacheEntry, entry); - if (entry->chunk == NULL) { - CHECKL(entry->base == NULL); /* .chunk.empty.fields */ - CHECKL(entry->limit == NULL); /* .chunk.empty.fields */ - } else { - CHECKL(!(entry->base == NULL && entry->limit == NULL)); - CHECKD(Chunk, entry->chunk); - CHECKL(entry->base == entry->chunk->base); - CHECKL(entry->limit == entry->chunk->limit); - } - return TRUE; -} + Addr base1, base2, limit2; + Chunk chunk; + AVERT_CRITICAL(Tree, tree); + AVER_CRITICAL(tree != TreeEMPTY); -/* ChunkCacheEntryInit -- initialize a chunk cache entry */ - -void ChunkCacheEntryInit(ChunkCacheEntry entry) -{ - entry->chunk = NULL; - entry->base = NULL; /* .chunk.empty.fields */ - entry->limit = NULL; /* .chunk.empty.fields */ - entry->sig = ChunkCacheEntrySig; - AVERT(ChunkCacheEntry, entry); - return; -} - - -/* ChunkEncache -- cache a chunk */ - -static void ChunkEncache(Arena arena, Chunk chunk) -{ - /* [Critical path](../design/critical-path.txt); called by ChunkOfAddr */ - AVERT_CRITICAL(Arena, arena); + chunk = ChunkOfTree(tree); AVERT_CRITICAL(Chunk, chunk); - AVER_CRITICAL(arena == chunk->arena); - AVERT_CRITICAL(ChunkCacheEntry, &arena->chunkCache); - /* check chunk already in cache first */ - if (arena->chunkCache.chunk == chunk) { - return; - } + base1 = AddrOfTreeKey(key); + 
base2 = chunk->base; + limit2 = chunk->limit; - arena->chunkCache.chunk = chunk; - arena->chunkCache.base = chunk->base; - arena->chunkCache.limit = chunk->limit; - - AVERT_CRITICAL(ChunkCacheEntry, &arena->chunkCache); - return; + if (base1 < base2) + return CompareLESS; + else if (base1 >= limit2) + return CompareGREATER; + else + return CompareEQUAL; } -/* ChunkDecache -- make sure a chunk is not in the cache */ +/* ChunkKey -- Return the key corresponding to a chunk */ -static void ChunkDecache(Arena arena, Chunk chunk) +TreeKey ChunkKey(Tree tree) { - AVERT(Arena, arena); - AVERT(Chunk, chunk); - AVER(arena == chunk->arena); - AVERT(ChunkCacheEntry, &arena->chunkCache); - if (arena->chunkCache.chunk == chunk) { - arena->chunkCache.chunk = NULL; - arena->chunkCache.base = NULL; /* .chunk.empty.fields */ - arena->chunkCache.limit = NULL; /* .chunk.empty.fields */ - } - AVERT(ChunkCacheEntry, &arena->chunkCache); + /* See .chunk.at.base. */ + Chunk chunk = ChunkOfTree(tree); + return TreeKeyOfAddrVar(chunk); } @@ -355,58 +306,42 @@ static void ChunkDecache(Arena arena, Chunk chunk) Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr) { - Ring node, next; + Tree tree; AVER_CRITICAL(chunkReturn != NULL); AVERT_CRITICAL(Arena, arena); /* addr is arbitrary */ - /* check cache first; see also .chunk.empty.fields */ - AVERT_CRITICAL(ChunkCacheEntry, &arena->chunkCache); - if (arena->chunkCache.base <= addr && addr < arena->chunkCache.limit) { - *chunkReturn = arena->chunkCache.chunk; - AVER_CRITICAL(*chunkReturn != NULL); + if (SplayTreeFind(&tree, ArenaChunkTree(arena), TreeKeyOfAddrVar(addr))) { + Chunk chunk = ChunkOfTree(tree); + AVER_CRITICAL(chunk->base <= addr && addr < chunk->limit); + *chunkReturn = chunk; return TRUE; } - RING_FOR(node, &arena->chunkRing, next) { - Chunk chunk = RING_ELT(Chunk, chunkRing, node); - if (chunk->base <= addr && addr < chunk->limit) { - /* Gotcha! 
*/ - ChunkEncache(arena, chunk); - *chunkReturn = chunk; - return TRUE; - } - } return FALSE; } -/* ChunkOfNextAddr +/* chunkAboveAddr * - * Finds the next higher chunk in memory which does _not_ contain addr. - * Returns FALSE if there is none. - * - * [The name is misleading; it should be "NextChunkAboveAddr" -- the - * word "Next" applies to chunks, not to addrs. RHSK 2010-03-20.] + * Finds the next higher chunk in memory which does _not_ contain + * addr. If there is such a chunk, update *chunkReturn and return + * TRUE, otherwise return FALSE. */ -static Bool ChunkOfNextAddr(Chunk *chunkReturn, Arena arena, Addr addr) +static Bool chunkAboveAddr(Chunk *chunkReturn, Arena arena, Addr addr) { - Addr leastBase; - Chunk leastChunk; - Ring node, next; + Tree tree; - leastBase = (Addr)(Word)-1; - leastChunk = NULL; - RING_FOR(node, &arena->chunkRing, next) { - Chunk chunk = RING_ELT(Chunk, chunkRing, node); - if (addr < chunk->base && chunk->base < leastBase) { - leastBase = chunk->base; - leastChunk = chunk; - } - } - if (leastChunk != NULL) { - *chunkReturn = leastChunk; + AVER_CRITICAL(chunkReturn != NULL); + AVERT_CRITICAL(Arena, arena); + /* addr is arbitrary */ + + tree = SplayTreeNext(ArenaChunkTree(arena), TreeKeyOfAddrVar(addr)); + if (tree != TreeEMPTY) { + Chunk chunk = ChunkOfTree(tree); + AVER_CRITICAL(addr < chunk->base); + *chunkReturn = chunk; return TRUE; } return FALSE; @@ -440,6 +375,24 @@ Index IndexOfAddr(Chunk chunk, Addr addr) } +/* ChunkNodeDescribe -- describe a single node in the tree of chunks, + * for SplayTreeDescribe + */ + +Res ChunkNodeDescribe(Tree node, mps_lib_FILE *stream) +{ + Chunk chunk; + + if (!TreeCheck(node)) return ResFAIL; + if (stream == NULL) return ResFAIL; + chunk = ChunkOfTree(node); + if (!TESTT(Chunk, chunk)) return ResFAIL; + + return WriteF(stream, "[$P,$P)", (WriteFP)chunk->base, + (WriteFP)chunk->limit, NULL); +} + + /* Page table functions */ /* .tract.critical: These Tract functions are low-level and are on 
@@ -562,7 +515,7 @@ static Bool tractSearch(Tract *tractReturn, Arena arena, Addr addr) return TRUE; } } - while (ChunkOfNextAddr(&chunk, arena, addr)) { + while (chunkAboveAddr(&chunk, arena, addr)) { /* If the ring was kept in address order, this could be improved. */ addr = chunk->base; /* Start from allocBase to skip the tables. */ diff --git a/mps/code/tract.h b/mps/code/tract.h index c359032feee..7aa20572bd7 100644 --- a/mps/code/tract.h +++ b/mps/code/tract.h @@ -9,8 +9,9 @@ #define tract_h #include "mpmtypes.h" -#include "ring.h" #include "bt.h" +#include "ring.h" +#include "tree.h" /* Page states @@ -137,7 +138,7 @@ typedef struct ChunkStruct { Sig sig; /* */ Serial serial; /* serial within the arena */ Arena arena; /* parent arena */ - RingStruct chunkRing; /* ring of all chunks in arena */ + TreeStruct chunkTree; /* node in tree of all chunks in arena */ Size pageSize; /* size of pages */ Shift pageShift; /* log2 of page size, for shifts */ Addr base; /* base address of chunk */ @@ -151,21 +152,24 @@ typedef struct ChunkStruct { #define ChunkArena(chunk) RVALUE((chunk)->arena) +#define ChunkSize(chunk) AddrOffset((chunk)->base, (chunk)->limit) #define ChunkPageSize(chunk) RVALUE((chunk)->pageSize) #define ChunkPageShift(chunk) RVALUE((chunk)->pageShift) #define ChunkPagesToSize(chunk, pages) ((Size)(pages) << (chunk)->pageShift) #define ChunkSizeToPages(chunk, size) ((Count)((size) >> (chunk)->pageShift)) #define ChunkPage(chunk, pi) (&(chunk)->pageTable[pi]) +#define ChunkOfTree(tree) PARENT(ChunkStruct, chunkTree, tree) extern Bool ChunkCheck(Chunk chunk); -extern Res ChunkInit(Chunk chunk, Arena arena, - Addr base, Addr limit, Align pageSize, BootBlock boot); +extern Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, + Align pageSize, BootBlock boot); extern void ChunkFinish(Chunk chunk); - +extern Compare ChunkCompare(Tree tree, TreeKey key); +extern TreeKey ChunkKey(Tree tree); extern Bool ChunkCacheEntryCheck(ChunkCacheEntry entry); 
extern void ChunkCacheEntryInit(ChunkCacheEntry entry); - extern Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr); +extern Res ChunkNodeDescribe(Tree node, mps_lib_FILE *stream); /* CHUNK_OF_ADDR -- return the chunk containing an address * @@ -173,9 +177,7 @@ extern Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr); */ #define CHUNK_OF_ADDR(chunkReturn, arena, addr) \ - (((arena)->chunkCache.base <= (addr) && (addr) < (arena)->chunkCache.limit) \ - ? (*(chunkReturn) = (arena)->chunkCache.chunk, TRUE) \ - : ChunkOfAddr(chunkReturn, arena, addr)) + ChunkOfAddr(chunkReturn, arena, addr) /* AddrPageBase -- the base of the page this address is on */ diff --git a/mps/code/tree.h b/mps/code/tree.h index 69ee841d3c3..159550406ac 100644 --- a/mps/code/tree.h +++ b/mps/code/tree.h @@ -40,6 +40,17 @@ typedef Compare (*TreeCompare)(Tree tree, TreeKey key); typedef TreeKey (*TreeKeyMethod)(Tree tree); +/* When storing Addrs in a tree, it is fastest to cast the Addr + * directly to a TreeKey. This assumes that Addr and TreeKey are + * compatible, possibly breaking . On an exotic + * platform where the types are not convertible, take the address of + * the variable in TreeKeyOfAddrVar, and dereference the address in + * AddrOfTreeKey. + */ +#define TreeKeyOfAddrVar(var) ((TreeKey)(var)) +#define AddrOfTreeKey(key) ((Addr)(key)) + + /* TreeEMPTY -- the empty tree * * TreeEMPTY is the tree with no nodes, and hence unable to satisfy its From e2d346aa6701d8abc3fda16cc958bac8481e8976 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Sun, 18 May 2014 22:27:07 +0100 Subject: [PATCH 03/24] No need to store primary chunk in the closure: can get it via the arena. 
Copied from Perforce Change: 186164 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 8a70303386f..2453769fca9 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -609,7 +609,6 @@ Res ControlDescribe(Arena arena, mps_lib_FILE *stream) typedef struct ArenaAllocPageClosureStruct { Arena arena; Pool pool; - Chunk avoid; Addr base; } ArenaAllocPageClosureStruct, *ArenaAllocPageClosure; @@ -628,7 +627,8 @@ static Bool arenaAllocPageInChunk(Tree tree, void *closureP, Size closureS) AVER(cl->arena == ChunkArena(chunk)); UNUSED(closureS); - if (chunk == cl->avoid) + /* Already searched in arenaAllocPage. */ + if (chunk == cl->arena->primary) return TRUE; if (!BTFindShortResRange(&basePageIndex, &limitPageIndex, @@ -663,7 +663,6 @@ static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool) if (arenaAllocPageInChunk(&arena->primary->chunkTree, &closure, 0) == FALSE) goto found; - closure.avoid = arena->primary; if (SplayTreeTraverse(ArenaChunkTree(arena), arenaAllocPageInChunk, &closure, 0) == FALSE) goto found; From 632ac692a55c02fe72abe2cda03002486d65aeab Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Mon, 19 May 2014 12:24:11 +0100 Subject: [PATCH 04/24] Restore "avoid" mechanism in arenaallocpage. Make sure that we can tear down the arena if ArenaCreate fails: 1. Don't set hasFreeCBS until the block pool has some pages. 2. Finish the CBS block pool in ArenaFinish, not ArenaDestroy. 3. Delete the chunk from the arena's free CBS before destroying the chunk, just in case the chunk contains pages from the CBS's block pool. 
Copied from Perforce Change: 186177 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 9 ++++++--- mps/code/tract.c | 12 ++++++------ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 2453769fca9..36c989853cb 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -305,13 +305,13 @@ Res ArenaCreate(Arena *arenaReturn, ArenaClass class, ArgList args) /* With the primary chunk initialised we can add page memory to the freeCBS that describes the free address space in the primary chunk. */ - arena->hasFreeCBS = TRUE; res = ArenaFreeCBSInsert(arena, PageIndexBase(arena->primary, arena->primary->allocBase), arena->primary->limit); if (res != ResOK) goto failPrimaryCBS; + arena->hasFreeCBS = TRUE; res = ControlInit(arena); if (res != ResOK) @@ -344,6 +344,7 @@ Res ArenaCreate(Arena *arenaReturn, ArenaClass class, ArgList args) void ArenaFinish(Arena arena) { + PoolFinish(ArenaCBSBlockPool(arena)); ReservoirFinish(ArenaReservoir(arena)); arena->sig = SigInvalid; GlobalsFinish(ArenaGlobals(arena)); @@ -388,7 +389,6 @@ void ArenaDestroy(Arena arena) that would use the ZonedCBS. */ MFSFinishTracts(ArenaCBSBlockPool(arena), arenaMFSPageFreeVisitor, NULL, 0); - PoolFinish(ArenaCBSBlockPool(arena)); /* Call class-specific finishing. This will call ArenaFinish. */ (*arena->class->finish)(arena); @@ -610,6 +610,7 @@ typedef struct ArenaAllocPageClosureStruct { Arena arena; Pool pool; Addr base; + Chunk avoid; } ArenaAllocPageClosureStruct, *ArenaAllocPageClosure; static Bool arenaAllocPageInChunk(Tree tree, void *closureP, Size closureS) @@ -628,7 +629,7 @@ static Bool arenaAllocPageInChunk(Tree tree, void *closureP, Size closureS) UNUSED(closureS); /* Already searched in arenaAllocPage. 
*/ - if (chunk == cl->arena->primary) + if (chunk == cl->avoid) return TRUE; if (!BTFindShortResRange(&basePageIndex, &limitPageIndex, @@ -656,6 +657,7 @@ static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool) closure.arena = arena; closure.pool = pool; closure.base = NULL; + closure.avoid = NULL; /* Favour the primary chunk, because pages allocated this way aren't currently freed, and we don't want to prevent chunks being destroyed. */ @@ -663,6 +665,7 @@ static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool) if (arenaAllocPageInChunk(&arena->primary->chunkTree, &closure, 0) == FALSE) goto found; + closure.avoid = arena->primary; if (SplayTreeTraverse(ArenaChunkTree(arena), arenaAllocPageInChunk, &closure, 0) == FALSE) goto found; diff --git a/mps/code/tract.c b/mps/code/tract.c index dd2c2d6cd57..01bc65cd160 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -210,7 +210,7 @@ Res ChunkInit(Chunk chunk, Arena arena, PageIndexBase(chunk, chunk->allocBase), chunk->limit); if (res != ResOK) - goto failCBSInsert; + goto failCBSInsert; } TreeInit(&chunk->chunkTree); @@ -245,6 +245,11 @@ void ChunkFinish(Chunk chunk) AVERT(Chunk, chunk); AVER(BTIsResRange(chunk->allocTable, 0, chunk->pages)); + if (ChunkArena(chunk)->hasFreeCBS) + ArenaFreeCBSDelete(ChunkArena(chunk), + PageIndexBase(chunk, chunk->allocBase), + chunk->limit); + res = SplayTreeDelete(ArenaChunkTree(ChunkArena(chunk)), &chunk->chunkTree); AVER(res); @@ -252,11 +257,6 @@ void ChunkFinish(Chunk chunk) TreeFinish(&chunk->chunkTree); - if (ChunkArena(chunk)->hasFreeCBS) - ArenaFreeCBSDelete(ChunkArena(chunk), - PageIndexBase(chunk, chunk->allocBase), - chunk->limit); - if (chunk->arena->primary == chunk) chunk->arena->primary = NULL; From 15b35c6e1c7c4762edaaaed162dd04c08c48abd7 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Mon, 19 May 2014 13:47:56 +0100 Subject: [PATCH 05/24] New function splaydebugcount counts the number of items in a splay tree (while checking its 
consistency). Copied from Perforce Change: 186182 ServerID: perforce.ravenbrook.com --- mps/code/splay.c | 15 +++++++++++++++ mps/code/splay.h | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/mps/code/splay.c b/mps/code/splay.c index e3d15e85037..41cc3069412 100644 --- a/mps/code/splay.c +++ b/mps/code/splay.c @@ -164,6 +164,21 @@ void SplayDebugUpdate(SplayTree splay, Tree tree) } +/* SplayDebugCount -- count and check order of tree + * + * This function may be called from a debugger or temporarily inserted + * during development to check a tree's integrity. It may not be called + * from the production MPS because it uses indefinite stack depth. + * See . + */ + +Count SplayDebugCount(SplayTree splay) +{ + AVERT(SplayTree, splay); + return TreeDebugCount(SplayTreeRoot(splay), splay->compare, splay->nodeKey); +} + + /* SplayZig -- move to left child, prepending to right tree * * Link the top node of the middle tree into the left child of the diff --git a/mps/code/splay.h b/mps/code/splay.h index 5103a2991e6..9bfb27c0ed1 100644 --- a/mps/code/splay.h +++ b/mps/code/splay.h @@ -77,7 +77,7 @@ extern Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, SplayNodeDescribeMethod nodeDescribe); extern void SplayDebugUpdate(SplayTree splay, Tree tree); - +extern Count SplayDebugCount(SplayTree splay); #endif /* splay_h */ From 5e702b6819be193997460e3851b3a9a9f552657b Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Mon, 19 May 2014 15:41:20 +0100 Subject: [PATCH 06/24] Gcbench now reports the number of chunks. 
Copied from Perforce Change: 186188 ServerID: perforce.ravenbrook.com --- mps/code/gcbench.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mps/code/gcbench.c b/mps/code/gcbench.c index c958128ce4f..8b841cbeeb0 100644 --- a/mps/code/gcbench.c +++ b/mps/code/gcbench.c @@ -12,6 +12,7 @@ #include "testthr.h" #include "fmtdy.h" #include "fmtdytst.h" +#include "mpm.h" #include /* fprintf, printf, putchars, sscanf, stderr, stdout */ #include /* alloca, exit, EXIT_FAILURE, EXIT_SUCCESS, strtoul */ @@ -244,6 +245,7 @@ static void arena_setup(gcthread_fn_t fn, } MPS_ARGS_END(args); watch(fn, name); mps_arena_park(arena); + printf("%u chunks\n", (unsigned)SplayDebugCount(ArenaChunkTree(arena))); mps_pool_destroy(pool); mps_fmt_destroy(format); if (ngen > 0) From 47befaa46b1ca6ec08b66b934fc994d7b7b18522 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Mon, 19 May 2014 20:19:50 +0100 Subject: [PATCH 07/24] Use treefind instead of splaytreefind to search the chunk tree. Balance the chunk tree after insertion and deletion. Avoid calling TractFirst and TractNext in ArenaDescribeTracts and PoolOfRange. 
Copied from Perforce Change: 186199 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 60 +++++++++++++++++++++++++----------------------- mps/code/pool.c | 27 ++++++++++++---------- mps/code/tract.c | 23 +++++++++++++++---- mps/code/tract.h | 4 ++-- mps/code/tree.c | 12 +++------- 5 files changed, 69 insertions(+), 57 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 36c989853cb..70e7931cb66 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -495,51 +495,53 @@ Res ArenaDescribe(Arena arena, mps_lib_FILE *stream) } -/* ArenaDescribeTracts -- describe all the tracts in the arena */ +/* ArenaDescribeTractsInChunk -- describe all the tracts in a chunk */ -Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream) +static Bool arenaDescribeTractsInChunk(Tree tree, void *closureP, Size closureS) { - Res res; + mps_lib_FILE *stream = closureP; + Chunk chunk; Tract tract; - Bool b; - Addr oldLimit, base, limit; - Size size; + Addr addr; + Res res; - if (!TESTT(Arena, arena)) return ResFAIL; + chunk = ChunkOfTree(tree); + if (!TESTT(Chunk, chunk)) return ResFAIL; if (stream == NULL) return ResFAIL; + UNUSED(closureS); - b = TractFirst(&tract, arena); - oldLimit = TractBase(tract); - while (b) { - base = TractBase(tract); - limit = TractLimit(tract); - size = ArenaAlign(arena); - - if (TractBase(tract) > oldLimit) { - res = WriteF(stream, - "[$P, $P) $W $U ---\n", - (WriteFP)oldLimit, (WriteFP)base, - (WriteFW)AddrOffset(oldLimit, base), - (WriteFU)AddrOffset(oldLimit, base), - NULL); - if (res != ResOK) return res; - } - + TRACT_TRACT_FOR(tract, addr, ChunkArena(chunk), + PageTract(ChunkPage(chunk, chunk->allocBase)), + chunk->limit) + { res = WriteF(stream, - "[$P, $P) $W $U $P ($S)\n", - (WriteFP)base, (WriteFP)limit, - (WriteFW)size, (WriteFW)size, + "[$P, $P) $U $P ($S)\n", + (WriteFP)TractBase(tract), (WriteFP)TractLimit(tract), + (WriteFW)ArenaAlign(ChunkArena(chunk)), (WriteFP)TractPool(tract), (WriteFS)(TractPool(tract)->class->name), 
NULL); if (res != ResOK) return res; - b = TractNext(&tract, arena, TractBase(tract)); - oldLimit = limit; + return ResOK; } return ResOK; } +/* ArenaDescribeTracts -- describe all the tracts in the arena */ + +Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream) +{ + if (!TESTT(Arena, arena)) return ResFAIL; + if (stream == NULL) return ResFAIL; + + (void)SplayTreeTraverse(ArenaChunkTree(arena), arenaDescribeTractsInChunk, + stream, 0); + + return ResOK; +} + + /* ControlAlloc -- allocate a small block directly from the control pool * * .arena.control-pool: Actually the block will be allocated from the diff --git a/mps/code/pool.c b/mps/code/pool.c index 5741470457a..060da7a47d5 100644 --- a/mps/code/pool.c +++ b/mps/code/pool.c @@ -625,29 +625,32 @@ Bool PoolOfAddr(Pool *poolReturn, Arena arena, Addr addr) */ Bool PoolOfRange(Pool *poolReturn, Arena arena, Addr base, Addr limit) { + Bool havePool = FALSE; Pool pool; Tract tract; + Addr addr, alignedBase, alignedLimit; AVER(poolReturn != NULL); AVERT(Arena, arena); AVER(base < limit); - if (!TractOfAddr(&tract, arena, base)) - return FALSE; + alignedBase = AddrAlignDown(base, ArenaAlign(arena)); + alignedLimit = AddrAlignUp(limit, ArenaAlign(arena)); - pool = TractPool(tract); - if (!pool) - return FALSE; - - while (TractLimit(tract) < limit) { - if (!TractNext(&tract, arena, TractBase(tract))) - return FALSE; - if (TractPool(tract) != pool) + TRACT_FOR(tract, addr, arena, alignedBase, alignedLimit) { + Pool p = TractPool(tract); + if (havePool && pool != p) return FALSE; + pool = p; + havePool = TRUE; } - *poolReturn = pool; - return TRUE; + if (havePool) { + *poolReturn = pool; + return TRUE; + } else { + return FALSE; + } } diff --git a/mps/code/tract.c b/mps/code/tract.c index 01bc65cd160..6f92178b4fe 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -160,6 +160,8 @@ Res ChunkInit(Chunk chunk, Arena arena, Size pageTableSize; void *p; Res res; + Bool inserted; + Tree updatedTree = NULL; /* chunk is 
supposed to be uninitialized, so don't check it. */ AVERT(Arena, arena); @@ -214,7 +216,12 @@ Res ChunkInit(Chunk chunk, Arena arena, } TreeInit(&chunk->chunkTree); - SplayTreeInsert(ArenaChunkTree(arena), &chunk->chunkTree); + inserted = TreeInsert(&updatedTree, SplayTreeRoot(ArenaChunkTree(arena)), + &chunk->chunkTree, TreeKeyOfAddrVar(chunk), + ChunkCompare); + AVER(inserted && updatedTree); + TreeBalance(&updatedTree); + ArenaChunkTree(arena)->root = updatedTree; chunk->sig = ChunkSig; AVERT(Chunk, chunk); @@ -241,17 +248,20 @@ Res ChunkInit(Chunk chunk, Arena arena, void ChunkFinish(Chunk chunk) { Bool res; + Arena arena; AVERT(Chunk, chunk); AVER(BTIsResRange(chunk->allocTable, 0, chunk->pages)); + arena = ChunkArena(chunk); - if (ChunkArena(chunk)->hasFreeCBS) - ArenaFreeCBSDelete(ChunkArena(chunk), + if (arena->hasFreeCBS) + ArenaFreeCBSDelete(arena, PageIndexBase(chunk, chunk->allocBase), chunk->limit); - res = SplayTreeDelete(ArenaChunkTree(ChunkArena(chunk)), &chunk->chunkTree); + res = SplayTreeDelete(ArenaChunkTree(arena), &chunk->chunkTree); AVER(res); + TreeBalance(&ArenaChunkTree(arena)->root); chunk->sig = SigInvalid; @@ -312,7 +322,10 @@ Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr) AVERT_CRITICAL(Arena, arena); /* addr is arbitrary */ - if (SplayTreeFind(&tree, ArenaChunkTree(arena), TreeKeyOfAddrVar(addr))) { + if (TreeFind(&tree, SplayTreeRoot(ArenaChunkTree(arena)), + TreeKeyOfAddrVar(addr), ChunkCompare) + == CompareEQUAL) + { Chunk chunk = ChunkOfTree(tree); AVER_CRITICAL(chunk->base <= addr && addr < chunk->limit); *chunkReturn = chunk; diff --git a/mps/code/tract.h b/mps/code/tract.h index 7aa20572bd7..b825e268c68 100644 --- a/mps/code/tract.h +++ b/mps/code/tract.h @@ -248,7 +248,7 @@ extern Bool TractFirst(Tract *tractReturn, Arena arena); extern Bool TractNext(Tract *tractReturn, Arena arena, Addr addr); -/* TRACT_TRACT_FOR -- iterate over a range of tracts +/* TRACT_TRACT_FOR -- iterate over a range of tracts in a chunk 
* * See . * Parameters arena & limit are evaluated multiple times. @@ -265,7 +265,7 @@ extern Bool TractNext(Tract *tractReturn, Arena arena, Addr addr); (tract = NULL) /* terminate loop */)) -/* TRACT_FOR -- iterate over a range of tracts +/* TRACT_FOR -- iterate over a range of tracts in a chunk * * See . * Parameters arena & limit are evaluated multiple times. diff --git a/mps/code/tree.c b/mps/code/tree.c index 9e19ef6edf9..0f17e0672f9 100644 --- a/mps/code/tree.c +++ b/mps/code/tree.c @@ -70,8 +70,6 @@ Count TreeDebugCount(Tree tree, TreeCompare compare, TreeKeyMethod key) } -#if 0 /* This code is not currently in use in the MPS */ - /* TreeFind -- search for a node matching the key * * If a matching node is found, sets *treeReturn to that node and returns @@ -134,7 +132,7 @@ Bool TreeInsert(Tree *treeReturn, Tree root, Tree node, Compare cmp; AVER(treeReturn != NULL); - AVER(Tree, root); + AVERT(Tree, root); AVER(TreeCheckLeaf(node)); AVER(FUNCHECK(compare)); /* key is arbitrary */ @@ -166,6 +164,8 @@ Bool TreeInsert(Tree *treeReturn, Tree root, Tree node, } +#if 0 /* This code is currently not in use in the MPS */ + /* TreeTraverseMorris -- traverse tree inorder in constant space * * The tree may not be accessed or modified during the traversal, and @@ -432,9 +432,6 @@ Tree TreeReverseRightSpine(Tree tree) } -#if 0 /* This code is currently not in use in the MPS */ - - /* TreeToVine -- unbalance a tree into a single right spine */ Count TreeToVine(Tree *link) @@ -488,9 +485,6 @@ void TreeBalance(Tree *treeIO) } -#endif /* not currently in use in the MPS */ - - /* C. COPYRIGHT AND LICENSE * * Copyright (C) 2014 Ravenbrook Limited . From 2f0ef9355da932afb53cc343b1aebd8e8fb68173 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Tue, 20 May 2014 18:12:37 +0100 Subject: [PATCH 08/24] Clarify tracefix logic by unwinding the nested conditions. 
Change the arena's chunk tree from a splay tree to an ordinary tree (so that it's not possible to accidentally splay it and leave it unbalanced). New function TreeFindNext allows us to implement TractFirst and TractNext without having to splay the tree. Make sure all operations on the chunk tree leave it balanced. But don't balance the tree directly in ChunkFinish() because this is only ever called in a loop where multiple chunks are being deleted from the tre. Instead use the sequence TreeToVine -- iterate and delete -- TreeBalance. The new macro TREE_DESTROY assists with this. No need any more for ArenaIsReservedAddr, CHUNK_OF_ADDR, TRACT_OF_ADDR. Update design documentation. Copied from Perforce Change: 186212 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 43 ++++++++-- mps/code/arenacl.c | 9 ++- mps/code/arenavm.c | 38 ++++++--- mps/code/gcbench.c | 3 +- mps/code/mpm.h | 5 +- mps/code/mpmst.h | 2 +- mps/code/trace.c | 112 ++++++++++++++------------ mps/code/tract.c | 41 +++------- mps/code/tract.h | 27 ------- mps/code/tree.c | 43 ++++++++++ mps/code/tree.h | 16 ++++ mps/design/arena.txt | 13 ++- mps/design/class-interface.txt | 13 ++- mps/design/critical-path.txt | 21 ++--- mps/design/scan.txt | 16 ++-- mps/design/trace.txt | 140 ++++++++++++++++++++------------- 16 files changed, 316 insertions(+), 226 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 70e7931cb66..1bee7fd7795 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -148,7 +148,7 @@ Bool ArenaCheck(Arena arena) CHECKD(Chunk, arena->primary); } /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. 
*/ - CHECKL(SplayTreeCheck(ArenaChunkTree(arena))); + CHECKL(TreeCheck(ArenaChunkTree(arena))); /* nothing to check for chunkSerial */ CHECKL(LocusCheck(arena)); @@ -205,7 +205,7 @@ Res ArenaInit(Arena arena, ArenaClass class, Align alignment, ArgList args) arena->zoned = zoned; arena->primary = NULL; - SplayTreeInit(ArenaChunkTree(arena), ChunkCompare, ChunkKey, SplayTrivUpdate); + arena->chunkTree = TreeEMPTY; arena->chunkSerial = (Serial)0; LocusInit(arena); @@ -349,8 +349,7 @@ void ArenaFinish(Arena arena) arena->sig = SigInvalid; GlobalsFinish(ArenaGlobals(arena)); LocusFinish(arena); - AVER(SplayTreeIsEmpty(ArenaChunkTree(arena))); - SplayTreeFinish(ArenaChunkTree(arena)); + AVER(ArenaChunkTree(arena) == TreeEMPTY); } @@ -535,8 +534,8 @@ Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream) if (!TESTT(Arena, arena)) return ResFAIL; if (stream == NULL) return ResFAIL; - (void)SplayTreeTraverse(ArenaChunkTree(arena), arenaDescribeTractsInChunk, - stream, 0); + (void)TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, + arenaDescribeTractsInChunk, stream, 0); return ResOK; } @@ -601,6 +600,33 @@ Res ControlDescribe(Arena arena, mps_lib_FILE *stream) } +/* ArenaChunkInsert -- insert chunk into arena's chunk tree + * + * Note that there's no corresponding ArenaChunkDelete. That's because + * we don't have a function that deletes an item from a balanced tree + * efficiently. Instead, deletions from the chunk tree are carried out + * by calling TreeToVine, iterating over the vine (where deletion is + * straightforward) and then calling TreeBalance. This is efficient + * when deleting all the chunks at a time in ArenaFinish, and + * acceptable in VMCompact when multiple chunks may be deleted from + * the tree. 
+ */ + +void ArenaChunkInsert(Arena arena, Tree tree) { + Bool inserted; + Tree updatedTree = NULL; + + AVERT(Arena, arena); + AVERT(Tree, tree); + + inserted = TreeInsert(&updatedTree, ArenaChunkTree(arena), + tree, ChunkKey(tree), ChunkCompare); + AVER(inserted && updatedTree); + TreeBalance(&updatedTree); + arena->chunkTree = updatedTree; +} + + /* arenaAllocPage -- allocate one page from the arena * * This is a primitive allocator used to allocate pages for the arena CBS. @@ -668,7 +694,8 @@ static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool) goto found; closure.avoid = arena->primary; - if (SplayTreeTraverse(ArenaChunkTree(arena), arenaAllocPageInChunk, &closure, 0) + if (TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, + arenaAllocPageInChunk, &closure, 0) == FALSE) goto found; @@ -907,7 +934,7 @@ static Res arenaAllocFromCBS(Tract *tractReturn, ZoneSet zones, Bool high, /* Step 2. Make memory available in the address space range. */ - b = CHUNK_OF_ADDR(&chunk, arena, RangeBase(&range)); + b = ChunkOfAddr(&chunk, arena, RangeBase(&range)); AVER(b); AVER(RangeIsAligned(&range, ChunkPageSize(chunk))); baseIndex = INDEX_OF_ADDR(chunk, RangeBase(&range)); diff --git a/mps/code/arenacl.c b/mps/code/arenacl.c index 1a2c52f30e0..7d8c3143f3d 100644 --- a/mps/code/arenacl.c +++ b/mps/code/arenacl.c @@ -290,14 +290,15 @@ static Res ClientArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args) static void ClientArenaFinish(Arena arena) { ClientArena clientArena; + Tree *treeref, tree, next; clientArena = Arena2ClientArena(arena); AVERT(ClientArena, clientArena); /* destroy all chunks, including the primary */ arena->primary = NULL; - while (!SplayTreeIsEmpty(ArenaChunkTree(arena))) { - clientChunkDestroy(ChunkOfTree(SplayTreeRoot(ArenaChunkTree(arena)))); + TREE_DESTROY(treeref, tree, next, arena->chunkTree) { + clientChunkDestroy(ChunkOfTree(tree)); } clientArena->sig = SigInvalid; @@ -350,8 +351,8 @@ static Size 
ClientArenaReserved(Arena arena) AVERT(Arena, arena); - TreeTraverse(SplayTreeRoot(ArenaChunkTree(arena)), ChunkCompare, ChunkKey, - clientArenaReservedVisitor, &size, 0); + (void)TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, + clientArenaReservedVisitor, &size, 0); return size; } diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c index e5343c1329a..210c943d047 100644 --- a/mps/code/arenavm.c +++ b/mps/code/arenavm.c @@ -589,6 +589,7 @@ static void VMArenaFinish(Arena arena) { VMArena vmArena; VM arenaVM; + Tree *treeref, tree, next; vmArena = Arena2VMArena(arena); AVERT(VMArena, vmArena); @@ -598,10 +599,10 @@ static void VMArenaFinish(Arena arena) /* destroy all chunks, including the primary */ arena->primary = NULL; - while (!SplayTreeIsEmpty(ArenaChunkTree(arena))) { - vmChunkDestroy(ChunkOfTree(SplayTreeRoot(ArenaChunkTree(arena)))); + TREE_DESTROY(treeref, tree, next, arena->chunkTree) { + vmChunkDestroy(ChunkOfTree(tree)); } - + /* Destroying the chunks should have purged and removed all spare pages. */ RingFinish(&vmArena->spareRing); @@ -645,8 +646,8 @@ static Size VMArenaReserved(Arena arena) AVERT(Arena, arena); - TreeTraverse(SplayTreeRoot(ArenaChunkTree(arena)), ChunkCompare, ChunkKey, - vmArenaReservedVisitor, &size, 0); + (void)TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, + vmArenaReservedVisitor, &size, 0); return size; } @@ -1067,8 +1068,9 @@ static void VMFree(Addr base, Size size, Pool pool) BTResRange(chunk->allocTable, piBase, piLimit); /* Consider returning memory to the OS. */ - /* TODO: Chunks are only destroyed when ArenaCompact is called, and that is - only called from TraceReclaim. Should consider destroying chunks here. */ + /* TODO: Chunks are only destroyed when ArenaCompact is called, and + that is only called from traceReclaim. Should consider destroying + chunks here. See job003815. 
*/ if (arena->spareCommitted > arena->spareCommitLimit) { /* Purge half of the spare memory, not just the extra sliver, so that we return a reasonable amount of memory in one go, and avoid @@ -1085,7 +1087,7 @@ static void VMCompact(Arena arena, Trace trace) { VMArena vmArena; Size vmem1; - Tree tree; + Tree *tree; vmArena = Arena2VMArena(arena); AVERT(VMArena, vmArena); @@ -1093,21 +1095,31 @@ static void VMCompact(Arena arena, Trace trace) vmem1 = VMArenaReserved(arena); - tree = SplayTreeFirst(ArenaChunkTree(arena)); - while (tree != TreeEMPTY) { - Chunk chunk = ChunkOfTree(tree); - TreeKey key = ChunkKey(tree); + /* Destroy all the chunks that are completely free. Be very careful + * about the order of operations on the tree because vmChunkDestroy + * unmaps the memory that the tree node resides in, so the next tree + * node has to be looked up first. TODO: add hysteresis here. See + * job003815. */ + tree = &arena->chunkTree; + TreeToVine(tree); + while (*tree != TreeEMPTY) { + Chunk chunk = ChunkOfTree(*tree); AVERT(Chunk, chunk); if(chunk != arena->primary && BTIsResRange(chunk->allocTable, 0, chunk->pages)) { Addr base = chunk->base; Size size = ChunkSize(chunk); + Tree next = TreeRight(*tree); vmChunkDestroy(chunk); vmArena->contracted(arena, base, size); + *tree = next; + } else { + tree = &(*tree)->right; } - tree = SplayTreeNext(ArenaChunkTree(arena), key); } + TreeBalance(&arena->chunkTree); + { Size vmem0 = trace->preTraceArenaReserved; Size vmem2 = VMArenaReserved(arena); diff --git a/mps/code/gcbench.c b/mps/code/gcbench.c index 8b841cbeeb0..09add03a10b 100644 --- a/mps/code/gcbench.c +++ b/mps/code/gcbench.c @@ -245,7 +245,8 @@ static void arena_setup(gcthread_fn_t fn, } MPS_ARGS_END(args); watch(fn, name); mps_arena_park(arena); - printf("%u chunks\n", (unsigned)SplayDebugCount(ArenaChunkTree(arena))); + printf("%u chunks\n", (unsigned)TreeDebugCount(ArenaChunkTree(arena), + ChunkCompare, ChunkKey)); mps_pool_destroy(pool); 
mps_fmt_destroy(format); if (ngen > 0) diff --git a/mps/code/mpm.h b/mps/code/mpm.h index 33c994c1236..1457677e9df 100644 --- a/mps/code/mpm.h +++ b/mps/code/mpm.h @@ -518,7 +518,7 @@ extern Ring GlobalsRememberedSummaryRing(Globals); #define ArenaAlign(arena) ((arena)->alignment) #define ArenaGreyRing(arena, rank) (&(arena)->greyRing[rank]) #define ArenaPoolRing(arena) (&ArenaGlobals(arena)->poolRing) -#define ArenaChunkTree(arena) (&(arena)->chunkTree) +#define ArenaChunkTree(arena) RVALUE((arena)->chunkTree) extern void ArenaEnterLock(Arena arena, Bool recursive); extern void ArenaLeaveLock(Arena arena, Bool recursive); @@ -551,6 +551,7 @@ extern Res ArenaStartCollect(Globals globals, int why); extern Res ArenaCollect(Globals globals, int why); extern Bool ArenaHasAddr(Arena arena, Addr addr); extern Res ArenaAddrObject(Addr *pReturn, Arena arena, Addr addr); +extern void ArenaChunkInsert(Arena arena, Tree tree); extern void ArenaSetEmergency(Arena arena, Bool emergency); extern Bool ArenaEmergency(Arena arean); @@ -615,8 +616,6 @@ extern void ArenaCompact(Arena arena, Trace trace); extern Res ArenaFinalize(Arena arena, Ref obj); extern Res ArenaDefinalize(Arena arena, Ref obj); -extern Bool ArenaIsReservedAddr(Arena arena, Addr addr); - #define ArenaReservoir(arena) (&(arena)->reservoirStruct) #define ReservoirPool(reservoir) (&(reservoir)->poolStruct) diff --git a/mps/code/mpmst.h b/mps/code/mpmst.h index 7334ab56fa4..aa5324e9f27 100644 --- a/mps/code/mpmst.h +++ b/mps/code/mpmst.h @@ -645,7 +645,7 @@ typedef struct mps_arena_s { Addr lastTractBase; /* base address of lastTract */ Chunk primary; /* the primary chunk */ - SplayTreeStruct chunkTree; /* all the chunks */ + Tree chunkTree; /* all the chunks */ Serial chunkSerial; /* next chunk number */ Bool hasFreeCBS; /* Is freeCBS available? 
*/ diff --git a/mps/code/trace.c b/mps/code/trace.c index 219c6d89a2c..db00cb6a201 100644 --- a/mps/code/trace.c +++ b/mps/code/trace.c @@ -1259,7 +1259,12 @@ mps_res_t _mps_fix2(mps_ss_t mps_ss, mps_addr_t *mps_ref_io) { ScanState ss = PARENT(ScanStateStruct, ss_s, mps_ss); Ref ref; + Chunk chunk; + Index i; Tract tract; + Seg seg; + Res res; + Pool pool; /* Special AVER macros are used on the critical path. */ /* See */ @@ -1276,59 +1281,64 @@ mps_res_t _mps_fix2(mps_ss_t mps_ss, mps_addr_t *mps_ref_io) STATISTIC(++ss->fixRefCount); EVENT4(TraceFix, ss, mps_ref_io, ref, ss->rank); - TRACT_OF_ADDR(&tract, ss->arena, ref); - if(tract) { - if(TraceSetInter(TractWhite(tract), ss->traces) != TraceSetEMPTY) { - Seg seg; - if(TRACT_SEG(&seg, tract)) { - Res res; - Pool pool; - STATISTIC(++ss->segRefCount); - STATISTIC(++ss->whiteSegRefCount); - EVENT1(TraceFixSeg, seg); - EVENT0(TraceFixWhite); - pool = TractPool(tract); - res = (*ss->fix)(pool, ss, seg, &ref); - if(res != ResOK) { - /* PoolFixEmergency should never fail. */ - AVER_CRITICAL(ss->fix != PoolFixEmergency); - /* Fix protocol (de facto): if Fix fails, ref must be unchanged - * Justification for this restriction: - * A: it simplifies; - * B: it's reasonable (given what may cause Fix to fail); - * C: the code (here) already assumes this: it returns without - * updating ss->fixedSummary. RHSK 2007-03-21. - */ - AVER(ref == (Ref)*mps_ref_io); - return res; - } - } else { - /* Only tracts with segments ought to have been condemned. */ - /* SegOfAddr FALSE => a ref into a non-seg Tract (poolmv etc) */ - /* .notwhite: ...But it should NOT be white. - * [I assert this both from logic, and from inspection of the - * current condemn code. RHSK 2010-11-30] - */ - NOTREACHED; - } - } else { - /* Tract isn't white. Don't compute seg for non-statistical */ - /* variety. 
See */ - STATISTIC_STAT - ({ - Seg seg; - if(TRACT_SEG(&seg, tract)) { - ++ss->segRefCount; - EVENT1(TraceFixSeg, seg); - } - }); - } - } else { - /* See */ - AVER(ss->rank < RankEXACT - || !ArenaIsReservedAddr(ss->arena, ref)); + /* This sequence of tests is equivalent to calling TractOfAddr(), + * but inlined so that we can distinguish between "not pointing to + * chunk" and "pointing to chunk but not to tract" so that we can + * check the rank in the latter case. See + * */ + if (!ChunkOfAddr(&chunk, ss->arena, ref)) + /* Reference points outside MPS-managed address space: ignore. */ + goto done; + + i = INDEX_OF_ADDR(chunk, ref); + if (!BTGet(chunk->allocTable, i)) { + /* Reference points into a chunk but not to an allocated tract. + * See */ + AVER_CRITICAL(ss->rank < RankEXACT); + goto done; } + tract = PageTract(&chunk->pageTable[i]); + if (TraceSetInter(TractWhite(tract), ss->traces) == TraceSetEMPTY) { + /* Reference points to a tract that is not white for any of the + * active traces. See */ + STATISTIC_STAT + ({ + if(TRACT_SEG(&seg, tract)) { + ++ss->segRefCount; + EVENT1(TraceFixSeg, seg); + } + }); + goto done; + } + + if (!TRACT_SEG(&seg, tract)) { + /* Tracts without segments must not be condemned. */ + NOTREACHED; + goto done; + } + + STATISTIC(++ss->segRefCount); + STATISTIC(++ss->whiteSegRefCount); + EVENT1(TraceFixSeg, seg); + EVENT0(TraceFixWhite); + pool = TractPool(tract); + res = (*ss->fix)(pool, ss, seg, &ref); + if (res != ResOK) { + /* PoolFixEmergency must not fail. */ + AVER_CRITICAL(ss->fix != PoolFixEmergency); + /* Fix protocol (de facto): if Fix fails, ref must be unchanged + * Justification for this restriction: + * A: it simplifies; + * B: it's reasonable (given what may cause Fix to fail); + * C: the code (here) already assumes this: it returns without + * updating ss->fixedSummary. RHSK 2007-03-21. 
+ */ + AVER_CRITICAL(ref == (Ref)*mps_ref_io); + return res; + } + +done: /* See */ ss->fixedSummary = RefSetAdd(ss->arena, ss->fixedSummary, ref); diff --git a/mps/code/tract.c b/mps/code/tract.c index 6f92178b4fe..2a688bf5ea3 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -160,8 +160,6 @@ Res ChunkInit(Chunk chunk, Arena arena, Size pageTableSize; void *p; Res res; - Bool inserted; - Tree updatedTree = NULL; /* chunk is supposed to be uninitialized, so don't check it. */ AVERT(Arena, arena); @@ -216,16 +214,12 @@ Res ChunkInit(Chunk chunk, Arena arena, } TreeInit(&chunk->chunkTree); - inserted = TreeInsert(&updatedTree, SplayTreeRoot(ArenaChunkTree(arena)), - &chunk->chunkTree, TreeKeyOfAddrVar(chunk), - ChunkCompare); - AVER(inserted && updatedTree); - TreeBalance(&updatedTree); - ArenaChunkTree(arena)->root = updatedTree; chunk->sig = ChunkSig; AVERT(Chunk, chunk); + ArenaChunkInsert(arena, &chunk->chunkTree); + /* As part of the bootstrap, the first created chunk becomes the primary chunk. This step allows AreaFreeCBSInsert to allocate pages. 
*/ if (arena->primary == NULL) @@ -247,10 +241,10 @@ Res ChunkInit(Chunk chunk, Arena arena, void ChunkFinish(Chunk chunk) { - Bool res; Arena arena; AVERT(Chunk, chunk); + AVER(BTIsResRange(chunk->allocTable, 0, chunk->pages)); arena = ChunkArena(chunk); @@ -259,10 +253,6 @@ void ChunkFinish(Chunk chunk) PageIndexBase(chunk, chunk->allocBase), chunk->limit); - res = SplayTreeDelete(ArenaChunkTree(arena), &chunk->chunkTree); - AVER(res); - TreeBalance(&ArenaChunkTree(arena)->root); - chunk->sig = SigInvalid; TreeFinish(&chunk->chunkTree); @@ -322,8 +312,8 @@ Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr) AVERT_CRITICAL(Arena, arena); /* addr is arbitrary */ - if (TreeFind(&tree, SplayTreeRoot(ArenaChunkTree(arena)), - TreeKeyOfAddrVar(addr), ChunkCompare) + if (TreeFind(&tree, ArenaChunkTree(arena), TreeKeyOfAddrVar(addr), + ChunkCompare) == CompareEQUAL) { Chunk chunk = ChunkOfTree(tree); @@ -345,14 +335,16 @@ Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr) static Bool chunkAboveAddr(Chunk *chunkReturn, Arena arena, Addr addr) { Tree tree; + Chunk chunk; AVER_CRITICAL(chunkReturn != NULL); AVERT_CRITICAL(Arena, arena); /* addr is arbitrary */ - tree = SplayTreeNext(ArenaChunkTree(arena), TreeKeyOfAddrVar(addr)); - if (tree != TreeEMPTY) { - Chunk chunk = ChunkOfTree(tree); + if (TreeFindNext(&tree, ArenaChunkTree(arena), TreeKeyOfAddrVar(addr), + ChunkCompare)) + { + chunk = ChunkOfTree(tree); AVER_CRITICAL(addr < chunk->base); *chunkReturn = chunk; return TRUE; @@ -361,19 +353,6 @@ static Bool chunkAboveAddr(Chunk *chunkReturn, Arena arena, Addr addr) } -/* ArenaIsReservedAddr -- is address managed by this arena? */ - -Bool ArenaIsReservedAddr(Arena arena, Addr addr) -{ - Chunk dummy; - - AVERT(Arena, arena); - /* addr is arbitrary */ - - return ChunkOfAddr(&dummy, arena, addr); -} - - /* IndexOfAddr -- return the index of the page containing an address * * Function version of INDEX_OF_ADDR, for debugging purposes. 
diff --git a/mps/code/tract.h b/mps/code/tract.h index b825e268c68..0b9a86d1df2 100644 --- a/mps/code/tract.h +++ b/mps/code/tract.h @@ -171,14 +171,6 @@ extern void ChunkCacheEntryInit(ChunkCacheEntry entry); extern Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr); extern Res ChunkNodeDescribe(Tree node, mps_lib_FILE *stream); -/* CHUNK_OF_ADDR -- return the chunk containing an address - * - * arena and addr are evaluated multiple times. - */ - -#define CHUNK_OF_ADDR(chunkReturn, arena, addr) \ - ChunkOfAddr(chunkReturn, arena, addr) - /* AddrPageBase -- the base of the page this address is on */ @@ -191,25 +183,6 @@ extern Res ChunkNodeDescribe(Tree node, mps_lib_FILE *stream); extern Tract TractOfBaseAddr(Arena arena, Addr addr); extern Bool TractOfAddr(Tract *tractReturn, Arena arena, Addr addr); -/* TRACT_OF_ADDR -- return the tract containing an address */ - -#define TRACT_OF_ADDR(tractReturn, arena, addr) \ - BEGIN \ - Arena _arena = (arena); \ - Addr _addr = (addr); \ - Chunk _chunk; \ - Index _i; \ - \ - if (CHUNK_OF_ADDR(&_chunk, _arena, _addr)) { \ - _i = INDEX_OF_ADDR(_chunk, _addr); \ - if (BTGet(_chunk->allocTable, _i)) \ - *(tractReturn) = PageTract(&_chunk->pageTable[_i]); \ - else \ - *(tractReturn) = NULL; \ - } else \ - *(tractReturn) = NULL; \ - END - /* INDEX_OF_ADDR -- return the index of the page containing an address * diff --git a/mps/code/tree.c b/mps/code/tree.c index 0f17e0672f9..0e1bb372f65 100644 --- a/mps/code/tree.c +++ b/mps/code/tree.c @@ -117,6 +117,49 @@ Compare TreeFind(Tree *treeReturn, Tree root, TreeKey key, TreeCompare compare) } +/* TreeFindNext -- search for node containing key, or next node + * + * If there is a node that is greater than key, set treeReturn to that + * node and return TRUE. + * + * Otherwise, key is greater than all nodes in the tree, so leave + * treeReturn unchanged and return FALSE. 
+ */ + +Bool TreeFindNext(Tree *treeReturn, Tree root, TreeKey key, TreeCompare compare) +{ + Tree node, best = NULL; + Bool result = FALSE; + + AVERT(Tree, root); + AVER(treeReturn != NULL); + AVER(FUNCHECK(compare)); + /* key is arbitrary */ + + node = root; + while (node != TreeEMPTY) { + Compare cmp = compare(node, key); + switch (cmp) { + case CompareLESS: + best = node; + result = TRUE; + node = node->left; + break; + case CompareEQUAL: + case CompareGREATER: + node = node->right; + break; + default: + NOTREACHED; + return FALSE; + } + } + + *treeReturn = best; + return result; +} + + /* TreeInsert -- insert a node into a tree * * If the key doesn't exist in the tree, inserts a node as a leaf of the diff --git a/mps/code/tree.h b/mps/code/tree.h index 159550406ac..757344c2c64 100644 --- a/mps/code/tree.h +++ b/mps/code/tree.h @@ -113,6 +113,8 @@ extern Count TreeDebugCount(Tree tree, TreeCompare compare, TreeKeyMethod key); extern Compare TreeFind(Tree *treeReturn, Tree root, TreeKey key, TreeCompare compare); +extern Bool TreeFindNext(Tree *treeReturn, Tree root, + TreeKey key, TreeCompare compare); extern Bool TreeInsert(Tree *treeReturn, Tree root, Tree node, TreeKey key, TreeCompare compare); @@ -132,6 +134,20 @@ extern Tree TreeReverseRightSpine(Tree tree); extern Count TreeToVine(Tree *treeIO); extern void TreeBalance(Tree *treeIO); +/* TREE_DESTROY -- iterate over a tree while destroying it. + * + * root is a lvalue storing the root of the tree. + * treeref is a variable of type Tree*. + * tree and next are variables of type Tree. + * In the body of the loop, tree is the current node. + */ +#define TREE_DESTROY(treeref, tree, next, root) \ + for ((treeref = &(root), TreeToVine(treeref)); \ + (tree = *treeref) != TreeEMPTY \ + ? 
(next = tree->right, TRUE) \ + : (TreeBalance(treeref), FALSE); \ + *treeref = next) + #endif /* tree_h */ diff --git a/mps/design/arena.txt b/mps/design/arena.txt index a1fae81ce5e..6c48d2ad111 100644 --- a/mps/design/arena.txt +++ b/mps/design/arena.txt @@ -273,7 +273,9 @@ memory represented by the tract. _`.tract.field.white`: The white bit-field indicates for which traces the tract is white (`.req.fun.trans.white`_). This information is also stored in the segment, but is duplicated here for efficiency during a -call to ``TraceFix`` (see design.mps.trace.fix). +call to ``TraceFix`` (see `design.mps.trace.fix`_). + +.. _design.mps.trace.fix: trace#fix _`.tract.limit`: The limit of the tract's memory may be determined by adding the arena alignment to the base address. @@ -283,9 +285,8 @@ design.mps.arena.tract-iter(0). ``Bool TractOfAddr(Tract *tractReturn, Arena arena, Addr addr)`` -_`.tract.if.tractofaddr`: The function ``TractOfAddr()`` finds the tract -corresponding to an address in memory. (See `.req.fun.trans`_.) - +_`.tract.if.tractofaddr`: The function ``TractOfAddr()`` finds the +tract corresponding to an address in memory. (See `.req.fun.trans`_.) If ``addr`` is an address which has been allocated to some pool, then ``TractOfAddr()`` returns ``TRUE``, and sets ``*tractReturn`` to the tract corresponding to that address. Otherwise, it returns ``FALSE``. @@ -293,10 +294,6 @@ This function is similar to ``TractOfBaseAddr()`` (see design.mps.arena.tract-iter.if.contig-base) but serves a more general purpose and is less efficient. -_`.tract.if.TRACT_OF_ADDR`: ``TRACT_OF_ADDR()`` is a macro version of -``TractOfAddr()``. It's provided for efficiency during a call to -``TraceFix()`` (see design.mps.trace.fix.tractofaddr). - Control pool ............ 
diff --git a/mps/design/class-interface.txt b/mps/design/class-interface.txt index 1f716703dfd..ba0b6124d8d 100644 --- a/mps/design/class-interface.txt +++ b/mps/design/class-interface.txt @@ -171,13 +171,12 @@ reference in them. jumping through hoops required. David Jones, 1998-01-30. The ``fix`` field is used to perform fixing. This method is called via -the generic function ``TraceFix()``. It indicates that the specified -reference has been found and the class should consider the object -live. There is provision for adjusting the value of the reference (to -allow for classes that move objects). Classes are not required to -provide this method, and not doing so indicates that the class is not -automatic style (ie it does not use global tracing to determine -liveness). +the function ``TraceFix()``. It indicates that the specified reference +has been found and the class should consider the object live. There is +provision for adjusting the value of the reference (to allow for +classes that move objects). Classes are not required to provide this +method, and not doing so indicates that the class is not automatic +style (ie it does not use global tracing to determine liveness). The ``reclaim`` field is used to reclaim memory. This method is called via the generic function ``PoolReclaim()``. It indicates that the trace diff --git a/mps/design/critical-path.txt b/mps/design/critical-path.txt index 9bcac7e17e8..f5714cee04e 100644 --- a/mps/design/critical-path.txt +++ b/mps/design/critical-path.txt @@ -230,20 +230,21 @@ If a pointer gets past the first-stage fix filters, it is passed to yet more pointers using information about segments before it has to consult the pool class. -The first test applied is the "tract test". The MPS looks up the tract -containing the address in the tract table, which is a simple linear -table indexed by the address shifted -- a kind of flat page table. 
+The first test is to determine if the address points to a *chunk* (a +contiguous region of address space managed by the arena). Addresses +that do not point to any chunk (for example, ambiguous references that +are not in fact pointers) are rejected immediately. -Note that if the arena has been extended, the tract table becomes less -simple, and this test may involved looking in more than one table. -This will cause a considerable slow-down in garbage collection -scanning. This is the reason that it's important to give a good +When there are many chunks (that is, when the arena has been extended +many times), this test can consume the majority of the garbage +collection time. This is the reason that it's important to give a good estimate of the amount of address space you will ever occupy with objects when you initialize the arena. -The pointer might not even be in the arena (and so not in any tract). -The first stage fix doesn't guarantee it. So we eliminate any pointers -not in the arena at this stage. +The second test applied is the "tract test". The MPS looks up the +tract containing the address in the tract table, which is a simple +linear table indexed by the address shifted -- a kind of flat page +table. If the pointer is in an allocated tract, then the table also contains a cache of the "white set" -- the set of garbage collection traces for diff --git a/mps/design/scan.txt b/mps/design/scan.txt index 1683fecc366..a1822a3ce6a 100644 --- a/mps/design/scan.txt +++ b/mps/design/scan.txt @@ -19,8 +19,8 @@ Scanned summary ............... _`.summary.subset`: The summary of reference seens by scan -(ss.unfixedSummary) is a subset of the summary previously computed -(SegSummary). +(``ss.unfixedSummary``) is a subset of the summary previously computed +(``SegSummary()``). There are two reasons that it is not an equality relation: #. 
A write barrier hit will set the summary to ``RefSetUNIV``. -The reason that ss.unfixedSummary is always a subset of the previous -summary is due to an "optimization" which has not been made in -``TraceFix``. See impl.c.trace.fix.fixed.all. +The reason that ``ss.unfixedSummary`` is always a subset of the +previous summary is due to an "optimization" which has not been made +in ``TraceFix``. See `design.mps.trace.fix.fixed.all`_. + +.. _design.mps.trace.fix.fixed.all: trace#fix.fixed.all Partial scans @@ -54,8 +56,8 @@ partial scans of condemned segments contribute to the segment summary. _`.clever-summary.acc`: Each time we partially scan a segment, we accumulate the post-scan summary of the scanned objects into a field -in the group, called 'summarySoFar'. The post-scan summary is (summary -\ white) U fixed. +in the group, called ``summarySoFar``. The post-scan summary is +(summary \ white) ∪ fixed. _`.clever-summary.acc.condemn`: The cumulative summary is only meaningful while the segment is condemned. Otherwise it is set to diff --git a/mps/design/trace.txt b/mps/design/trace.txt index 4186a90215d..f1d25bf25bc 100644 --- a/mps/design/trace.txt +++ b/mps/design/trace.txt @@ -25,15 +25,16 @@ Introduction Architecture ------------ -_`.instance.limit`: There will be a limit on the number of traces that -can be created at any one time. This effectively limits the number of -concurrent traces. This limitation is expressed in the symbol -``TRACE_MAX``. +_`.instance.limit`: There is a limit on the number of traces that can +be created at any one time. This limits the number of concurrent +traces. This limitation is expressed in the symbol ``TraceLIMIT``. .. note:: - ``TRACE_MAX`` is currently set to 1, see request.mps.160020_ - "Multiple traces would not work". David Jones, 1998-06-15. + ``TraceLIMIT`` is currently set to 1 as the MPS assumes in various + places that only a single trace is active at a time. See + request.mps.160020_ "Multiple traces would not work". 
David Jones, + 1998-06-15. .. _request.mps.160020: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/mps/160020 @@ -46,26 +47,32 @@ _`.rate`: See `mail.nickb.1997-07-31.14-37 fixedSummary`` is accumulated (in the fixer) -for all the pointers whether or not they are genuine references. We -could accumulate fewer pointers here; if a pointer fails the -``TractOfAddr()`` test then we know it isn't a reference, so we needn't -accumulate it into the fixed summary. The design allows this, but it -breaks a useful post-condition on scanning (if the accumulation of -``ss->fixedSummary`` was moved the accuracy of ``ss->fixedSummary`` -would vary according to the "width" of the white summary). See -mail.pekka.1998-02-04.16-48 for improvement suggestions. +.. note:: + + Depending on the future semantics of ``PoolDestroy()`` we might + need to adjust our strategy here. See `mail.dsm.1996-02-14.18-18`_ + for a strategy of coping gracefully with ``PoolDestroy()``. + +.. _mail.dsm.1996-02-14.18-18: https://info.ravenbrook.com/project/mps/mail/1996/02/14/18-18/0.txt + +_`.fix.fixed.all`: ``ss->fixedSummary`` is accumulated (in +``TraceFix()``) for all pointers, whether or not they are genuine +references. We could accumulate fewer pointers here; if a pointer +fails the ``TractOfAddr()`` test then we know it isn't a reference, so +we needn't accumulate it into the fixed summary. The design allows +this, but it breaks a useful post-condition on scanning (if the +accumulation of ``ss->fixedSummary`` was moved the accuracy of +``ss->fixedSummary`` would vary according to the "width" of the white +summary). See `mail.pekka.1998-02-04.16-48`_ for improvement suggestions. + +.. _mail.pekka.1998-02-04.16-48: https://info.ravenbrook.com/project/mps/mail/1998/02/04/16-48/0.txt Analysis @@ -81,6 +88,7 @@ memory for copying. .. 
_request.dylan.170560: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170560 + + Ideas ----- @@ -96,30 +104,56 @@ Implementation Speed ..... -_`.fix`: The fix path is critical to garbage collection speed. -Abstractly fix is applied to all the references in the non-white heap -and all the references in the copied heap. Remembered sets cut down -the number of segments we have to scan. The zone test cuts down the -number of references we call fix on. The speed of the remainder of the -fix path is still critical to system performance. Various -modifications to and aspects of the system are concerned with -maintaining the speed along this path. +_`.fix`: The function implementing the fix operation should be called +``TraceFix()`` and this name is pervasive in the MPS and its documents +to describe this function. Nonetheless, optimisation and strict +aliasing rules have meant that we need to use the external name for +it, ``_mps_fix2()``. -_`.fix.tractofaddr`: ``TractOfAddr()`` is called on every reference that -passes the zone test and is on the critical path, to determine whether -the segment is white. There is no need to examine the segment to -perform this test, since whiteness information is duplicated in -tracts, specifically to optimize this test. ``TractOfAddr()`` itself is -a simple class dispatch function (which dispatches to the arena -class's ``TractOfAddr()`` method). Inlining the dispatch and inlining -the functions called by ``VMTractOfAddr()`` makes a small but noticable -difference to the speed of the dylan compiler. +_`.fix.speed`: The fix path is critical to garbage collection speed. +Abstractly, the fix operation is applied to all references in the +non-white heap and all references in the copied heap. Remembered sets +cut down the number of segments we have to scan. The zone test cuts +down the number of references we call fix on. The speed of the +remainder of the fix path is still critical to system performance. 
+Various modifications to and aspects of the system are concerned with +maintaining the speed along this path. See +`design.mps.critical_path`_. + +.. _design.mps.critical_path: critical_path + +_`.fix.tractofaddr`: A reference that passes the zone test is then +looked up to find the tract it points to, an operation equivalent to +calling ``TractOfAddr()``. + +_`.fix.tractofaddr.inline`: ``TraceFix()`` doesn't actually call +``TractOfAddr()``. Instead, it expands this operation inline (calling +``ChunkOfAddr()``, then ``INDEX_OF_ADDR()``, checking the appropriate +bit in the chunk's ``allocTable``, and finally looking up the tract in +the chunk's page table). The reason for inlining this code is that we +need to know whether the reference points to a chunk (and not just +whether it points to a tract) in order to check the `.exact.legal`_ +condition. + +_`.fix.whiteseg`: The reason for looking up the tract is to determine +whether the segment is white. There is no need to examine the segment +to perform this test, since whiteness information is duplicated in +tracts, specifically to optimize this test. + +.. note:: + + Nonetheless, it is likely to be more efficient to maintain a + separate lookup table from address to white segment, rather than + indirecting through the chunk and the tract. See job003796_. + +.. _job003796: http://www.ravenbrook.com/project/mps/issue/job003796/ _`.fix.noaver`: ``AVER()`` statements in the code add bulk to the code (reducing I-cache efficacy) and add branches to the path (polluting -the branch pedictors) resulting in a slow down. Removing all the -``AVER()`` statements from the fix path improves the overall speed of -the Dylan compiler by as much as 9%. +the branch pedictors) resulting in a slow down. Replacing the +``AVER()`` statements with ``AVER_CRITICAL()`` on the critical path +improves the overall speed of the Dylan compiler by as much as 9%. See +`design.mps.critical_path`_. 
_`.fix.nocopy`: ``AMCFix()`` used to copy objects by using the format's copy method. This involved a function call (through an indirection) @@ -131,19 +165,15 @@ inlined by the C compiler. This change results in a 4–5% speed-up in the Dylan compiler. _`.reclaim`: Because the reclaim phase of the trace (implemented by -``TraceReclaim()``) examines every segment it is fairly time intensive. -rit's profiles presented in request.dylan.170551_ show a gap between -the two varieties variety.hi and variety.wi. +``TraceReclaim()``) examines every segment it is fairly time +intensive. Richard Tucker's profiles presented in +request.dylan.170551_ show a gap between the two varieties variety.hi +and variety.wi. .. _request.dylan.170551: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170551 -_`.reclaim.noaver`: Converting ``AVER()`` statements in the loops of -``TraceReclaim()``, ``PoolReclaim()``, ``AMCReclaim()`` (``LOReclaim()``? -``AWLReclaim()``?) will result in a noticeable speed improvement. - -.. note:: - - Insert actual speed improvement here, if any. +_`.reclaim.noaver`: Accordingly, reclaim methods use +``AVER_CRITICAL()`` instead of ``AVER()``. Life cycle of a trace object From dbfe3ca4258fff6bba2ddb3ab80ce6324eb2768c Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Tue, 20 May 2014 19:19:14 +0100 Subject: [PATCH 09/24] Fix compilation on windows. Fix bug in ArenaDescribeTracts (only described the first tract). 
Copied from Perforce Change: 186215 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 1 - mps/code/pool.c | 2 +- mps/code/tree.h | 10 +++++----- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 1bee7fd7795..6c1990ee880 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -521,7 +521,6 @@ static Bool arenaDescribeTractsInChunk(Tree tree, void *closureP, Size closureS) (WriteFS)(TractPool(tract)->class->name), NULL); if (res != ResOK) return res; - return ResOK; } return ResOK; } diff --git a/mps/code/pool.c b/mps/code/pool.c index 060da7a47d5..0bcae0e0648 100644 --- a/mps/code/pool.c +++ b/mps/code/pool.c @@ -626,7 +626,7 @@ Bool PoolOfAddr(Pool *poolReturn, Arena arena, Addr addr) Bool PoolOfRange(Pool *poolReturn, Arena arena, Addr base, Addr limit) { Bool havePool = FALSE; - Pool pool; + Pool pool = NULL; Tract tract; Addr addr, alignedBase, alignedLimit; diff --git a/mps/code/tree.h b/mps/code/tree.h index 757344c2c64..6c5ea33320e 100644 --- a/mps/code/tree.h +++ b/mps/code/tree.h @@ -141,11 +141,11 @@ extern void TreeBalance(Tree *treeIO); * tree and next are variables of type Tree. * In the body of the loop, tree is the current node. */ -#define TREE_DESTROY(treeref, tree, next, root) \ - for ((treeref = &(root), TreeToVine(treeref)); \ - (tree = *treeref) != TreeEMPTY \ - ? (next = tree->right, TRUE) \ - : (TreeBalance(treeref), FALSE); \ +#define TREE_DESTROY(treeref, tree, next, root) \ + for ((treeref = &(root), TreeToVine(treeref), next = TreeEMPTY); \ + (tree = *treeref) != TreeEMPTY \ + ? (next = tree->right, TRUE) \ + : (TreeBalance(treeref), FALSE); \ *treeref = next) From a0e076be57fe455e7dededa6b925cbaff875d428 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Wed, 21 May 2014 00:43:06 +0100 Subject: [PATCH 10/24] Improvements following review. 
Copied from Perforce Change: 186227 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 37 +++++++++++++-------------- mps/code/arenacl.c | 3 ++- mps/code/arenavm.c | 13 +++++----- mps/code/splay.c | 23 +++++------------ mps/code/splay.h | 3 +-- mps/code/tree.c | 4 +-- mps/code/tree.h | 4 +-- mps/design/arena.txt | 61 ++++++++++++++++++++++++++++++++++++++------ mps/design/locus.txt | 5 +++- mps/design/seg.txt | 17 ++++++------ mps/design/type.txt | 8 +++--- 11 files changed, 107 insertions(+), 71 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 6c1990ee880..89abc7e0e7c 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -494,7 +494,7 @@ Res ArenaDescribe(Arena arena, mps_lib_FILE *stream) } -/* ArenaDescribeTractsInChunk -- describe all the tracts in a chunk */ +/* ArenaDescribeTractsInChunk -- describe the tracts in a chunk */ static Bool arenaDescribeTractsInChunk(Tree tree, void *closureP, Size closureS) { @@ -509,20 +509,30 @@ static Bool arenaDescribeTractsInChunk(Tree tree, void *closureP, Size closureS) if (stream == NULL) return ResFAIL; UNUSED(closureS); + res = WriteF(stream, "Chunk [$P, $P) ($U) {\n", + (WriteFP)chunk->base, (WriteFP)chunk->limit, + (WriteFU)chunk->serial, + NULL); + if (res != ResOK) return res; + TRACT_TRACT_FOR(tract, addr, ChunkArena(chunk), PageTract(ChunkPage(chunk, chunk->allocBase)), chunk->limit) { res = WriteF(stream, - "[$P, $P) $U $P ($S)\n", + " [$P, $P) $P $U ($S)\n", (WriteFP)TractBase(tract), (WriteFP)TractLimit(tract), - (WriteFW)ArenaAlign(ChunkArena(chunk)), (WriteFP)TractPool(tract), + (WriteFU)(TractPool(tract)->serial), (WriteFS)(TractPool(tract)->class->name), NULL); if (res != ResOK) return res; } - return ResOK; + + res = WriteF(stream, "} Chunk [$P, $P)\n", + (WriteFP)chunk->base, (WriteFP)chunk->limit, + NULL); + return res; } @@ -599,17 +609,7 @@ Res ControlDescribe(Arena arena, mps_lib_FILE *stream) } -/* ArenaChunkInsert -- insert chunk into arena's chunk tree - * - * Note that 
there's no corresponding ArenaChunkDelete. That's because - * we don't have a function that deletes an item from a balanced tree - * efficiently. Instead, deletions from the chunk tree are carried out - * by calling TreeToVine, iterating over the vine (where deletion is - * straightforward) and then calling TreeBalance. This is efficient - * when deleting all the chunks at a time in ArenaFinish, and - * acceptable in VMCompact when multiple chunks may be deleted from - * the tree. - */ +/* ArenaChunkInsert -- insert chunk into arena's chunk tree */ void ArenaChunkInsert(Arena arena, Tree tree) { Bool inserted; @@ -689,13 +689,12 @@ static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool) /* Favour the primary chunk, because pages allocated this way aren't currently freed, and we don't want to prevent chunks being destroyed. */ /* TODO: Consider how the ArenaCBSBlockPool might free pages. */ - if (arenaAllocPageInChunk(&arena->primary->chunkTree, &closure, 0) == FALSE) + if (!arenaAllocPageInChunk(&arena->primary->chunkTree, &closure, 0)) goto found; closure.avoid = arena->primary; - if (TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, - arenaAllocPageInChunk, &closure, 0) - == FALSE) + if (!TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, + arenaAllocPageInChunk, &closure, 0)) goto found; return ResRESOURCE; diff --git a/mps/code/arenacl.c b/mps/code/arenacl.c index 7d8c3143f3d..0129c63fea9 100644 --- a/mps/code/arenacl.c +++ b/mps/code/arenacl.c @@ -295,7 +295,8 @@ static void ClientArenaFinish(Arena arena) clientArena = Arena2ClientArena(arena); AVERT(ClientArena, clientArena); - /* destroy all chunks, including the primary */ + /* Destroy all chunks, including the primary. 
See + * */ arena->primary = NULL; TREE_DESTROY(treeref, tree, next, arena->chunkTree) { clientChunkDestroy(ChunkOfTree(tree)); diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c index 210c943d047..538f50b134a 100644 --- a/mps/code/arenavm.c +++ b/mps/code/arenavm.c @@ -597,12 +597,13 @@ static void VMArenaFinish(Arena arena) EVENT1(ArenaDestroy, vmArena); - /* destroy all chunks, including the primary */ + /* Destroy all chunks, including the primary. See + * */ arena->primary = NULL; TREE_DESTROY(treeref, tree, next, arena->chunkTree) { vmChunkDestroy(ChunkOfTree(tree)); } - + /* Destroying the chunks should have purged and removed all spare pages. */ RingFinish(&vmArena->spareRing); @@ -1095,11 +1096,9 @@ static void VMCompact(Arena arena, Trace trace) vmem1 = VMArenaReserved(arena); - /* Destroy all the chunks that are completely free. Be very careful - * about the order of operations on the tree because vmChunkDestroy - * unmaps the memory that the tree node resides in, so the next tree - * node has to be looked up first. TODO: add hysteresis here. See - * job003815. */ + /* Destroy chunks that are completely free, but not the primary + * chunk. See + * TODO: add hysteresis here. See job003815. */ tree = &arena->chunkTree; TreeToVine(tree); while (*tree != TreeEMPTY) { diff --git a/mps/code/splay.c b/mps/code/splay.c index 41cc3069412..fe677a3c866 100644 --- a/mps/code/splay.c +++ b/mps/code/splay.c @@ -694,7 +694,7 @@ static Compare SplaySplay(SplayTree splay, TreeKey key, TreeCompare compare) SplayStateStruct stateStruct; #ifdef SPLAY_DEBUG - Count count = TreeDebugCount(SplayTreeRoot(splay), splay->compare, splay->nodeKey); + Count count = SplayDebugCount(splay); #endif /* Short-circuit common cases. 
Splay trees often bring recently @@ -714,7 +714,7 @@ static Compare SplaySplay(SplayTree splay, TreeKey key, TreeCompare compare) SplayTreeSetRoot(splay, stateStruct.middle); #ifdef SPLAY_DEBUG - AVER(count == TreeDebugCount(SplayTreeRoot(splay), splay->compare, splay->nodeKey)); + AVER(count == SplayDebugCount(splay)); #endif return cmp; @@ -909,7 +909,7 @@ Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, Bool found; Compare cmp; #ifdef SPLAY_DEBUG - Count count = TreeDebugCount(SplayTreeRoot(splay), splay->compare, splay->nodeKey); + Count count = SplayDebugCount(splay); #endif @@ -951,7 +951,7 @@ Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, SplayTreeSetRoot(splay, stateStruct.middle); #ifdef SPLAY_DEBUG - AVER(count == TreeDebugCount(SplayTreeRoot(splay), splay->compare, splay->nodeKey)); + AVER(count == SplayDebugCount(splay)); #endif return found; @@ -970,9 +970,8 @@ Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, * unmodified. * * IMPORTANT: Iterating over the tree using these functions will leave - * the tree totally unbalanced, throwing away optimisations of the - * tree shape caused by previous splays. Consider using - * SplayTreeTraverse instead. + * the tree totally unbalanced, throwing away optimisations of the tree + * shape caused by previous splays. Consider using TreeTraverse instead. */ Tree SplayTreeFirst(SplayTree splay) { @@ -1013,16 +1012,6 @@ Tree SplayTreeNext(SplayTree splay, TreeKey oldKey) { } -/* SplayTreeTraverse -- iterate over splay tree without splaying it */ - -Bool SplayTreeTraverse(SplayTree splay, TreeVisitor visitor, - void *closureP, Size closureS) -{ - return TreeTraverse(splay->root, splay->compare, splay->nodeKey, - visitor, closureP, closureS); -} - - /* SplayNodeDescribe -- Describe a node in the splay tree * * Note that this breaks the restriction of .note.stack. 
diff --git a/mps/code/splay.h b/mps/code/splay.h index 9bfb27c0ed1..8aa335e88a1 100644 --- a/mps/code/splay.h +++ b/mps/code/splay.h @@ -55,8 +55,6 @@ extern Bool SplayTreeNeighbours(Tree *leftReturn, extern Tree SplayTreeFirst(SplayTree splay); extern Tree SplayTreeNext(SplayTree splay, TreeKey oldKey); -extern Bool SplayTreeTraverse(SplayTree splay, TreeVisitor visitor, - void *closureP, Size closureS); typedef Bool (*SplayFindMethod)(Tree *nodeReturn, SplayTree splay, SplayTestNodeMethod testNode, @@ -79,6 +77,7 @@ extern Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, extern void SplayDebugUpdate(SplayTree splay, Tree tree); extern Count SplayDebugCount(SplayTree splay); + #endif /* splay_h */ diff --git a/mps/code/tree.c b/mps/code/tree.c index 0e1bb372f65..dbf112f31bc 100644 --- a/mps/code/tree.c +++ b/mps/code/tree.c @@ -119,11 +119,11 @@ Compare TreeFind(Tree *treeReturn, Tree root, TreeKey key, TreeCompare compare) /* TreeFindNext -- search for node containing key, or next node * - * If there is a node that is greater than key, set treeReturn to that + * If there is a node that is greater than key, set *treeReturn to that * node and return TRUE. * * Otherwise, key is greater than all nodes in the tree, so leave - * treeReturn unchanged and return FALSE. + * *treeReturn unchanged and return FALSE. */ Bool TreeFindNext(Tree *treeReturn, Tree root, TreeKey key, TreeCompare compare) diff --git a/mps/code/tree.h b/mps/code/tree.h index 6c5ea33320e..6d2fde3ea6f 100644 --- a/mps/code/tree.h +++ b/mps/code/tree.h @@ -136,7 +136,7 @@ extern void TreeBalance(Tree *treeIO); /* TREE_DESTROY -- iterate over a tree while destroying it. * - * root is a lvalue storing the root of the tree. + * root is an lvalue storing the root of the tree. * treeref is a variable of type Tree*. * tree and next are variables of type Tree. * In the body of the loop, tree is the current node. 
@@ -145,7 +145,7 @@ extern void TreeBalance(Tree *treeIO); for ((treeref = &(root), TreeToVine(treeref), next = TreeEMPTY); \ (tree = *treeref) != TreeEMPTY \ ? (next = tree->right, TRUE) \ - : (TreeBalance(treeref), FALSE); \ + : FALSE; \ *treeref = next) diff --git a/mps/design/arena.txt b/mps/design/arena.txt index 6c48d2ad111..28dd6334065 100644 --- a/mps/design/arena.txt +++ b/mps/design/arena.txt @@ -220,6 +220,49 @@ implementations of those methods which must be overridden. Instead each abstract method is initialized to ``NULL``. +Chunks +...... + +_`.chunk`: Each contiguous region of address space managed by the MPS +is represented by a *chunk*. + +_`.chunk.tracts`: A chunk contains a table of tracts. See `.tract`_. + +_`.chunk.lookup`: Looking of the chunk of an address is the first +step in the second-stage fix operation, and so on the critical path. +See `design.mps.critical-path`_. + +.. _design.mps.critical-path: critical-path + +_`.chunk.tree`: For efficient lookup, chunks are stored in a balanced +tree; ``arena->chunkTree`` points to the root of the tree. Operations +on this tree must ensure that the tree remains balanced, otherwise +performance degrades badly with many chunks. + +_`.chunk.insert`: New chunks are inserted into the tree by calling +``ArenaChunkInsert()``. This calls ``TreeInsert()``, followed by +``TreeBalance()`` to ensure that the tree is balanced. + +_`.chunk.delete`: There is no corresponding function +``ArenaChunkDelete()``. Instead, deletions from the chunk tree are +carried out by calling ``TreeToVine()``, iterating over the vine +(where deletion is straightforward, with care) and then calling +``TreeBalance()`` if any chunks might remain. The macro +``TREE_DESTROY()`` assists with the common case. + +_`.chunk.delete.justify`: This is because we don't have a function +that deletes an item from a balanced tree efficiently, and because all +functions that delete chunks do so in a loop that may delete multiple +chunks. 
The procedure is efficient when deleting all the chunks in +``ArenaFinish()``, and has acceptable performance in ``VMCompact()`` +where multiple chunks may be deleted from the tree. + +_`.chunk.delete.tricky`: Deleting chunks from the chunk tree is tricky +in the virtual memory arena because ``vmChunkDestroy()`` unmaps the +memory containing the chunk, which includes the tree node. So the next +chunk must be looked up before deleting the current chunk. + + Tracts ...... @@ -233,11 +276,11 @@ associating their own data with each allocation grain. _`.tract.structure`: The tract structure definition looks like this:: typedef struct TractStruct { /* Tract structure */ - PagePoolUnion pool; /* MUST BE FIRST (design.mps.arena.tract.field.pool) */ - void *p; /* pointer for use of owning pool */ - Addr base; /* Base address of the tract */ - TraceSet white : TRACE_MAX; /* traces for which tract is white */ - unsigned int hasSeg : 1; /* does tract have a seg in p? */ + PagePoolUnion pool; /* MUST BE FIRST ( pool) */ + void *p; /* pointer for use of owning pool */ + Addr base; /* Base address of the tract */ + TraceSet white : TraceLIMIT; /* traces for which tract is white */ + unsigned hasSeg : 1; /* does tract have a seg in p? See .bool */ } TractStruct; _`.tract.field.pool`: The pool.pool field indicates to which pool the tract @@ -263,9 +306,11 @@ use it for any purpose. _`.tract.field.hasSeg`: The ``hasSeg`` bit-field is a Boolean which indicates whether the ``p`` field is being used by the segment module. If this field is ``TRUE``, then the value of ``p`` is a ``Seg``. -``hasSeg`` is typed as an ``unsigned int``, rather than a ``Bool``. -This ensures that there won't be sign conversion problems when -converting the bit-field value. +``hasSeg`` has type ``unsigned:1`` rather than ``Bool:1`` to avoid +sign conversion problems when converting the bit-field value. See +`design.mps.type.bool.bitfield`_. + +.. 
_design.mps.type.bool.bitfield: type#bool.bitfield _`.tract.field.base`: The base field contains the base address of the memory represented by the tract. diff --git a/mps/design/locus.txt b/mps/design/locus.txt index 1e70de035bd..501578ee724 100644 --- a/mps/design/locus.txt +++ b/mps/design/locus.txt @@ -511,7 +511,10 @@ requested (to allow for large objects). _`.arch.chunk`: Arenas may allocate more address space in additional chunks, which may be disjoint from the existing chunks. Inter-chunk space will be represented by dummy regions. There are also sentinel -regions at both ends of the address space. +regions at both ends of the address space. See +`design.mps.arena.chunk`_. + +.. _design.mps.arena.chunk: arena#chunk Overview of strategy diff --git a/mps/design/seg.txt b/mps/design/seg.txt index 87ff87575f6..55634a92092 100644 --- a/mps/design/seg.txt +++ b/mps/design/seg.txt @@ -63,21 +63,20 @@ Data Structure The implementations are as follows:: typedef struct SegStruct { /* segment structure */ - Sig sig; /* impl.h.misc.sig */ + Sig sig; /* */ SegClass class; /* segment class structure */ Tract firstTract; /* first tract of segment */ RingStruct poolRing; /* link in list of segs in pool */ Addr limit; /* limit of segment */ - unsigned depth : SHIELD_DEPTH_WIDTH; /* see impl.c.shield.def.depth */ - AccessSet pm : AccessMAX; /* protection mode, impl.c.shield */ - AccessSet sm : AccessMAX; /* shield mode, impl.c.shield */ - TraceSet grey : TRACE_MAX; /* traces for which seg is grey */ - TraceSet white : TRACE_MAX; /* traces for which seg is white */ - TraceSet nailed : TRACE_MAX; /* traces for which seg has nailed objects */ - RankSet rankSet : RankMAX; /* ranks of references in this seg */ + unsigned depth : ShieldDepthWIDTH; /* see */ + AccessSet pm : AccessLIMIT; /* protection mode, */ + AccessSet sm : AccessLIMIT; /* shield mode, */ + TraceSet grey : TraceLIMIT; /* traces for which seg is grey */ + TraceSet white : TraceLIMIT; /* traces for which seg is 
white */ + TraceSet nailed : TraceLIMIT; /* traces for which seg has nailed objects */ + RankSet rankSet : RankLIMIT; /* ranks of references in this seg */ } SegStruct; - typedef struct GCSegStruct { /* GC segment structure */ SegStruct segStruct; /* superclass fields must come first */ RingStruct greyRing; /* link in list of grey segs */ diff --git a/mps/design/type.txt b/mps/design/type.txt index baee04ef3d2..f60ac1d7ba3 100644 --- a/mps/design/type.txt +++ b/mps/design/type.txt @@ -564,9 +564,11 @@ space as the client data. ``typedef unsigned TraceId`` _`.traceid`: A ``TraceId`` is an unsigned integer which is less than -``TRACE_MAX``. Each running trace has a different ``TraceId`` which is -used to index into tables and bitfields used to remember the state of -that trace. +``TraceLIMIT``. Each running trace has a different ``TraceId`` which +is used to index into the tables and bitfields that record the state +of that trace. See `design.mps.trace.instance.limit`_. + +.. _design.mps.trace.instance.limit: trace#instance.limit ``typedef unsigned TraceSet`` From 58f35172c7fdac22a9c3698931bf086e92c53ed2 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Wed, 21 May 2014 11:38:59 +0100 Subject: [PATCH 11/24] Tree_traverse_and_delete is a better name than tree_destroy. Generalize this macro so it can be used in all three cases. 
Copied from Perforce Change: 186228 ServerID: perforce.ravenbrook.com --- mps/code/arenacl.c | 4 ++-- mps/code/arenavm.c | 18 +++++++----------- mps/code/tree.h | 26 +++++++++++++++++--------- mps/design/arena.txt | 15 +++++++-------- 4 files changed, 33 insertions(+), 30 deletions(-) diff --git a/mps/code/arenacl.c b/mps/code/arenacl.c index 0129c63fea9..a288b420b20 100644 --- a/mps/code/arenacl.c +++ b/mps/code/arenacl.c @@ -290,7 +290,7 @@ static Res ClientArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args) static void ClientArenaFinish(Arena arena) { ClientArena clientArena; - Tree *treeref, tree, next; + Tree *treeref, *nextref, tree, next; clientArena = Arena2ClientArena(arena); AVERT(ClientArena, clientArena); @@ -298,7 +298,7 @@ static void ClientArenaFinish(Arena arena) /* Destroy all chunks, including the primary. See * */ arena->primary = NULL; - TREE_DESTROY(treeref, tree, next, arena->chunkTree) { + TREE_TRAVERSE_AND_DELETE(treeref, nextref, tree, next, arena->chunkTree) { clientChunkDestroy(ChunkOfTree(tree)); } diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c index 538f50b134a..83f042e265c 100644 --- a/mps/code/arenavm.c +++ b/mps/code/arenavm.c @@ -589,7 +589,7 @@ static void VMArenaFinish(Arena arena) { VMArena vmArena; VM arenaVM; - Tree *treeref, tree, next; + Tree *treeref, *nextref, tree, next; vmArena = Arena2VMArena(arena); AVERT(VMArena, vmArena); @@ -600,7 +600,7 @@ static void VMArenaFinish(Arena arena) /* Destroy all chunks, including the primary. 
See * */ arena->primary = NULL; - TREE_DESTROY(treeref, tree, next, arena->chunkTree) { + TREE_TRAVERSE_AND_DELETE(treeref, nextref, tree, next, arena->chunkTree) { vmChunkDestroy(ChunkOfTree(tree)); } @@ -1088,7 +1088,7 @@ static void VMCompact(Arena arena, Trace trace) { VMArena vmArena; Size vmem1; - Tree *tree; + Tree *treeref, *nextref, tree, next; vmArena = Arena2VMArena(arena); AVERT(VMArena, vmArena); @@ -1099,25 +1099,21 @@ static void VMCompact(Arena arena, Trace trace) /* Destroy chunks that are completely free, but not the primary * chunk. See * TODO: add hysteresis here. See job003815. */ - tree = &arena->chunkTree; - TreeToVine(tree); - while (*tree != TreeEMPTY) { - Chunk chunk = ChunkOfTree(*tree); + TREE_TRAVERSE_AND_DELETE(treeref, nextref, tree, next, arena->chunkTree) { + Chunk chunk = ChunkOfTree(tree); AVERT(Chunk, chunk); if(chunk != arena->primary && BTIsResRange(chunk->allocTable, 0, chunk->pages)) { Addr base = chunk->base; Size size = ChunkSize(chunk); - Tree next = TreeRight(*tree); vmChunkDestroy(chunk); vmArena->contracted(arena, base, size); - *tree = next; } else { - tree = &(*tree)->right; + /* Keep this chunk. */ + treeref = nextref; } } - TreeBalance(&arena->chunkTree); { Size vmem0 = trace->preTraceArenaReserved; diff --git a/mps/code/tree.h b/mps/code/tree.h index 6d2fde3ea6f..9c6e2543f7e 100644 --- a/mps/code/tree.h +++ b/mps/code/tree.h @@ -134,18 +134,26 @@ extern Tree TreeReverseRightSpine(Tree tree); extern Count TreeToVine(Tree *treeIO); extern void TreeBalance(Tree *treeIO); -/* TREE_DESTROY -- iterate over a tree while destroying it. +/* TREE_TRAVERSE_AND_DELETE -- traverse a tree while deleting nodes * - * root is an lvalue storing the root of the tree. - * treeref is a variable of type Tree*. + * root is an lvalue storing a pointer to the root of the tree. It is + * evaluated twice. + * treeref and nextref are variable of type Tree*. * tree and next are variables of type Tree. 
- * In the body of the loop, tree is the current node. + * + * In the body of the loop, tree and next are the current and next + * node respectively, and treeref and nextref are the locations where + * pointers to these nodes are stored. Nodes are deleted from the tree + * by default, or you can assign treeref = nextref in the body of the + * loop to keep the current node. + * + * See . */ -#define TREE_DESTROY(treeref, tree, next, root) \ - for ((treeref = &(root), TreeToVine(treeref), next = TreeEMPTY); \ - (tree = *treeref) != TreeEMPTY \ - ? (next = tree->right, TRUE) \ - : FALSE; \ +#define TREE_TRAVERSE_AND_DELETE(treeref, nextref, tree, next, root) \ + for ((treeref = &(root), TreeToVine(treeref), next = TreeEMPTY); \ + (tree = *treeref) != TreeEMPTY \ + ? (nextref = &tree->right, next = *nextref, TRUE) \ + : (TreeBalance(&(root)), FALSE); \ *treeref = next) diff --git a/mps/design/arena.txt b/mps/design/arena.txt index 28dd6334065..cf243e2877d 100644 --- a/mps/design/arena.txt +++ b/mps/design/arena.txt @@ -246,21 +246,20 @@ _`.chunk.insert`: New chunks are inserted into the tree by calling _`.chunk.delete`: There is no corresponding function ``ArenaChunkDelete()``. Instead, deletions from the chunk tree are carried out by calling ``TreeToVine()``, iterating over the vine -(where deletion is straightforward, with care) and then calling -``TreeBalance()`` if any chunks might remain. The macro -``TREE_DESTROY()`` assists with the common case. +(where deletion is possible, if care is taken) and then calling +``TreeBalance()`` on the remaining tree. The macro +``TREE_TRAVERSE_AND_DELETE()`` assists with this. _`.chunk.delete.justify`: This is because we don't have a function that deletes an item from a balanced tree efficiently, and because all -functions that delete chunks do so in a loop that may delete multiple -chunks. 
The procedure is efficient when deleting all the chunks in -``ArenaFinish()``, and has acceptable performance in ``VMCompact()`` -where multiple chunks may be deleted from the tree. +functions that delete chunks do so in a loop over the chunks (so the +best we can do is O(*n*) time in any case). _`.chunk.delete.tricky`: Deleting chunks from the chunk tree is tricky in the virtual memory arena because ``vmChunkDestroy()`` unmaps the memory containing the chunk, which includes the tree node. So the next -chunk must be looked up before deleting the current chunk. +chunk must be looked up before deleting the current chunk. The macro +``TREE_TRAVERSE_AND_DELETE()`` helps get this right. Tracts From a066764cc86ed4eb8d0a1abab06f6b71d8e0e5b9 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Thu, 22 May 2014 13:05:40 +0100 Subject: [PATCH 12/24] Add -m command-line option to the scheme example so that we can test it with different initial arena sizes. Copied from Perforce Change: 186243 ServerID: perforce.ravenbrook.com --- mps/example/scheme/scheme.c | 47 ++++++++++++++++++++++++++++++------- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/mps/example/scheme/scheme.c b/mps/example/scheme/scheme.c index 8a8dcf48ed7..72fbe9f9ff4 100644 --- a/mps/example/scheme/scheme.c +++ b/mps/example/scheme/scheme.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -4246,7 +4247,7 @@ static int start(int argc, char *argv[]) mps_addr_t ref; mps_res_t res; mps_root_t globals_root; - int exit_code; + int exit_code = EXIT_SUCCESS; total = (size_t)0; @@ -4302,15 +4303,14 @@ static int start(int argc, char *argv[]) abort(); } - if(argc >= 2) { + if (argc > 0) { /* Non-interactive file execution */ - if(setjmp(*error_handler) != 0) { + if (setjmp(*error_handler) != 0) { fprintf(stderr, "%s\n", error_message); exit_code = EXIT_FAILURE; - } else { - load(env, op_env, make_string(strlen(argv[1]), argv[1])); - exit_code = EXIT_SUCCESS; - } + } else + 
for (i = 0; i < argc; ++i) + load(env, op_env, make_string(strlen(argv[i]), argv[i])); } else { /* Ask the MPS to tell us when it's garbage collecting so that we can print some messages. Completely optional. */ @@ -4376,6 +4376,7 @@ static mps_gen_param_s obj_gen_params[] = { int main(int argc, char *argv[]) { + size_t arenasize = 32ul * 1024 * 1024; mps_res_t res; mps_chain_t obj_chain; mps_fmt_t obj_fmt; @@ -4383,11 +4384,41 @@ int main(int argc, char *argv[]) mps_root_t reg_root; int exit_code; void *marker = ▮ + int ch; + while ((ch = getopt(argc, argv, "m:")) != -1) + switch (ch) { + case 'm': { + char *p; + arenasize = (unsigned)strtoul(optarg, &p, 10); + switch(toupper(*p)) { + case 'G': arenasize <<= 30; break; + case 'M': arenasize <<= 20; break; + case 'K': arenasize <<= 10; break; + case '\0': break; + default: + fprintf(stderr, "Bad arena size %s\n", optarg); + return EXIT_FAILURE; + } + } + break; + default: + fprintf(stderr, + "Usage: %s [option...] [file...]\n" + "Options:\n" + " -m n, --arena-size=n[KMG]?\n" + " Initial size of arena (default %lu).\n", + argv[0], + (unsigned long)arenasize); + return EXIT_FAILURE; + } + argc -= optind; + argv += optind; + /* Create an MPS arena. There is usually only one of these in a process. It holds all the MPS "global" state and is where everything happens. */ MPS_ARGS_BEGIN(args) { - MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 32 * 1024 * 1024); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, arenasize); res = mps_arena_create_k(&arena, mps_arena_class_vm(), args); } MPS_ARGS_END(args); if (res != MPS_RES_OK) error("Couldn't create arena"); From 4dc70cba7cbf72e19535c2596d08759827eac489 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Thu, 22 May 2014 13:16:29 +0100 Subject: [PATCH 13/24] All versions of the scheme interpreter now take multiple files on the command line. Integrate change 186243 to scheme-{advanced,malloc,boehm}.c. 
Copied from Perforce Change: 186244 ServerID: perforce.ravenbrook.com --- mps/example/scheme/scheme-advanced.c | 48 ++++++++++++++++++++++------ mps/example/scheme/scheme-malloc.c | 3 +- mps/example/scheme/scheme.c | 1 - 3 files changed, 41 insertions(+), 11 deletions(-) diff --git a/mps/example/scheme/scheme-advanced.c b/mps/example/scheme/scheme-advanced.c index 476b790f0a4..28afdcc0671 100644 --- a/mps/example/scheme/scheme-advanced.c +++ b/mps/example/scheme/scheme-advanced.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -4318,7 +4319,7 @@ static int start(int argc, char *argv[]) mps_addr_t ref; mps_res_t res; mps_root_t globals_root; - int exit_code; + int exit_code = EXIT_SUCCESS; total = (size_t)0; error_handler = &jb; @@ -4372,15 +4373,14 @@ static int start(int argc, char *argv[]) abort(); } - if(argc >= 2) { + if (argc > 0) { /* Non-interactive file execution */ - if(setjmp(*error_handler) != 0) { + if (setjmp(*error_handler) != 0) { fprintf(stderr, "%s\n", error_message); exit_code = EXIT_FAILURE; - } else { - load(env, op_env, make_string(strlen(argv[1]), argv[1])); - exit_code = EXIT_SUCCESS; - } + } else + for (i = 0; i < argc; ++i) + load(env, op_env, make_string(strlen(argv[i]), argv[i])); } else { /* Ask the MPS to tell us when it's garbage collecting so that we can print some messages. Completely optional. */ @@ -4409,7 +4409,6 @@ static int start(int argc, char *argv[]) } } puts("Bye."); - exit_code = EXIT_SUCCESS; } /* See comment at the end of `main` about cleaning up. 
*/ @@ -4442,6 +4441,7 @@ static mps_gen_param_s obj_gen_params[] = { int main(int argc, char *argv[]) { + size_t arenasize = 32ul * 1024 * 1024; mps_res_t res; mps_chain_t obj_chain; mps_fmt_t obj_fmt, buckets_fmt; @@ -4449,11 +4449,41 @@ int main(int argc, char *argv[]) mps_root_t reg_root; int exit_code; void *marker = ▮ + int ch; + while ((ch = getopt(argc, argv, "m:")) != -1) + switch (ch) { + case 'm': { + char *p; + arenasize = (unsigned)strtoul(optarg, &p, 10); + switch(toupper(*p)) { + case 'G': arenasize <<= 30; break; + case 'M': arenasize <<= 20; break; + case 'K': arenasize <<= 10; break; + case '\0': break; + default: + fprintf(stderr, "Bad arena size %s\n", optarg); + return EXIT_FAILURE; + } + } + break; + default: + fprintf(stderr, + "Usage: %s [option...] [file...]\n" + "Options:\n" + " -m n, --arena-size=n[KMG]?\n" + " Initial size of arena (default %lu).\n", + argv[0], + (unsigned long)arenasize); + return EXIT_FAILURE; + } + argc -= optind; + argv += optind; + /* Create an MPS arena. There is usually only one of these in a process. It holds all the MPS "global" state and is where everything happens. 
*/ MPS_ARGS_BEGIN(args) { - MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 32 * 1024 * 1024); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, arenasize); res = mps_arena_create_k(&arena, mps_arena_class_vm(), args); } MPS_ARGS_END(args); if (res != MPS_RES_OK) error("Couldn't create arena"); diff --git a/mps/example/scheme/scheme-malloc.c b/mps/example/scheme/scheme-malloc.c index 1333ce73aef..2b27c50ab0f 100644 --- a/mps/example/scheme/scheme-malloc.c +++ b/mps/example/scheme/scheme-malloc.c @@ -3608,7 +3608,8 @@ int main(int argc, char *argv[]) fprintf(stderr, "%s\n", error_message); return EXIT_FAILURE; } - load(env, op_env, argv[1]); + for (i = 1; i < argc; ++i) + load(env, op_env, argv[i]); return EXIT_SUCCESS; } else { /* Interactive read-eval-print loop */ diff --git a/mps/example/scheme/scheme.c b/mps/example/scheme/scheme.c index 72fbe9f9ff4..b3bc0c6801d 100644 --- a/mps/example/scheme/scheme.c +++ b/mps/example/scheme/scheme.c @@ -4339,7 +4339,6 @@ static int start(int argc, char *argv[]) } } puts("Bye."); - exit_code = EXIT_SUCCESS; } /* See comment at the end of `main` about cleaning up. */ From 299bfb2992027c6131c6166db15e37548a766a60 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Thu, 22 May 2014 15:38:02 +0100 Subject: [PATCH 14/24] Scheme-boehm also processes multiple files. 
Copied from Perforce Change: 186245 ServerID: perforce.ravenbrook.com --- mps/example/scheme/scheme-boehm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mps/example/scheme/scheme-boehm.c b/mps/example/scheme/scheme-boehm.c index 8d039433bb4..8ac42699512 100644 --- a/mps/example/scheme/scheme-boehm.c +++ b/mps/example/scheme/scheme-boehm.c @@ -3611,7 +3611,8 @@ int main(int argc, char *argv[]) fprintf(stderr, "%s\n", error_message); return EXIT_FAILURE; } - load(env, op_env, argv[1]); + for (i = 1; i < argc; ++i) + load(env, op_env, argv[i]); return EXIT_SUCCESS; } else { /* Interactive read-eval-print loop */ From 220e23a758ffcc61006f2ff55ad6b24ef79f20d4 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Sun, 8 Jun 2014 17:45:44 +0100 Subject: [PATCH 15/24] Fix problems identified by dl in review . Copied from Perforce Change: 186445 ServerID: perforce.ravenbrook.com --- mps/code/tract.c | 14 +++++++++++++- mps/code/tree.c | 6 +++--- mps/code/tree.h | 2 +- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/mps/code/tract.c b/mps/code/tract.c index 2a688bf5ea3..c40eb3ac784 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -7,6 +7,17 @@ * free but never allocated as alloc starts searching after the tables. * TractOfAddr uses the fact that these pages are marked as free in order * to detect "references" to these pages as being bogus. + * + * .chunk.at.base: The chunks are stored in a balanced binary tree. + * Looking up an address in this tree is on the critical path, and + * therefore vital that it runs quickly. It is an implementation + * detail of chunks that they are always stored at the base of the + * region of address space they represent. Thus chunk happens to + * always be the same as chunk->base. We take advantage of this in the + * tree search by using chunk as its own key (instead of looking up + * chunk->base): this saves a dereference and perhaps a cache miss. + * See ChunkKey and ChunkCompare for this optimization. 
The necessary + * property is asserted in ChunkCheck. */ #include "tract.h" @@ -117,7 +128,7 @@ Bool ChunkCheck(Chunk chunk) CHECKL(chunk->base != (Addr)0); CHECKL(chunk->base < chunk->limit); - /* .chunk.at.base: check chunk structure is at its own base */ + /* check chunk structure is at its own base: see .chunk.at.base. */ CHECKL(chunk->base == (Addr)chunk); CHECKL((Addr)(chunk+1) <= chunk->limit); CHECKL(ChunkSizeToPages(chunk, ChunkSize(chunk)) == chunk->pages); @@ -276,6 +287,7 @@ Compare ChunkCompare(Tree tree, TreeKey key) AVERT_CRITICAL(Tree, tree); AVER_CRITICAL(tree != TreeEMPTY); + /* See .chunk.at.base. */ chunk = ChunkOfTree(tree); AVERT_CRITICAL(Chunk, chunk); diff --git a/mps/code/tree.c b/mps/code/tree.c index dbf112f31bc..7936383b5a9 100644 --- a/mps/code/tree.c +++ b/mps/code/tree.c @@ -85,9 +85,9 @@ Compare TreeFind(Tree *treeReturn, Tree root, TreeKey key, TreeCompare compare) Tree node, parent; Compare cmp = CompareEQUAL; - AVERT(Tree, root); - AVER(treeReturn != NULL); - AVER(FUNCHECK(compare)); + AVERT_CRITICAL(Tree, root); + AVER_CRITICAL(treeReturn != NULL); + AVER_CRITICAL(FUNCHECK(compare)); /* key is arbitrary */ parent = NULL; diff --git a/mps/code/tree.h b/mps/code/tree.h index 9c6e2543f7e..e7f70d8efd4 100644 --- a/mps/code/tree.h +++ b/mps/code/tree.h @@ -138,7 +138,7 @@ extern void TreeBalance(Tree *treeIO); * * root is an lvalue storing a pointer to the root of the tree. It is * evaluated twice. - * treeref and nextref are variable of type Tree*. + * treeref and nextref are variables of type Tree*. * tree and next are variables of type Tree. * * In the body of the loop, tree and next are the current and next From 208768896427848cf003a279b4131663ef5e0bd2 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Sun, 8 Jun 2014 22:10:21 +0100 Subject: [PATCH 16/24] Fix problems identified by rb in review . 
Copied from Perforce Change: 186451 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 16 +++++++++-- mps/code/arenacl.c | 19 +++++++++---- mps/code/arenavm.c | 68 ++++++++++++++++++++++++++++++-------------- mps/code/tract.c | 1 - mps/code/tree.c | 35 +++++++++++++++++++++++ mps/code/tree.h | 24 ++-------------- mps/design/arena.txt | 8 +++--- 7 files changed, 115 insertions(+), 56 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 89abc7e0e7c..8491c1976dd 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -638,6 +638,7 @@ typedef struct ArenaAllocPageClosureStruct { Pool pool; Addr base; Chunk avoid; + Res res; } ArenaAllocPageClosureStruct, *ArenaAllocPageClosure; static Bool arenaAllocPageInChunk(Tree tree, void *closureP, Size closureS) @@ -656,18 +657,25 @@ static Bool arenaAllocPageInChunk(Tree tree, void *closureP, Size closureS) UNUSED(closureS); /* Already searched in arenaAllocPage. */ - if (chunk == cl->avoid) + if (chunk == cl->avoid) { + cl->res = ResRESOURCE; return TRUE; + } if (!BTFindShortResRange(&basePageIndex, &limitPageIndex, chunk->allocTable, chunk->allocBase, chunk->pages, 1)) + { + cl->res = ResRESOURCE; return TRUE; + } res = (*cl->arena->class->pagesMarkAllocated)(cl->arena, chunk, basePageIndex, 1, cl->pool); - if (res != ResOK) + if (res != ResOK) { + cl->res = res; return TRUE; + } cl->base = PageIndexBase(chunk, basePageIndex); return FALSE; @@ -685,6 +693,7 @@ static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool) closure.pool = pool; closure.base = NULL; closure.avoid = NULL; + closure.res = ResOK; /* Favour the primary chunk, because pages allocated this way aren't currently freed, and we don't want to prevent chunks being destroyed. 
*/ @@ -697,7 +706,8 @@ static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool) arenaAllocPageInChunk, &closure, 0)) goto found; - return ResRESOURCE; + AVER(closure.res != ResOK); + return closure.res; found: AVER(closure.base != NULL); diff --git a/mps/code/arenacl.c b/mps/code/arenacl.c index a288b420b20..58d1abce445 100644 --- a/mps/code/arenacl.c +++ b/mps/code/arenacl.c @@ -173,15 +173,26 @@ static Res ClientChunkInit(Chunk chunk, BootBlock boot) /* clientChunkDestroy -- destroy a ClientChunk */ -static void clientChunkDestroy(Chunk chunk) +static Bool clientChunkDestroy(Tree tree, void *closureP, Size closureS) { + Chunk chunk; ClientChunk clChunk; + AVERT(Tree, tree); + /* FIXME: AVER(closureP == UNUSED_POINTER); */ + UNUSED(closureP); + /* FIXME: AVER(closureS == UNUSED_SIZE); */ + UNUSED(closureS); + + chunk = ChunkOfTree(tree); + AVERT(Chunk, chunk); clChunk = Chunk2ClientChunk(chunk); AVERT(ClientChunk, clChunk); clChunk->sig = SigInvalid; ChunkFinish(chunk); + + return TRUE; } @@ -290,7 +301,6 @@ static Res ClientArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args) static void ClientArenaFinish(Arena arena) { ClientArena clientArena; - Tree *treeref, *nextref, tree, next; clientArena = Arena2ClientArena(arena); AVERT(ClientArena, clientArena); @@ -298,9 +308,8 @@ static void ClientArenaFinish(Arena arena) /* Destroy all chunks, including the primary. 
See * */ arena->primary = NULL; - TREE_TRAVERSE_AND_DELETE(treeref, nextref, tree, next, arena->chunkTree) { - clientChunkDestroy(ChunkOfTree(tree)); - } + /* FIXME: use UNUSED_POINTER, UNUSED_SIZE instead of NULL, 0 */ + TreeTraverseAndDelete(&arena->chunkTree, clientChunkDestroy, NULL, 0); clientArena->sig = SigInvalid; diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c index 83f042e265c..800e7aeca21 100644 --- a/mps/code/arenavm.c +++ b/mps/code/arenavm.c @@ -401,11 +401,19 @@ static Res VMChunkInit(Chunk chunk, BootBlock boot) /* vmChunkDestroy -- destroy a VMChunk */ -static void vmChunkDestroy(Chunk chunk) +static Bool vmChunkDestroy(Tree tree, void *closureP, Size closureS) { VM vm; + Chunk chunk; VMChunk vmChunk; + AVERT(Tree, tree); + /* FIXME: AVER(closureP == UNUSED_POINTER); */ + UNUSED(closureP); + /* FIXME: AVER(closureS == UNUSED_SIZE); */ + UNUSED(closureS); + + chunk = ChunkOfTree(tree); AVERT(Chunk, chunk); vmChunk = Chunk2VMChunk(chunk); AVERT(VMChunk, vmChunk); @@ -418,6 +426,8 @@ static void vmChunkDestroy(Chunk chunk) vm = vmChunk->vm; ChunkFinish(chunk); VMDestroy(vm); + + return TRUE; } @@ -589,7 +599,6 @@ static void VMArenaFinish(Arena arena) { VMArena vmArena; VM arenaVM; - Tree *treeref, *nextref, tree, next; vmArena = Arena2VMArena(arena); AVERT(VMArena, vmArena); @@ -600,9 +609,8 @@ static void VMArenaFinish(Arena arena) /* Destroy all chunks, including the primary. See * */ arena->primary = NULL; - TREE_TRAVERSE_AND_DELETE(treeref, nextref, tree, next, arena->chunkTree) { - vmChunkDestroy(ChunkOfTree(tree)); - } + /* FIXME: use UNUSED_POINTER, UNUSED_SIZE instead of NULL, 0 */ + TreeTraverseAndDelete(&arena->chunkTree, vmChunkDestroy, NULL, 0); /* Destroying the chunks should have purged and removed all spare pages. 
*/ RingFinish(&vmArena->spareRing); @@ -1084,11 +1092,42 @@ static void VMFree(Addr base, Size size, Pool pool) } +/* vmChunkCompact -- delete chunk if empty and not primary */ + +static Bool vmChunkCompact(Tree tree, void *closureP, Size closureS) +{ + Chunk chunk; + Arena arena = closureP; + VMArena vmArena; + + AVERT(Tree, tree); + AVERT(Arena, arena); + /* FIXME: AVER(closureS == UNUSED_SIZE); */ + UNUSED(closureS); + + vmArena = Arena2VMArena(arena); + AVERT(VMArena, vmArena); + chunk = ChunkOfTree(tree); + AVERT(Chunk, chunk); + if(chunk != arena->primary + && BTIsResRange(chunk->allocTable, 0, chunk->pages)) + { + Addr base = chunk->base; + Size size = ChunkSize(chunk); + vmChunkDestroy(tree, closureP, closureS); + vmArena->contracted(arena, base, size); + return TRUE; + } else { + /* Keep this chunk. */ + return FALSE; + } +} + + static void VMCompact(Arena arena, Trace trace) { VMArena vmArena; Size vmem1; - Tree *treeref, *nextref, tree, next; vmArena = Arena2VMArena(arena); AVERT(VMArena, vmArena); @@ -1099,21 +1138,8 @@ static void VMCompact(Arena arena, Trace trace) /* Destroy chunks that are completely free, but not the primary * chunk. See * TODO: add hysteresis here. See job003815. */ - TREE_TRAVERSE_AND_DELETE(treeref, nextref, tree, next, arena->chunkTree) { - Chunk chunk = ChunkOfTree(tree); - AVERT(Chunk, chunk); - if(chunk != arena->primary - && BTIsResRange(chunk->allocTable, 0, chunk->pages)) - { - Addr base = chunk->base; - Size size = ChunkSize(chunk); - vmChunkDestroy(chunk); - vmArena->contracted(arena, base, size); - } else { - /* Keep this chunk. 
*/ - treeref = nextref; - } - } + /* FIXME: use UNUSED_SIZE instead of 0 */ + TreeTraverseAndDelete(&arena->chunkTree, vmChunkCompact, arena, 0); { Size vmem0 = trace->preTraceArenaReserved; diff --git a/mps/code/tract.c b/mps/code/tract.c index c40eb3ac784..ec159458e25 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -520,7 +520,6 @@ static Bool tractSearch(Tract *tractReturn, Arena arena, Addr addr) } } while (chunkAboveAddr(&chunk, arena, addr)) { - /* If the ring was kept in address order, this could be improved. */ addr = chunk->base; /* Start from allocBase to skip the tables. */ if (tractSearchInChunk(tractReturn, chunk, chunk->allocBase)) { diff --git a/mps/code/tree.c b/mps/code/tree.c index 7936383b5a9..e6dd902f3fd 100644 --- a/mps/code/tree.c +++ b/mps/code/tree.c @@ -528,6 +528,41 @@ void TreeBalance(Tree *treeIO) } +/* TreeTraverseAndDelete -- traverse a tree while deleting nodes + * + * The visitor function must return TRUE to delete the current node, + * or FALSE to keep it. + * + * See . + */ +void TreeTraverseAndDelete(Tree *treeIO, TreeVisitor visitor, + void *closureP, Size closureS) +{ + Tree *treeref = treeIO; + + AVER(treeIO != NULL); + AVERT(Tree, *treeIO); + AVER(FUNCHECK(visitor)); + /* closureP and closureS are arbitrary */ + + TreeToVine(treeIO); + + while (*treeref != TreeEMPTY) { + Tree tree = *treeref; /* Current node. */ + Tree *nextref = &tree->right; /* Location of pointer to next node. */ + Tree next = *nextref; /* Next node. */ + if ((*visitor)(tree, closureP, closureS)) { + /* Delete current node. */ + *treeref = next; + } else { + /* Keep current node. */ + treeref = nextref; + } + } + TreeBalance(treeIO); +} + + /* C. COPYRIGHT AND LICENSE * * Copyright (C) 2014 Ravenbrook Limited . 
diff --git a/mps/code/tree.h b/mps/code/tree.h index e7f70d8efd4..2b5636fd087 100644 --- a/mps/code/tree.h +++ b/mps/code/tree.h @@ -134,28 +134,8 @@ extern Tree TreeReverseRightSpine(Tree tree); extern Count TreeToVine(Tree *treeIO); extern void TreeBalance(Tree *treeIO); -/* TREE_TRAVERSE_AND_DELETE -- traverse a tree while deleting nodes - * - * root is an lvalue storing a pointer to the root of the tree. It is - * evaluated twice. - * treeref and nextref are variables of type Tree*. - * tree and next are variables of type Tree. - * - * In the body of the loop, tree and next are the current and next - * node respectively, and treeref and nextref are the locations where - * pointers to these nodes are stored. Nodes are deleted from the tree - * by default, or you can assign treeref = nextref in the body of the - * loop to keep the current node. - * - * See . - */ -#define TREE_TRAVERSE_AND_DELETE(treeref, nextref, tree, next, root) \ - for ((treeref = &(root), TreeToVine(treeref), next = TreeEMPTY); \ - (tree = *treeref) != TreeEMPTY \ - ? (nextref = &tree->right, next = *nextref, TRUE) \ - : (TreeBalance(&(root)), FALSE); \ - *treeref = next) - +extern void TreeTraverseAndDelete(Tree *treeIO, TreeVisitor visitor, + void *closureP, Size closureS); #endif /* tree_h */ diff --git a/mps/design/arena.txt b/mps/design/arena.txt index cf243e2877d..980d716f229 100644 --- a/mps/design/arena.txt +++ b/mps/design/arena.txt @@ -247,8 +247,8 @@ _`.chunk.delete`: There is no corresponding function ``ArenaChunkDelete()``. Instead, deletions from the chunk tree are carried out by calling ``TreeToVine()``, iterating over the vine (where deletion is possible, if care is taken) and then calling -``TreeBalance()`` on the remaining tree. The macro -``TREE_TRAVERSE_AND_DELETE()`` assists with this. +``TreeBalance()`` on the remaining tree. The function +``TreeTraverseAndDelete()`` implements this. 
_`.chunk.delete.justify`: This is because we don't have a function that deletes an item from a balanced tree efficiently, and because all @@ -258,8 +258,8 @@ best we can do is O(*n*) time in any case). _`.chunk.delete.tricky`: Deleting chunks from the chunk tree is tricky in the virtual memory arena because ``vmChunkDestroy()`` unmaps the memory containing the chunk, which includes the tree node. So the next -chunk must be looked up before deleting the current chunk. The macro -``TREE_TRAVERSE_AND_DELETE()`` helps get this right. +chunk must be looked up before deleting the current chunk. The function +``TreeTraverseAndDelete()`` ensures that this is done. Tracts From a331b13075b76e5e0c7aa6bc77d4042b0980c08d Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Wed, 11 Jun 2014 13:59:02 +0100 Subject: [PATCH 17/24] Use unused_pointer and unused_size now we have 'em. Copied from Perforce Change: 186492 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 1 + mps/code/arenacl.c | 8 ++++---- mps/code/arenavm.c | 16 ++++++++-------- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 5101908875b..12d7af1137b 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -388,6 +388,7 @@ void ArenaDestroy(Arena arena) LandFinish(ArenaFreeLand(arena)); /* The CBS block pool can't free its own memory via ArenaFree because + that would use the ZonedCBS. 
*/ MFSFinishTracts(ArenaCBSBlockPool(arena), arenaMFSPageFreeVisitor, UNUSED_POINTER, UNUSED_SIZE); diff --git a/mps/code/arenacl.c b/mps/code/arenacl.c index 58d1abce445..cb4d9865ae3 100644 --- a/mps/code/arenacl.c +++ b/mps/code/arenacl.c @@ -179,9 +179,9 @@ static Bool clientChunkDestroy(Tree tree, void *closureP, Size closureS) ClientChunk clChunk; AVERT(Tree, tree); - /* FIXME: AVER(closureP == UNUSED_POINTER); */ + AVER(closureP == UNUSED_POINTER); UNUSED(closureP); - /* FIXME: AVER(closureS == UNUSED_SIZE); */ + AVER(closureS == UNUSED_SIZE); UNUSED(closureS); chunk = ChunkOfTree(tree); @@ -308,8 +308,8 @@ static void ClientArenaFinish(Arena arena) /* Destroy all chunks, including the primary. See * */ arena->primary = NULL; - /* FIXME: use UNUSED_POINTER, UNUSED_SIZE instead of NULL, 0 */ - TreeTraverseAndDelete(&arena->chunkTree, clientChunkDestroy, NULL, 0); + TreeTraverseAndDelete(&arena->chunkTree, clientChunkDestroy, + UNUSED_POINTER, UNUSED_SIZE); clientArena->sig = SigInvalid; diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c index 800e7aeca21..73ea711f39c 100644 --- a/mps/code/arenavm.c +++ b/mps/code/arenavm.c @@ -408,9 +408,9 @@ static Bool vmChunkDestroy(Tree tree, void *closureP, Size closureS) VMChunk vmChunk; AVERT(Tree, tree); - /* FIXME: AVER(closureP == UNUSED_POINTER); */ + AVER(closureP == UNUSED_POINTER); UNUSED(closureP); - /* FIXME: AVER(closureS == UNUSED_SIZE); */ + AVER(closureS == UNUSED_SIZE); UNUSED(closureS); chunk = ChunkOfTree(tree); @@ -609,8 +609,8 @@ static void VMArenaFinish(Arena arena) /* Destroy all chunks, including the primary. See * */ arena->primary = NULL; - /* FIXME: use UNUSED_POINTER, UNUSED_SIZE instead of NULL, 0 */ - TreeTraverseAndDelete(&arena->chunkTree, vmChunkDestroy, NULL, 0); + TreeTraverseAndDelete(&arena->chunkTree, vmChunkDestroy, + UNUSED_POINTER, UNUSED_SIZE); /* Destroying the chunks should have purged and removed all spare pages. 
*/ RingFinish(&vmArena->spareRing); @@ -1102,7 +1102,7 @@ static Bool vmChunkCompact(Tree tree, void *closureP, Size closureS) AVERT(Tree, tree); AVERT(Arena, arena); - /* FIXME: AVER(closureS == UNUSED_SIZE); */ + AVER(closureS == UNUSED_SIZE); UNUSED(closureS); vmArena = Arena2VMArena(arena); @@ -1114,7 +1114,7 @@ static Bool vmChunkCompact(Tree tree, void *closureP, Size closureS) { Addr base = chunk->base; Size size = ChunkSize(chunk); - vmChunkDestroy(tree, closureP, closureS); + vmChunkDestroy(tree, UNUSED_POINTER, UNUSED_SIZE); vmArena->contracted(arena, base, size); return TRUE; } else { @@ -1138,8 +1138,8 @@ static void VMCompact(Arena arena, Trace trace) /* Destroy chunks that are completely free, but not the primary * chunk. See * TODO: add hysteresis here. See job003815. */ - /* FIXME: use UNUSED_SIZE instead of 0 */ - TreeTraverseAndDelete(&arena->chunkTree, vmChunkCompact, arena, 0); + TreeTraverseAndDelete(&arena->chunkTree, vmChunkCompact, arena, + UNUSED_SIZE); { Size vmem0 = trace->preTraceArenaReserved; From 17070893194d406b845dd76b7c41100a342450e9 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Wed, 11 Jun 2014 14:02:22 +0100 Subject: [PATCH 18/24] Oops, fumbled the merge. Copied from Perforce Change: 186493 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 12d7af1137b..73703277f07 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -388,7 +388,7 @@ void ArenaDestroy(Arena arena) LandFinish(ArenaFreeLand(arena)); /* The CBS block pool can't free its own memory via ArenaFree because - that would use the ZonedCBS. */ + that would use the freeLand. 
*/ MFSFinishTracts(ArenaCBSBlockPool(arena), arenaMFSPageFreeVisitor, UNUSED_POINTER, UNUSED_SIZE); From cf75884793476fd37e4c47c1c58950138bc13e0e Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Thu, 12 Jun 2014 12:18:30 +0100 Subject: [PATCH 19/24] Refer to rb's e-mail for potential optimization. Copied from Perforce Change: 186524 ServerID: perforce.ravenbrook.com --- mps/code/trace.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/mps/code/trace.c b/mps/code/trace.c index bb2fea2c875..56abf698e39 100644 --- a/mps/code/trace.c +++ b/mps/code/trace.c @@ -1286,7 +1286,13 @@ mps_res_t _mps_fix2(mps_ss_t mps_ss, mps_addr_t *mps_ref_io) * but inlined so that we can distinguish between "not pointing to * chunk" and "pointing to chunk but not to tract" so that we can * check the rank in the latter case. See - * */ + * + * + * If compilers fail to do a good job of inlining ChunkOfAddr and + * TreeFind then it may become necessary to inline at least the + * comparison against the root of the tree. See + * + */ if (!ChunkOfAddr(&chunk, ss->arena, ref)) /* Reference points outside MPS-managed address space: ignore. */ goto done; From 1d9afa915f72c070c8e9fde0799fca571ace633e Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Thu, 12 Jun 2014 18:46:49 +0100 Subject: [PATCH 20/24] Fix arenadescribetracts: * Return value from TreeVisitor is Bool, not Res, so pass Res back via a closure. * Can't use TRACT_TRACT_FOR while iterating over the chunk tree, because that macro uses ChunkOfAddr. (A plain loop is simpler.) * Mustn't try to describe unallocated tracts -- they might not even be mapped into memory. So consult the allocTable. Make tract functions more robust: * TractCheck must only check the pool if there is one (otherwise it segfaults for unallocated tracts) * TractLimit can't look up the arena via TractPool, because the tract might not have a pool. So pass in the arena as an argument. 
Copied from Perforce Change: 186547 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 74 ++++++++++++++++++++++++++++++++---------------- mps/code/tract.c | 12 ++++---- mps/code/tract.h | 4 ++- 3 files changed, 58 insertions(+), 32 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 0700b594880..172ef9c671a 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -496,45 +496,66 @@ Res ArenaDescribe(Arena arena, mps_lib_FILE *stream, Count depth) } -/* ArenaDescribeTractsInChunk -- describe the tracts in a chunk */ +/* arenaDescribeTractsInChunk -- describe the tracts in a chunk */ + +typedef struct ArenaDescribeTractsClosureStruct { + mps_lib_FILE *stream; + Res res; +} ArenaDescribeTractsClosureStruct, *ArenaDescribeTractsClosure; static Bool arenaDescribeTractsInChunk(Tree tree, void *closureP, Size closureS) { - mps_lib_FILE *stream = closureP; + ArenaDescribeTractsClosure cl = closureP; Count depth = closureS; + mps_lib_FILE *stream; Chunk chunk; - Tract tract; - Addr addr; - Res res; + Index pi; + Res res = ResFAIL; + if (closureP == NULL) return FALSE; chunk = ChunkOfTree(tree); - if (!TESTT(Chunk, chunk)) return ResFAIL; - if (stream == NULL) return ResFAIL; + if (!TESTT(Chunk, chunk)) goto fail; + stream = cl->stream; + if (stream == NULL) goto fail; res = WriteF(stream, depth, "Chunk [$P, $P) ($U) {\n", (WriteFP)chunk->base, (WriteFP)chunk->limit, (WriteFU)chunk->serial, NULL); - if (res != ResOK) return res; - - TRACT_TRACT_FOR(tract, addr, ChunkArena(chunk), - PageTract(ChunkPage(chunk, chunk->allocBase)), - chunk->limit) - { - res = WriteF(stream, depth + 2, - "[$P, $P) $P $U ($S)\n", - (WriteFP)TractBase(tract), (WriteFP)TractLimit(tract), - (WriteFP)TractPool(tract), - (WriteFU)(TractPool(tract)->serial), - (WriteFS)(TractPool(tract)->class->name), - NULL); - if (res != ResOK) return res; + if (res != ResOK) goto fail; + + for (pi = chunk->allocBase; pi < chunk->pages; ++pi) { + if (BTGet(chunk->allocTable, pi)) { + Tract tract 
= PageTract(ChunkPage(chunk, pi)); + res = WriteF(stream, depth + 2, "[$P, $P)", + (WriteFP)TractBase(tract), + (WriteFP)TractLimit(tract, ChunkArena(chunk)), + NULL); + if (res != ResOK) goto fail; + if (TractHasPool(tract)) { + Pool pool = TractPool(tract); + res = WriteF(stream, 0, " $P $U ($S)", + (WriteFP)pool, + (WriteFU)(pool->serial), + (WriteFS)(pool->class->name), + NULL); + if (res != ResOK) goto fail; + } + res = WriteF(stream, 0, "\n", NULL); + if (res != ResOK) goto fail; + } } res = WriteF(stream, depth, "} Chunk [$P, $P)\n", (WriteFP)chunk->base, (WriteFP)chunk->limit, NULL); - return res; + if (res != ResOK) goto fail; + + return TRUE; + +fail: + cl->res = res; + return FALSE; } @@ -542,13 +563,16 @@ static Bool arenaDescribeTractsInChunk(Tree tree, void *closureP, Size closureS) Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream, Count depth) { + ArenaDescribeTractsClosureStruct cl; + if (!TESTT(Arena, arena)) return ResFAIL; if (stream == NULL) return ResFAIL; + cl.stream = stream; + cl.res = ResOK; (void)TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, - arenaDescribeTractsInChunk, stream, depth); - - return ResOK; + arenaDescribeTractsInChunk, &cl, depth); + return cl.res; } diff --git a/mps/code/tract.c b/mps/code/tract.c index 834cb50a9f3..51a5d8dcdb1 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -37,8 +37,10 @@ SRCID(tract, "$Id$"); Bool TractCheck(Tract tract) { - CHECKU(Pool, TractPool(tract)); - CHECKL(AddrIsAligned(TractBase(tract), ArenaAlign(TractArena(tract)))); + if (TractHasPool(tract)) { + CHECKU(Pool, TractPool(tract)); + CHECKL(AddrIsAligned(TractBase(tract), ArenaAlign(TractArena(tract)))); + } if (TractHasSeg(tract)) { CHECKL(TraceSetCheck(TractWhite(tract))); CHECKU(Seg, (Seg)TractP(tract)); @@ -99,13 +101,11 @@ Addr (TractBase)(Tract tract) } -/* TractLimit -- return the limit address of a segment */ +/* TractLimit -- return the limit address of a tract */ -Addr TractLimit(Tract tract) +Addr 
TractLimit(Tract tract, Arena arena) { - Arena arena; AVERT_CRITICAL(Tract, tract); /* .tract.critical */ - arena = TractArena(tract); AVERT_CRITICAL(Arena, arena); return AddrAdd(TractBase(tract), arena->alignment); } diff --git a/mps/code/tract.h b/mps/code/tract.h index 0d6b5d87535..3def4d0a3d0 100644 --- a/mps/code/tract.h +++ b/mps/code/tract.h @@ -51,8 +51,10 @@ typedef struct TractStruct { /* Tract structure */ extern Addr (TractBase)(Tract tract); #define TractBase(tract) ((tract)->base) -extern Addr TractLimit(Tract tract); +extern Addr TractLimit(Tract tract, Arena arena); +#define TractHasPool(tract) \ + ((tract)->pool.state == PageStateALLOC && TractPool(tract)) #define TractPool(tract) ((tract)->pool.pool) #define TractP(tract) ((tract)->p) #define TractSetP(tract, pp) ((void)((tract)->p = (pp))) From b240945cb95a2b3d3b26be75d5456e61b66d5131 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Thu, 12 Jun 2014 19:06:10 +0100 Subject: [PATCH 21/24] Tract iteration interface is only used by the arena coverage test, so move it out of tract.c and into arenacv.c. Copied from Perforce Change: 186548 ServerID: perforce.ravenbrook.com --- mps/code/arenacv.c | 71 +++++++++++++++++++++++- mps/code/tract.c | 131 --------------------------------------------- mps/code/tract.h | 4 -- 3 files changed, 69 insertions(+), 137 deletions(-) diff --git a/mps/code/arenacv.c b/mps/code/arenacv.c index fada3b82692..110b4ff4ab3 100644 --- a/mps/code/arenacv.c +++ b/mps/code/arenacv.c @@ -87,6 +87,73 @@ typedef struct AllocatorClassStruct { } AllocatorClassStruct; +/* tractSearchInChunk -- find a tract in a chunk + * + * .tract-search: Searches for a tract in the chunk starting at page + * index i, return FALSE if there is none. 
+ */ + +static Bool tractSearchInChunk(Tract *tractReturn, Chunk chunk, Index i) +{ + AVER_CRITICAL(chunk->allocBase <= i); + AVER_CRITICAL(i <= chunk->pages); + + while (i < chunk->pages + && !(BTGet(chunk->allocTable, i) + && PageIsAllocated(ChunkPage(chunk, i)))) { + ++i; + } + if (i == chunk->pages) + return FALSE; + AVER(i < chunk->pages); + *tractReturn = PageTract(ChunkPage(chunk, i)); + return TRUE; +} + + +/* tractSearch -- find next tract above address + * + * Searches for the next tract in increasing address order. + * The tract returned is the next one along from addr (i.e., + * it has a base address bigger than addr and no other tract + * with a base address bigger than addr has a smaller base address). + * + * Returns FALSE if there is no tract to find (end of the arena). + */ + +static Bool tractSearch(Tract *tractReturn, Arena arena, Addr addr) +{ + Bool b; + Chunk chunk; + Tree tree; + + b = ChunkOfAddr(&chunk, arena, addr); + if (b) { + Index i; + + i = INDEX_OF_ADDR(chunk, addr); + /* There are fewer pages than addresses, therefore the */ + /* page index can never wrap around */ + AVER_CRITICAL(i+1 != 0); + + if (tractSearchInChunk(tractReturn, chunk, i+1)) { + return TRUE; + } + } + while (TreeFindNext(&tree, ArenaChunkTree(arena), TreeKeyOfAddrVar(addr), + ChunkCompare)) + { + chunk = ChunkOfTree(tree); + addr = chunk->base; + /* Start from allocBase to skip the tables. 
*/ + if (tractSearchInChunk(tractReturn, chunk, chunk->allocBase)) { + return TRUE; + } + } + return FALSE; +} + + /* Implementation of the tract-based interchangability interface */ static Res allocAsTract(AllocInfoStruct *aiReturn, SegPref pref, @@ -114,7 +181,7 @@ static Bool firstAsTract(AllocInfoStruct *aiReturn, Arena arena) { Bool res; Tract tract; - res = TractFirst(&tract, arena); + res = tractSearch(&tract, arena, 0); if (res) { aiReturn->the.tractData.base = TractBase(tract); aiReturn->the.tractData.size = ArenaAlign(arena);; @@ -128,7 +195,7 @@ static Bool nextAsTract(AllocInfoStruct *nextReturn, AllocInfo ai, { Bool res; Tract tract; - res = TractNext(&tract, arena, ai->the.tractData.base); + res = tractSearch(&tract, arena, ai->the.tractData.base); if (res) { nextReturn->the.tractData.base = TractBase(tract); nextReturn->the.tractData.size = ArenaAlign(arena);; diff --git a/mps/code/tract.c b/mps/code/tract.c index 51a5d8dcdb1..0f9084d6345 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -337,34 +337,6 @@ Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr) } -/* chunkAboveAddr - * - * Finds the next higher chunk in memory which does _not_ contain - * addr. If there is such a chunk, update *chunkReturn and return - * TRUE, otherwise return FALSE. - */ - -static Bool chunkAboveAddr(Chunk *chunkReturn, Arena arena, Addr addr) -{ - Tree tree; - Chunk chunk; - - AVER_CRITICAL(chunkReturn != NULL); - AVERT_CRITICAL(Arena, arena); - /* addr is arbitrary */ - - if (TreeFindNext(&tree, ArenaChunkTree(arena), TreeKeyOfAddrVar(addr), - ChunkCompare)) - { - chunk = ChunkOfTree(tree); - AVER_CRITICAL(addr < chunk->base); - *chunkReturn = chunk; - return TRUE; - } - return FALSE; -} - - /* IndexOfAddr -- return the index of the page containing an address * * Function version of INDEX_OF_ADDR, for debugging purposes. 
@@ -465,109 +437,6 @@ Tract TractOfBaseAddr(Arena arena, Addr addr) } -/* tractSearchInChunk -- search for a tract - * - * .tract-search: Searches for a tract in the chunk starting at page - * index i, return NULL if there is none. .tract-search.private: This - * function is private to this module and is used in the tract iteration - * protocol (TractFirst and TractNext). - */ - -static Bool tractSearchInChunk(Tract *tractReturn, Chunk chunk, Index i) -{ - AVER_CRITICAL(chunk->allocBase <= i); - AVER_CRITICAL(i <= chunk->pages); - - while (i < chunk->pages - && !(BTGet(chunk->allocTable, i) - && PageIsAllocated(ChunkPage(chunk, i)))) { - ++i; - } - if (i == chunk->pages) - return FALSE; - AVER(i < chunk->pages); - *tractReturn = PageTract(ChunkPage(chunk, i)); - return TRUE; -} - - -/* tractSearch - * - * Searches for the next tract in increasing address order. - * The tract returned is the next one along from addr (i.e., - * it has a base address bigger than addr and no other tract - * with a base address bigger than addr has a smaller base address). - * - * Returns FALSE if there is no tract to find (end of the arena). - */ - -static Bool tractSearch(Tract *tractReturn, Arena arena, Addr addr) -{ - Bool b; - Chunk chunk; - - b = ChunkOfAddr(&chunk, arena, addr); - if (b) { - Index i; - - i = INDEX_OF_ADDR(chunk, addr); - /* There are fewer pages than addresses, therefore the */ - /* page index can never wrap around */ - AVER_CRITICAL(i+1 != 0); - - if (tractSearchInChunk(tractReturn, chunk, i+1)) { - return TRUE; - } - } - while (chunkAboveAddr(&chunk, arena, addr)) { - addr = chunk->base; - /* Start from allocBase to skip the tables. */ - if (tractSearchInChunk(tractReturn, chunk, chunk->allocBase)) { - return TRUE; - } - } - return FALSE; -} - - -/* TractFirst -- return the first tract in the arena - * - * This is used to start an iteration over all tracts in the arena, not - * including the ones used for page tables and other arena structures. 
- */ - -Bool TractFirst(Tract *tractReturn, Arena arena) -{ - AVER(tractReturn != NULL); - AVERT(Arena, arena); - - /* .tractfirst.assume.nozero: We assume that there is no tract */ - /* with base address (Addr)0. Happily this assumption is sound */ - /* for a number of reasons. */ - return tractSearch(tractReturn, arena, (Addr)0); -} - - -/* TractNext -- return the "next" tract in the arena - * - * TractNext finds the tract with the lowest base address which is - * greater than a specified address. The address must be (or once - * have been) the base address of a tract. - * - * This is used as the iteration step when iterating over all - * tracts in the arena. - */ - -Bool TractNext(Tract *tractReturn, Arena arena, Addr addr) -{ - AVER_CRITICAL(tractReturn != NULL); /* .tract.critical */ - AVERT_CRITICAL(Arena, arena); - AVER_CRITICAL(AddrIsAligned(addr, arena->alignment)); - - return tractSearch(tractReturn, arena, addr); -} - - /* PageAlloc * * Sets up the page descriptor for an allocated page to turn it into a Tract. diff --git a/mps/code/tract.h b/mps/code/tract.h index 3def4d0a3d0..88608f7fe2e 100644 --- a/mps/code/tract.h +++ b/mps/code/tract.h @@ -217,10 +217,6 @@ extern Index IndexOfAddr(Chunk chunk, Addr addr); END -extern Bool TractFirst(Tract *tractReturn, Arena arena); -extern Bool TractNext(Tract *tractReturn, Arena arena, Addr addr); - - /* TRACT_TRACT_FOR -- iterate over a range of tracts in a chunk * * See . From 4538221eb16f247e12f01c6e14e32dfa5b13b71a Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Thu, 12 Jun 2014 19:52:43 +0100 Subject: [PATCH 22/24] Remove obsolete macro arenachunkring. 
Copied from Perforce Change: 186549 ServerID: perforce.ravenbrook.com --- mps/code/arenavm.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c index e8c3cdd15e5..33d1579e82b 100644 --- a/mps/code/arenavm.c +++ b/mps/code/arenavm.c @@ -968,8 +968,6 @@ static Size chunkUnmapAroundPage(Chunk chunk, Size size, Page page) * unmapped. */ -#define ArenaChunkRing(arena) (&(arena)->chunkRing) - static Size arenaUnmapSpare(Arena arena, Size size, Chunk filter) { Ring node; From 8c9de5f775b1f556ce05f47c308908216313e292 Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Thu, 12 Jun 2014 20:28:50 +0100 Subject: [PATCH 23/24] New function arenachunktreetraverse ensures that calls to chunkofaddr are reliably detected. Copied from Perforce Change: 186550 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 41 ++++++++++++++++++++++++++++++++++------- mps/code/arenacl.c | 5 +++-- mps/code/arenavm.c | 5 +++-- mps/code/mpm.h | 1 + mps/code/tree.c | 1 + mps/code/tree.h | 10 ++++++++++ 6 files changed, 52 insertions(+), 11 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 172ef9c671a..668cdab9e5c 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -147,8 +147,7 @@ Bool ArenaCheck(Arena arena) if (arena->primary != NULL) { CHECKD(Chunk, arena->primary); } - /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. */ - CHECKL(TreeCheck(ArenaChunkTree(arena))); + /* Can't check chunkTree, it might be bad during ArenaChunkTreeTraverse. 
*/ /* nothing to check for chunkSerial */ CHECKL(LocusCheck(arena)); @@ -570,8 +569,7 @@ Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream, Count depth) cl.stream = stream; cl.res = ResOK; - (void)TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, - arenaDescribeTractsInChunk, &cl, depth); + (void)ArenaChunkTreeTraverse(arena, arenaDescribeTractsInChunk, &cl, depth); return cl.res; } @@ -635,6 +633,34 @@ Res ControlDescribe(Arena arena, mps_lib_FILE *stream, Count depth) } +/* ArenaChunkTreeTraverse -- call visitor for each chunk */ + +Bool ArenaChunkTreeTraverse(Arena arena, TreeVisitor visitor, + void *closureP, Size closureS) +{ + Bool b; + Tree tree; + + AVERT(Arena, arena); + AVER(FUNCHECK(visitor)); + /* closureP and closureS are arbitrary. */ + + /* During TreeTraverse, the tree is invalid, so temporarily set + * arena->chunkTree to an invalid value, to ensure that if someone + * calls ChunkOfAddr while we are iterating this results in an + * assertion failure (in checking varieties) or a segfault, rather + * than a silent failure to find the chunk. + */ + + tree = ArenaChunkTree(arena); + arena->chunkTree = TreeBAD; + b = TreeTraverse(tree, ChunkCompare, ChunkKey, visitor, closureP, closureS); + arena->chunkTree = tree; + + return b; +} + + /* ArenaChunkInsert -- insert chunk into arena's chunk tree */ void ArenaChunkInsert(Arena arena, Tree tree) { @@ -681,6 +707,7 @@ static Bool arenaAllocPageInChunk(Tree tree, void *closureP, Size closureS) AVER(closureP != NULL); cl = closureP; AVER(cl->arena == ChunkArena(chunk)); + AVER(closureS == UNUSED_SIZE); UNUSED(closureS); /* Already searched in arenaAllocPage. */ @@ -725,12 +752,12 @@ static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool) /* Favour the primary chunk, because pages allocated this way aren't currently freed, and we don't want to prevent chunks being destroyed. */ /* TODO: Consider how the ArenaCBSBlockPool might free pages. 
*/ - if (!arenaAllocPageInChunk(&arena->primary->chunkTree, &closure, 0)) + if (!arenaAllocPageInChunk(&arena->primary->chunkTree, &closure, UNUSED_SIZE)) goto found; closure.avoid = arena->primary; - if (!TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, - arenaAllocPageInChunk, &closure, 0)) + if (!ArenaChunkTreeTraverse(arena, arenaAllocPageInChunk, + &closure, UNUSED_SIZE)) goto found; AVER(closure.res != ResOK); diff --git a/mps/code/arenacl.c b/mps/code/arenacl.c index cb4d9865ae3..a6d7f79c65c 100644 --- a/mps/code/arenacl.c +++ b/mps/code/arenacl.c @@ -349,6 +349,7 @@ static Bool clientArenaReservedVisitor(Tree tree, void *closureP, Size closureS) AVERT(Chunk, chunk); AVER(closureP != 0); size = closureP; + AVER(closureS == UNUSED_SIZE); UNUSED(closureS); *size += ChunkSize(chunk); @@ -361,8 +362,8 @@ static Size ClientArenaReserved(Arena arena) AVERT(Arena, arena); - (void)TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, - clientArenaReservedVisitor, &size, 0); + (void)ArenaChunkTreeTraverse(arena, clientArenaReservedVisitor, + &size, UNUSED_SIZE); return size; } diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c index 33d1579e82b..07a1d492aa2 100644 --- a/mps/code/arenavm.c +++ b/mps/code/arenavm.c @@ -642,6 +642,7 @@ static Bool vmArenaReservedVisitor(Tree tree, void *closureP, Size closureS) AVERT(Chunk, chunk); AVER(closureP != 0); size = closureP; + AVER(closureS == UNUSED_SIZE); UNUSED(closureS); *size += VMReserved(Chunk2VMChunk(chunk)->vm); @@ -655,8 +656,8 @@ static Size VMArenaReserved(Arena arena) AVERT(Arena, arena); - (void)TreeTraverse(ArenaChunkTree(arena), ChunkCompare, ChunkKey, - vmArenaReservedVisitor, &size, 0); + (void)ArenaChunkTreeTraverse(arena, vmArenaReservedVisitor, + &size, UNUSED_SIZE); return size; } diff --git a/mps/code/mpm.h b/mps/code/mpm.h index ccec0aff383..a0d5f858a10 100644 --- a/mps/code/mpm.h +++ b/mps/code/mpm.h @@ -557,6 +557,7 @@ extern Res ArenaStartCollect(Globals globals, int why); extern 
Res ArenaCollect(Globals globals, int why); extern Bool ArenaHasAddr(Arena arena, Addr addr); extern Res ArenaAddrObject(Addr *pReturn, Arena arena, Addr addr); +extern Bool ArenaChunkTreeTraverse(Arena arena, TreeVisitor visitor, void *closureP, Size closureS); extern void ArenaChunkInsert(Arena arena, Tree tree); extern void ArenaSetEmergency(Arena arena, Bool emergency); diff --git a/mps/code/tree.c b/mps/code/tree.c index e6dd902f3fd..ff77fde15c4 100644 --- a/mps/code/tree.c +++ b/mps/code/tree.c @@ -24,6 +24,7 @@ SRCID(tree, "$Id$"); Bool TreeCheck(Tree tree) { if (tree != TreeEMPTY) { + CHECKL(tree != TreeBAD); CHECKL(tree != NULL); CHECKL(tree->left == TreeEMPTY || tree->left != NULL); CHECKL(tree->right == TreeEMPTY || tree->right != NULL); diff --git a/mps/code/tree.h b/mps/code/tree.h index 4ac1a82eaaf..5c0ea05ac51 100644 --- a/mps/code/tree.h +++ b/mps/code/tree.h @@ -64,6 +64,16 @@ typedef TreeKey (*TreeKeyMethod)(Tree tree); #define TreeEMPTY ((Tree)0) + +/* TreeBAD -- an invalid tree + * + * TreeBAD is a value that's not equal to any tree (not even to + * the empty tree). + */ + +#define TreeBAD ((Tree)7) + + extern Bool TreeCheck(Tree tree); extern Bool TreeCheckLeaf(Tree tree); extern Count TreeDebugCount(Tree tree, TreeCompare compare, TreeKeyMethod key); From 100405cac2d7a6556491279759b8714f98b2feea Mon Sep 17 00:00:00 2001 From: Gareth Rees Date: Fri, 13 Jun 2014 12:31:47 +0100 Subject: [PATCH 24/24] Restore the chunk ring. 
Copied from Perforce Change: 186558 ServerID: perforce.ravenbrook.com --- mps/code/arena.c | 183 +++++++++++++++------------------------------ mps/code/arenacl.c | 29 +++---- mps/code/arenavm.c | 33 ++------ mps/code/mpm.h | 3 +- mps/code/mpmst.h | 3 +- mps/code/tract.c | 4 +- mps/code/tract.h | 1 + mps/code/tree.c | 1 - mps/code/tree.h | 9 --- 9 files changed, 83 insertions(+), 183 deletions(-) diff --git a/mps/code/arena.c b/mps/code/arena.c index 668cdab9e5c..1d91deca1f2 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -147,7 +147,10 @@ Bool ArenaCheck(Arena arena) if (arena->primary != NULL) { CHECKD(Chunk, arena->primary); } - /* Can't check chunkTree, it might be bad during ArenaChunkTreeTraverse. */ + CHECKD_NOSIG(Ring, &arena->chunkRing); + /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. */ + CHECKL(TreeCheck(ArenaChunkTree(arena))); + /* TODO: check that the chunkRing and chunkTree have identical members */ /* nothing to check for chunkSerial */ CHECKL(LocusCheck(arena)); @@ -204,6 +207,7 @@ Res ArenaInit(Arena arena, ArenaClass class, Align alignment, ArgList args) arena->zoned = zoned; arena->primary = NULL; + RingInit(&arena->chunkRing); arena->chunkTree = TreeEMPTY; arena->chunkSerial = (Serial)0; @@ -349,6 +353,7 @@ void ArenaFinish(Arena arena) arena->sig = SigInvalid; GlobalsFinish(ArenaGlobals(arena)); LocusFinish(arena); + RingFinish(&arena->chunkRing); AVER(ArenaChunkTree(arena) == TreeEMPTY); } @@ -497,31 +502,20 @@ Res ArenaDescribe(Arena arena, mps_lib_FILE *stream, Count depth) /* arenaDescribeTractsInChunk -- describe the tracts in a chunk */ -typedef struct ArenaDescribeTractsClosureStruct { - mps_lib_FILE *stream; - Res res; -} ArenaDescribeTractsClosureStruct, *ArenaDescribeTractsClosure; - -static Bool arenaDescribeTractsInChunk(Tree tree, void *closureP, Size closureS) +static Res arenaDescribeTractsInChunk(Chunk chunk, mps_lib_FILE *stream, Count depth) { - ArenaDescribeTractsClosure cl = closureP; - Count depth = closureS; 
- mps_lib_FILE *stream; - Chunk chunk; + Res res; Index pi; - Res res = ResFAIL; - if (closureP == NULL) return FALSE; - chunk = ChunkOfTree(tree); - if (!TESTT(Chunk, chunk)) goto fail; - stream = cl->stream; - if (stream == NULL) goto fail; + if (stream == NULL) return ResFAIL; + if (!TESTT(Chunk, chunk)) return ResFAIL; + if (stream == NULL) return ResFAIL; res = WriteF(stream, depth, "Chunk [$P, $P) ($U) {\n", (WriteFP)chunk->base, (WriteFP)chunk->limit, (WriteFU)chunk->serial, NULL); - if (res != ResOK) goto fail; + if (res != ResOK) return res; for (pi = chunk->allocBase; pi < chunk->pages; ++pi) { if (BTGet(chunk->allocTable, pi)) { @@ -530,7 +524,7 @@ static Bool arenaDescribeTractsInChunk(Tree tree, void *closureP, Size closureS) (WriteFP)TractBase(tract), (WriteFP)TractLimit(tract, ChunkArena(chunk)), NULL); - if (res != ResOK) goto fail; + if (res != ResOK) return res; if (TractHasPool(tract)) { Pool pool = TractPool(tract); res = WriteF(stream, 0, " $P $U ($S)", @@ -538,23 +532,17 @@ static Bool arenaDescribeTractsInChunk(Tree tree, void *closureP, Size closureS) (WriteFU)(pool->serial), (WriteFS)(pool->class->name), NULL); - if (res != ResOK) goto fail; + if (res != ResOK) return res; } res = WriteF(stream, 0, "\n", NULL); - if (res != ResOK) goto fail; + if (res != ResOK) return res; } } res = WriteF(stream, depth, "} Chunk [$P, $P)\n", (WriteFP)chunk->base, (WriteFP)chunk->limit, NULL); - if (res != ResOK) goto fail; - - return TRUE; - -fail: - cl->res = res; - return FALSE; + return res; } @@ -562,15 +550,19 @@ static Bool arenaDescribeTractsInChunk(Tree tree, void *closureP, Size closureS) Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream, Count depth) { - ArenaDescribeTractsClosureStruct cl; + Ring node, next; + Res res; if (!TESTT(Arena, arena)) return ResFAIL; if (stream == NULL) return ResFAIL; - cl.stream = stream; - cl.res = ResOK; - (void)ArenaChunkTreeTraverse(arena, arenaDescribeTractsInChunk, &cl, depth); - return cl.res; + 
RING_FOR(node, &arena->chunkRing, next) { + Chunk chunk = RING_ELT(Chunk, chunkRing, node); + res = arenaDescribeTractsInChunk(chunk, stream, depth); + if (res != ResOK) return res; + } + + return ResOK; } @@ -633,48 +625,22 @@ Res ControlDescribe(Arena arena, mps_lib_FILE *stream, Count depth) } -/* ArenaChunkTreeTraverse -- call visitor for each chunk */ - -Bool ArenaChunkTreeTraverse(Arena arena, TreeVisitor visitor, - void *closureP, Size closureS) -{ - Bool b; - Tree tree; - - AVERT(Arena, arena); - AVER(FUNCHECK(visitor)); - /* closureP and closureS are arbitrary. */ - - /* During TreeTraverse, the tree is invalid, so temporarily set - * arena->chunkTree to an invalid value, to ensure that if someone - * calls ChunkOfAddr while we are iterating this results in an - * assertion failure (in checking varieties) or a segfault, rather - * than a silent failure to find the chunk. - */ - - tree = ArenaChunkTree(arena); - arena->chunkTree = TreeBAD; - b = TreeTraverse(tree, ChunkCompare, ChunkKey, visitor, closureP, closureS); - arena->chunkTree = tree; - - return b; -} - - /* ArenaChunkInsert -- insert chunk into arena's chunk tree */ -void ArenaChunkInsert(Arena arena, Tree tree) { +void ArenaChunkInsert(Arena arena, Chunk chunk) { Bool inserted; - Tree updatedTree = NULL; + Tree tree, updatedTree = NULL; AVERT(Arena, arena); - AVERT(Tree, tree); + AVERT(Chunk, chunk); + tree = &chunk->chunkTree; inserted = TreeInsert(&updatedTree, ArenaChunkTree(arena), tree, ChunkKey(tree), ChunkCompare); AVER(inserted && updatedTree); TreeBalance(&updatedTree); arena->chunkTree = updatedTree; + RingAppend(&arena->chunkRing, &chunk->chunkRing); } @@ -686,87 +652,56 @@ void ArenaChunkInsert(Arena arena, Tree tree) { * bootstrap. 
*/ -typedef struct ArenaAllocPageClosureStruct { - Arena arena; - Pool pool; - Addr base; - Chunk avoid; - Res res; -} ArenaAllocPageClosureStruct, *ArenaAllocPageClosure; - -static Bool arenaAllocPageInChunk(Tree tree, void *closureP, Size closureS) +static Res arenaAllocPageInChunk(Addr *baseReturn, Chunk chunk, Pool pool) { - ArenaAllocPageClosure cl; - Chunk chunk; - Index basePageIndex, limitPageIndex; Res res; + Index basePageIndex, limitPageIndex; + Arena arena; - AVERT(Tree, tree); - chunk = ChunkOfTree(tree); + AVER(baseReturn != NULL); AVERT(Chunk, chunk); - AVER(closureP != NULL); - cl = closureP; - AVER(cl->arena == ChunkArena(chunk)); - AVER(closureS == UNUSED_SIZE); - UNUSED(closureS); + AVERT(Pool, pool); + arena = ChunkArena(chunk); - /* Already searched in arenaAllocPage. */ - if (chunk == cl->avoid) { - cl->res = ResRESOURCE; - return TRUE; - } - if (!BTFindShortResRange(&basePageIndex, &limitPageIndex, chunk->allocTable, chunk->allocBase, chunk->pages, 1)) - { - cl->res = ResRESOURCE; - return TRUE; - } + return ResRESOURCE; - res = (*cl->arena->class->pagesMarkAllocated)(cl->arena, chunk, - basePageIndex, 1, cl->pool); - if (res != ResOK) { - cl->res = res; - return TRUE; - } - - cl->base = PageIndexBase(chunk, basePageIndex); - return FALSE; + res = (*arena->class->pagesMarkAllocated)(arena, chunk, + basePageIndex, 1, + pool); + if (res != ResOK) + return res; + + *baseReturn = PageIndexBase(chunk, basePageIndex); + return ResOK; } static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool) { - ArenaAllocPageClosureStruct closure; + Res res; AVER(baseReturn != NULL); AVERT(Arena, arena); AVERT(Pool, pool); - closure.arena = arena; - closure.pool = pool; - closure.base = NULL; - closure.avoid = NULL; - closure.res = ResOK; - /* Favour the primary chunk, because pages allocated this way aren't currently freed, and we don't want to prevent chunks being destroyed. */ /* TODO: Consider how the ArenaCBSBlockPool might free pages. 
*/ - if (!arenaAllocPageInChunk(&arena->primary->chunkTree, &closure, UNUSED_SIZE)) - goto found; - - closure.avoid = arena->primary; - if (!ArenaChunkTreeTraverse(arena, arenaAllocPageInChunk, - &closure, UNUSED_SIZE)) - goto found; - - AVER(closure.res != ResOK); - return closure.res; - -found: - AVER(closure.base != NULL); - *baseReturn = closure.base; - return ResOK; + res = arenaAllocPageInChunk(baseReturn, arena->primary, pool); + if (res != ResOK) { + Ring node, next; + RING_FOR(node, &arena->chunkRing, next) { + Chunk chunk = RING_ELT(Chunk, chunkRing, node); + if (chunk != arena->primary) { + res = arenaAllocPageInChunk(baseReturn, chunk, pool); + if (res == ResOK) + break; + } + } + } + return res; } diff --git a/mps/code/arenacl.c b/mps/code/arenacl.c index a6d7f79c65c..04594610112 100644 --- a/mps/code/arenacl.c +++ b/mps/code/arenacl.c @@ -339,31 +339,20 @@ static Res ClientArenaExtend(Arena arena, Addr base, Size size) /* ClientArenaReserved -- return the amount of reserved address space */ -static Bool clientArenaReservedVisitor(Tree tree, void *closureP, Size closureS) -{ - Size *size; - Chunk chunk; - - AVERT(Tree, tree); - chunk = ChunkOfTree(tree); - AVERT(Chunk, chunk); - AVER(closureP != 0); - size = closureP; - AVER(closureS == UNUSED_SIZE); - UNUSED(closureS); - - *size += ChunkSize(chunk); - return TRUE; -} - static Size ClientArenaReserved(Arena arena) { - Size size = 0; + Size size; + Ring node, nextNode; AVERT(Arena, arena); - (void)ArenaChunkTreeTraverse(arena, clientArenaReservedVisitor, - &size, UNUSED_SIZE); + size = 0; + /* .req.extend.slow */ + RING_FOR(node, &arena->chunkRing, nextNode) { + Chunk chunk = RING_ELT(Chunk, chunkRing, node); + AVERT(Chunk, chunk); + size += ChunkSize(chunk); + } return size; } diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c index 07a1d492aa2..700a1f229ae 100644 --- a/mps/code/arenavm.c +++ b/mps/code/arenavm.c @@ -632,34 +632,17 @@ static void VMArenaFinish(Arena arena) * Add up the reserved space 
from all the chunks. */ -static Bool vmArenaReservedVisitor(Tree tree, void *closureP, Size closureS) -{ - Size *size; - Chunk chunk; - - AVERT(Tree, tree); - chunk = ChunkOfTree(tree); - AVERT(Chunk, chunk); - AVER(closureP != 0); - size = closureP; - AVER(closureS == UNUSED_SIZE); - UNUSED(closureS); - - *size += VMReserved(Chunk2VMChunk(chunk)->vm); - return TRUE; -} - - static Size VMArenaReserved(Arena arena) { - Size size = 0; + Size reserved; + Ring node, next; - AVERT(Arena, arena); - - (void)ArenaChunkTreeTraverse(arena, vmArenaReservedVisitor, - &size, UNUSED_SIZE); - - return size; + reserved = 0; + RING_FOR(node, &arena->chunkRing, next) { + VMChunk vmChunk = Chunk2VMChunk(RING_ELT(Chunk, chunkRing, node)); + reserved += VMReserved(vmChunk->vm); + } + return reserved; } diff --git a/mps/code/mpm.h b/mps/code/mpm.h index a0d5f858a10..e3a7582ba4b 100644 --- a/mps/code/mpm.h +++ b/mps/code/mpm.h @@ -557,8 +557,7 @@ extern Res ArenaStartCollect(Globals globals, int why); extern Res ArenaCollect(Globals globals, int why); extern Bool ArenaHasAddr(Arena arena, Addr addr); extern Res ArenaAddrObject(Addr *pReturn, Arena arena, Addr addr); -extern Bool ArenaChunkTreeTraverse(Arena arena, TreeVisitor visitor, void *closureP, Size closureS); -extern void ArenaChunkInsert(Arena arena, Tree tree); +extern void ArenaChunkInsert(Arena arena, Chunk chunk); extern void ArenaSetEmergency(Arena arena, Bool emergency); extern Bool ArenaEmergency(Arena arean); diff --git a/mps/code/mpmst.h b/mps/code/mpmst.h index 02a9b91bc24..2edde40e297 100644 --- a/mps/code/mpmst.h +++ b/mps/code/mpmst.h @@ -732,7 +732,8 @@ typedef struct mps_arena_s { Addr lastTractBase; /* base address of lastTract */ Chunk primary; /* the primary chunk */ - Tree chunkTree; /* all the chunks */ + RingStruct chunkRing; /* all the chunks, in a ring for iteration */ + Tree chunkTree; /* all the chunks, in a tree for fast lookup */ Serial chunkSerial; /* next chunk number */ Bool hasFreeLand; /* Is 
freeLand available? */ diff --git a/mps/code/tract.c b/mps/code/tract.c index 0f9084d6345..f010a7231dd 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -184,6 +184,7 @@ Res ChunkInit(Chunk chunk, Arena arena, chunk->serial = (arena->chunkSerial)++; chunk->arena = arena; + RingInit(&chunk->chunkRing); chunk->pageSize = pageSize; chunk->pageShift = pageShift = SizeLog2(pageSize); @@ -229,7 +230,7 @@ Res ChunkInit(Chunk chunk, Arena arena, chunk->sig = ChunkSig; AVERT(Chunk, chunk); - ArenaChunkInsert(arena, &chunk->chunkTree); + ArenaChunkInsert(arena, chunk); /* As part of the bootstrap, the first created chunk becomes the primary chunk. This step allows AreaFreeLandInsert to allocate pages. */ @@ -267,6 +268,7 @@ void ChunkFinish(Chunk chunk) chunk->sig = SigInvalid; TreeFinish(&chunk->chunkTree); + RingRemove(&chunk->chunkRing); if (chunk->arena->primary == chunk) chunk->arena->primary = NULL; diff --git a/mps/code/tract.h b/mps/code/tract.h index 88608f7fe2e..5be02f0c88e 100644 --- a/mps/code/tract.h +++ b/mps/code/tract.h @@ -137,6 +137,7 @@ typedef struct ChunkStruct { Sig sig; /* */ Serial serial; /* serial within the arena */ Arena arena; /* parent arena */ + RingStruct chunkRing; /* node in ring of all chunks in arena */ TreeStruct chunkTree; /* node in tree of all chunks in arena */ Size pageSize; /* size of pages */ Shift pageShift; /* log2 of page size, for shifts */ diff --git a/mps/code/tree.c b/mps/code/tree.c index ff77fde15c4..e6dd902f3fd 100644 --- a/mps/code/tree.c +++ b/mps/code/tree.c @@ -24,7 +24,6 @@ SRCID(tree, "$Id$"); Bool TreeCheck(Tree tree) { if (tree != TreeEMPTY) { - CHECKL(tree != TreeBAD); CHECKL(tree != NULL); CHECKL(tree->left == TreeEMPTY || tree->left != NULL); CHECKL(tree->right == TreeEMPTY || tree->right != NULL); diff --git a/mps/code/tree.h b/mps/code/tree.h index 5c0ea05ac51..296c9528a1a 100644 --- a/mps/code/tree.h +++ b/mps/code/tree.h @@ -65,15 +65,6 @@ typedef TreeKey (*TreeKeyMethod)(Tree tree); #define TreeEMPTY 
((Tree)0) -/* TreeBAD -- an invalid tree - * - * TreeBAD is a value that's not equal to any tree (not even to - * the empty tree). - */ - -#define TreeBAD ((Tree)7) - - extern Bool TreeCheck(Tree tree); extern Bool TreeCheckLeaf(Tree tree); extern Count TreeDebugCount(Tree tree, TreeCompare compare, TreeKeyMethod key);