Catch-up merge from master sources to branch/2014-10-26/sc.

Copied from Perforce
 Change: 190483
 ServerID: perforce.ravenbrook.com
This commit is contained in:
Richard Brooksby 2016-03-27 21:28:39 +01:00
commit f501f73eb9
102 changed files with 1913 additions and 628 deletions

View file

@ -169,13 +169,14 @@ static mps_pool_debug_option_s fenceOptions = {
*/
static void test(mps_arena_class_t arena_class, mps_arg_s arena_args[],
size_t arena_grain_size,
mps_pool_debug_option_s *options)
{
mps_arena_t arena;
die(mps_arena_create_k(&arena, arena_class, arena_args), "mps_arena_create");
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
mps_align_t align = rnd_align(sizeof(void *), arena_grain_size);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE);
@ -185,18 +186,19 @@ static void test(mps_arena_class_t arena_class, mps_arg_s arena_args[],
mps_class_mvff(), args), "stress MVFF");
} MPS_ARGS_END(args);
/* IWBN to test MVFFDebug, but the MPS doesn't support debugging APs, */
/* yet (MV Debug works here, because it fakes it through PoolAlloc). */
/* IWBN to test MVFFDebug, but the MPS doesn't support debugging
APs, yet (MV Debug works here, because it fakes it through
PoolAlloc). See job003995. */
MPS_ARGS_BEGIN(args) {
mps_align_t align = (mps_align_t)1 << (rnd() % 6);
mps_align_t align = rnd_align(sizeof(void *), arena_grain_size);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
die(stress(arena, NULL, align, randomSizeAligned, "MV",
mps_class_mv(), args), "stress MV");
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
mps_align_t align = (mps_align_t)1 << (rnd() % 6);
mps_align_t align = rnd_align(sizeof(void *), arena_grain_size);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, options);
die(stress(arena, options, align, randomSizeAligned, "MV debug",
@ -204,7 +206,7 @@ static void test(mps_arena_class_t arena_class, mps_arg_s arena_args[],
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
mps_align_t align = rnd_align(sizeof(void *), arena_grain_size);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
die(stress(arena, NULL, align, randomSizeAligned, "MVT",
mps_class_mvt(), args), "stress MVT");
@ -218,28 +220,33 @@ static void test(mps_arena_class_t arena_class, mps_arg_s arena_args[],
int main(int argc, char *argv[])
{
size_t arena_grain_size;
testlib_init(argc, argv);
arena_grain_size = rnd_grain(2 * testArenaSIZE);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 2 * testArenaSIZE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(2*testArenaSIZE));
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size);
MPS_ARGS_ADD(args, MPS_KEY_COMMIT_LIMIT, testArenaSIZE);
test(mps_arena_class_vm(), args, &fenceOptions);
test(mps_arena_class_vm(), args, arena_grain_size, &fenceOptions);
} MPS_ARGS_END(args);
arena_grain_size = rnd_grain(2 * testArenaSIZE);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 2 * testArenaSIZE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(2*testArenaSIZE));
test(mps_arena_class_vm(), args, &bothOptions);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size);
test(mps_arena_class_vm(), args, arena_grain_size, &bothOptions);
} MPS_ARGS_END(args);
arena_grain_size = rnd_grain(testArenaSIZE);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_CL_BASE, malloc(testArenaSIZE));
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(testArenaSIZE));
test(mps_arena_class_cl(), args, &bothOptions);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size);
test(mps_arena_class_cl(), args, arena_grain_size, &bothOptions);
} MPS_ARGS_END(args);
printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);

View file

@ -95,6 +95,7 @@ TEST_TARGETS=\
sacss.exe \
segsmss.exe \
steptest.exe \
tagtest.exe \
teletest.exe \
walkt0.exe \
zcoll.exe \

View file

@ -483,8 +483,8 @@
/* Shield Configuration -- see <code/shield.c> */
#define ShieldCacheSIZE ((size_t)16)
#define ShieldDepthWIDTH (4)
#define ShieldQueueLENGTH 512 /* initial length of shield queue */
#define ShieldDepthWIDTH 4 /* log2(max nested exposes + 1) */
/* VM Configuration -- see <code/vm*.c> */
@ -668,6 +668,25 @@
}
/* Write barrier deferral
*
* See design.mps.write-barrier.deferral.
*
* TODO: These settings were determined by trial and error, but should
* be based on measurement of the protection overhead on each
* platform. We know it's extremely different between OS X and
* Windows, for example. See design.mps.write-barrier.improv.by-os.
*
* TODO: Consider basing the count on the amount of time that has
* passed in the mutator rather than the number of scans.
*/
#define WB_DEFER_BITS 2 /* bitfield width for deferral count */
#define WB_DEFER_INIT 3 /* boring scans after new segment */
#define WB_DEFER_DELAY 3 /* boring scans after interesting scan */
#define WB_DEFER_HIT 1 /* boring scans after barrier hit */
#endif /* config_h */

View file

@ -27,7 +27,7 @@ CFLAGSCOMPILER := \
-Wstrict-prototypes \
-Wswitch-default \
-Wwrite-strings
CFLAGSCOMPILERSTRICT := -ansi -pedantic
CFLAGSCOMPILERSTRICT := -std=c89 -pedantic
# A different set of compiler flags for less strict compilation, for
# instance when we need to #include a third-party header file that

View file

@ -108,7 +108,6 @@ Bool GlobalsCheck(Globals arenaGlobals)
TraceId ti;
Trace trace;
Index i;
Size depth;
RefSet rs;
Rank rank;
@ -155,20 +154,7 @@ Bool GlobalsCheck(Globals arenaGlobals)
CHECKD_NOSIG(Ring, &arena->threadRing);
CHECKD_NOSIG(Ring, &arena->deadRing);
CHECKL(BoolCheck(arena->insideShield));
CHECKL(arena->shCacheLimit <= ShieldCacheSIZE);
CHECKL(arena->shCacheI < arena->shCacheLimit);
CHECKL(BoolCheck(arena->suspended));
depth = 0;
for (i = 0; i < arena->shCacheLimit; ++i) {
Seg seg = arena->shCache[i];
if (seg != NULL) {
CHECKD(Seg, seg);
depth += SegDepth(seg);
}
}
CHECKL(depth <= arena->shDepth);
CHECKD(Shield, ArenaShield(arena));
CHECKL(TraceSetCheck(arena->busyTraces));
CHECKL(TraceSetCheck(arena->flippedTraces));
@ -294,13 +280,7 @@ Res GlobalsInit(Globals arenaGlobals)
arena->tracedWork = 0.0;
arena->tracedTime = 0.0;
arena->lastWorldCollect = ClockNow();
arena->insideShield = FALSE; /* <code/shield.c> */
arena->shCacheI = (Size)0;
arena->shCacheLimit = (Size)1;
arena->shDepth = (Size)0;
arena->suspended = FALSE;
for(i = 0; i < ShieldCacheSIZE; i++)
arena->shCache[i] = NULL;
ShieldInit(ArenaShield(arena));
for (ti = 0; ti < TraceLIMIT; ++ti) {
/* <design/arena/#trace.invalid> */
@ -405,6 +385,7 @@ void GlobalsFinish(Globals arenaGlobals)
arenaGlobals->sig = SigInvalid;
ShieldFinish(ArenaShield(arena));
RingFinish(&arena->formatRing);
RingFinish(&arena->chainRing);
RingFinish(&arena->messageRing);
@ -437,6 +418,7 @@ void GlobalsPrepareToDestroy(Globals arenaGlobals)
ArenaPark(arenaGlobals);
arena = GlobalsArena(arenaGlobals);
arenaDenounce(arena);
defaultChain = arenaGlobals->defaultChain;
@ -488,6 +470,8 @@ void GlobalsPrepareToDestroy(Globals arenaGlobals)
PoolDestroy(pool);
}
ShieldDestroyQueue(ArenaShield(arena), arena);
/* Check that the tear-down is complete: that the client has
* destroyed all data structures associated with the arena. We do
* this here rather than in GlobalsFinish because by the time that
@ -776,7 +760,7 @@ Bool ArenaStep(Globals globals, double interval, double multiplier)
trace = ArenaTrace(arena, (TraceId)0);
} else {
/* No traces are running: consider collecting the world. */
if (PolicyShouldCollectWorld(arena, availableEnd - now, now,
if (PolicyShouldCollectWorld(arena, (double)(availableEnd - now), now,
clocks_per_sec))
{
Res res;
@ -1000,7 +984,6 @@ Res GlobalsDescribe(Globals arenaGlobals, mps_lib_FILE *stream, Count depth)
"rootSerial $U\n", (WriteFU)arenaGlobals->rootSerial,
"formatSerial $U\n", (WriteFU)arena->formatSerial,
"threadSerial $U\n", (WriteFU)arena->threadSerial,
arena->insideShield ? "inside" : "outside", " shield\n",
"busyTraces $B\n", (WriteFB)arena->busyTraces,
"flippedTraces $B\n", (WriteFB)arena->flippedTraces,
"epoch $U\n", (WriteFU)arena->epoch,
@ -1019,13 +1002,7 @@ Res GlobalsDescribe(Globals arenaGlobals, mps_lib_FILE *stream, Count depth)
return res;
}
res = WriteF(stream, depth,
"} history\n",
"suspended $S\n", WriteFYesNo(arena->suspended),
"shDepth $U\n", (WriteFU)arena->shDepth,
"shCacheI $U\n", (WriteFU)arena->shCacheI,
/* @@@@ should SegDescribe the cached segs? */
NULL);
res = ShieldDescribe(ArenaShield(arena), stream, depth);
if (res != ResOK)
return res;

View file

@ -13,7 +13,6 @@ CC = clang
CFLAGSDEBUG = -O0 -g3
CFLAGSOPT = -O2 -g3
CFLAGSCOMPILER := \
-pedantic \
-Waggregate-return \
-Wall \
-Wcast-qual \
@ -32,7 +31,7 @@ CFLAGSCOMPILER := \
-Wstrict-prototypes \
-Wunreachable-code \
-Wwrite-strings
CFLAGSCOMPILERSTRICT :=
CFLAGSCOMPILERSTRICT := -std=c89 -pedantic
# A different set of compiler flags for less strict compilation, for
# instance when we need to #include a third-party header file that

View file

@ -9,6 +9,7 @@
* .sources: <design/writef/> */
#include "check.h"
#include "misc.h"
#include "mpm.h"
#include "vm.h"
@ -88,6 +89,11 @@ Bool MPMCheck(void)
* <design/sp/#sol.depth.constraint>. */
CHECKL(StackProbeDEPTH * sizeof(Word) < PageSize());
/* Check these values will fit in their bitfield. */
CHECKL(WB_DEFER_INIT <= ((1ul << WB_DEFER_BITS) - 1));
CHECKL(WB_DEFER_DELAY <= ((1ul << WB_DEFER_BITS) - 1));
CHECKL(WB_DEFER_HIT <= ((1ul << WB_DEFER_BITS) - 1));
return TRUE;
}
@ -614,16 +620,19 @@ Res WriteF_firstformat_v(mps_lib_FILE *stream, Count depth,
size_t StringLength(const char *s)
{
  /* Return the number of characters in the NUL-terminated string s,
     excluding the terminator.  Slow substitute for strlen (compare
     StringEqual, the slow substitute for strcmp, below). */
  size_t i = 0;

  AVER(s != NULL);

  while (s[i] != '\0')
    ++i;
  return i;
}
#if 0 /* This code is currently not in use in the MPS */
/* StringEqual -- slow substitute for (strcmp == 0) */
Bool StringEqual(const char *s1, const char *s2)
@ -644,6 +653,148 @@ Bool StringEqual(const char *s1, const char *s2)
return TRUE;
}
#endif /* not currently in use */
/* Random -- a random number generator
*
* TODO: This is a copy of the generator from testlib.c, which has
* extensive notes and verification tests. The notes need to go to a
* design document, and the tests to a test.
*/
static unsigned RandomSeed = 1;

#define Random_m 2147483647UL
#define Random_a 48271UL

unsigned Random32(void)
{
  /* Lehmer generator: seed' = (a * seed) mod m.  The multiplication
     is split into 15-bit halves so that no intermediate product can
     overflow 32 bits; this requires m == 2^31-1 and a < 2^16. */
  unsigned lower, upper;

  AVER(UINT_MAX >= 4294967295U);

  lower = Random_a * (RandomSeed & 0x7FFF);
  upper = Random_a * (RandomSeed >> 15);
  RandomSeed = lower + ((upper & 0xFFFF) << 15) + (upper >> 16);
  if (RandomSeed > Random_m)
    RandomSeed -= Random_m;
  return RandomSeed;
}
Word RandomWord(void)
{
  /* Assemble a full Word from as many 31-bit chunks of Random32()
     as are needed to cover MPS_WORD_WIDTH bits. */
  Word result;
  Index shift;

  result = 0;
  for (shift = 0; shift < MPS_WORD_WIDTH; shift += 31)
    result = (result << 31) | Random32();
  return result;
}
/* QuickSort -- non-recursive bounded sort
*
* We can't rely on the standard library's qsort, which might have
* O(n) stack usage. This version does not recurse.
*/
#ifdef QUICKSORT_DEBUG
/* quickSorted -- check that array[0..length-1] is in non-decreasing
   order according to compare.  Debug builds only; used to verify
   QuickSort's result. */
static Bool quickSorted(void *array[], Count length,
                        QuickSortCompare compare, void *closure)
{
  Index i;
  if (length == 0)
    return TRUE;
  for (i = 1; i < length; ++i) {
    if (compare(array[i - 1], array[i], closure) == CompareGREATER)
      return FALSE;
  }
  return TRUE;
}
#endif
void QuickSort(void *array[], Count length,
               QuickSortCompare compare, void *closure,
               SortStruct *sortStruct)
{
  /* Sort array[0..length-1] into non-decreasing order according to
     compare.  closure is passed through to compare uninterpreted.
     sortStruct is caller-allocated workspace used as an explicit
     stack, so this function never recurses (see the bounded-stack
     rationale in the comment above). */
  Index left, right, sp, lo, hi, leftLimit, rightBase;
  void *pivot, *temp;

  AVER(array != NULL);
  /* can't check length */
  AVER(FUNCHECK(compare));
  /* can't check closure */
  AVER(sortStruct != NULL);

  sp = 0;          /* sortStruct->stack[0..sp-1] holds ranges still to sort */
  left = 0;
  right = length;  /* current working range is [left, right) */
  for (;;) {
    while (right - left > 1) { /* only need to sort if two or more */
      /* Pick a random pivot. */
      pivot = array[left + RandomWord() % (right - left)];

      /* Hoare partition: scan from left to right, dividing it into
         elements less than the pivot and elements greater or
         equal. */
      lo = left;
      hi = right;
      for (;;) {
        while (compare(array[lo], pivot, closure) == CompareLESS)
          ++lo;
        do
          --hi;
        while (compare(pivot, array[hi], closure) == CompareLESS);
        if (lo >= hi)
          break;
        temp = array[hi];
        array[hi] = array[lo];
        array[lo] = temp;
        ++lo; /* step over what we just swapped */
      }

      /* After partition, if we ended up at a pivot, then it is in its
         final position and we must skip it to ensure termination.
         This handles the case where the pivot is at the start of the
         array, and one of the partitions is the whole array, for
         example. */
      if (lo == hi) {
        AVER_CRITICAL(array[hi] == pivot); /* and it's in place */
        leftLimit = lo;
        rightBase = lo + 1;
      } else {
        AVER_CRITICAL(lo == hi + 1);
        leftLimit = lo;
        rightBase = lo;
      }

      /* Sort the smaller part now, so that we're sure to use at most
         log2 length stack levels. Push the larger part on the stack
         for later. */
      AVER_CRITICAL(sp < sizeof sortStruct->stack / sizeof sortStruct->stack[0]);
      if (leftLimit - left < right - rightBase) {
        sortStruct->stack[sp].left = rightBase;
        sortStruct->stack[sp].right = right;
        ++sp;
        right = leftLimit;
      } else {
        sortStruct->stack[sp].left = left;
        sortStruct->stack[sp].right = leftLimit;
        ++sp;
        left = rightBase;
      }
    }
    /* Current range is sorted; pop the next pending range, if any. */
    if (sp == 0)
      break;
    --sp;
    left = sortStruct->stack[sp].left;
    right = sortStruct->stack[sp].right;
    AVER_CRITICAL(left < right); /* we will have done a zero-length part first */
  }

#ifdef QUICKSORT_DEBUG
  AVER(quickSorted(array, length, compare, closure));
#endif
}
/* C. COPYRIGHT AND LICENSE

View file

@ -173,6 +173,15 @@ extern Res WriteF_firstformat_v(mps_lib_FILE *stream, Count depth,
extern size_t StringLength(const char *s);
extern Bool StringEqual(const char *s1, const char *s2);
extern unsigned Random32(void);
extern Word RandomWord(void);
typedef Compare QuickSortCompare(void *left, void *right,
void *closure);
extern void QuickSort(void *array[], Count length,
QuickSortCompare compare, void *closure,
SortStruct *sortStruct);
/* Version Determination
*
@ -528,6 +537,7 @@ extern Ring GlobalsRememberedSummaryRing(Globals);
#define ArenaGreyRing(arena, rank) (&(arena)->greyRing[rank])
#define ArenaPoolRing(arena) (&ArenaGlobals(arena)->poolRing)
#define ArenaChunkTree(arena) RVALUE((arena)->chunkTree)
#define ArenaShield(arena) (&(arena)->shieldStruct)
extern Bool ArenaGrainSizeCheck(Size size);
#define AddrArenaGrainUp(addr, arena) AddrAlignUp(addr, ArenaGrainSize(arena))
@ -901,14 +911,19 @@ extern ZoneSet ZoneSetBlacklist(Arena arena);
/* Shield Interface -- see <code/shield.c> */
extern void ShieldInit(Shield shield);
extern void ShieldFinish(Shield shield);
extern Bool ShieldCheck(Shield shield);
extern Res ShieldDescribe(Shield shield, mps_lib_FILE *stream, Count depth);
extern void ShieldDestroyQueue(Shield shield, Arena arena);
extern void (ShieldRaise)(Arena arena, Seg seg, AccessSet mode);
extern void (ShieldLower)(Arena arena, Seg seg, AccessSet mode);
extern void (ShieldEnter)(Arena arena);
extern void (ShieldLeave)(Arena arena);
extern void (ShieldExpose)(Arena arena, Seg seg);
extern void (ShieldCover)(Arena arena, Seg seg);
extern void (ShieldSuspend)(Arena arena);
extern void (ShieldResume)(Arena arena);
extern void (ShieldHold)(Arena arena);
extern void (ShieldRelease)(Arena arena);
extern void (ShieldFlush)(Arena arena);
#if defined(SHIELD)
@ -924,8 +939,8 @@ extern void (ShieldFlush)(Arena arena);
BEGIN UNUSED(arena); UNUSED(seg); END
#define ShieldCover(arena, seg) \
BEGIN UNUSED(arena); UNUSED(seg); END
#define ShieldSuspend(arena) BEGIN UNUSED(arena); END
#define ShieldResume(arena) BEGIN UNUSED(arena); END
#define ShieldHold(arena) BEGIN UNUSED(arena); END
#define ShieldRelease(arena) BEGIN UNUSED(arena); END
#define ShieldFlush(arena) BEGIN UNUSED(arena); END
#else
#error "No shield configuration."

View file

@ -253,13 +253,15 @@ typedef struct SegStruct { /* segment structure */
Tract firstTract; /* first tract of segment */
RingStruct poolRing; /* link in list of segs in pool */
Addr limit; /* limit of segment */
unsigned depth : ShieldDepthWIDTH; /* see <code/shield.c#def.depth> */
unsigned depth : ShieldDepthWIDTH; /* see design.mps.shield.def.depth */
BOOLFIELD(queued); /* in shield queue? */
AccessSet pm : AccessLIMIT; /* protection mode, <code/shield.c> */
AccessSet sm : AccessLIMIT; /* shield mode, <code/shield.c> */
TraceSet grey : TraceLIMIT; /* traces for which seg is grey */
TraceSet white : TraceLIMIT; /* traces for which seg is white */
TraceSet nailed : TraceLIMIT; /* traces for which seg has nailed objects */
RankSet rankSet : RankLIMIT; /* ranks of references in this seg */
unsigned defer : WB_DEFER_BITS; /* defer write barrier for this many scans */
} SegStruct;
@ -676,9 +678,46 @@ typedef struct FreelistStruct {
} FreelistStruct;
/* SortStruct -- extra memory required by sorting
*
* See QuickSort in mpm.c. This exists so that the caller can make
* the choice about where to allocate the memory, since the MPS has to
* operate in tight stack constraints -- see design.mps.sp.
*/
typedef struct SortStruct {
  struct {
    Index left, right;     /* pending subrange [left, right) of the array */
  } stack[MPS_WORD_WIDTH]; /* QuickSort pushes only the larger partition,
                              so log2(length) <= word-width levels suffice */
} SortStruct;
/* ShieldStruct -- per-arena part of the shield
*
* See design.mps.shield, impl.c.shield.
*/
#define ShieldSig ((Sig)0x519581E1) /* SIGnature SHEILd */
typedef struct ShieldStruct {
  /* NOTE(review): queue[0..next-1] appear to be the live entries and
     limit the high-water mark of that index -- confirm against
     shield.c, which is not fully visible here. */
  Sig sig;           /* design.mps.sig */
  Bool inside;       /* design.mps.shield.def.inside */
  Seg *queue;        /* queue of unsynced segs */
  Count length;      /* number of elements in shield queue */
  Index next;        /* next free element in shield queue */
  Index limit;       /* high water mark for cache usage */
  Count depth;       /* sum of depths of all segs */
  Count unsynced;    /* number of unsynced segments */
  Count holds;       /* number of holds */
  Bool suspended;    /* mutator suspended? */
  SortStruct sortStruct; /* workspace for queue sort */
} ShieldStruct;
/* ArenaStruct -- generic arena
*
* See <code/arena.c>. */
* See <code/arena.c>.
*/
#define ArenaSig ((Sig)0x519A6E4A) /* SIGnature ARENA */
@ -736,15 +775,9 @@ typedef struct mps_arena_s {
RingStruct threadRing; /* ring of attached threads */
RingStruct deadRing; /* ring of dead threads */
Serial threadSerial; /* serial of next thread */
/* shield fields (<code/shield.c>) */
Bool insideShield; /* TRUE if and only if inside shield */
Seg shCache[ShieldCacheSIZE]; /* Cache of unsynced segs */
Size shCacheI; /* index into cache */
Size shCacheLimit; /* High water mark for cache usage */
Size shDepth; /* sum of depths of all segs */
Bool suspended; /* TRUE iff mutator suspended */
ShieldStruct shieldStruct;
/* trace fields (<code/trace.c>) */
TraceSet busyTraces; /* set of running traces */
TraceSet flippedTraces; /* set of running and flipped traces */

View file

@ -112,6 +112,7 @@ typedef struct RangeStruct *Range; /* <design/range/> */
typedef struct LandStruct *Land; /* <design/land/> */
typedef struct LandClassStruct *LandClass; /* <design/land/> */
typedef unsigned FindDelete; /* <design/land/> */
typedef struct ShieldStruct *Shield; /* design.mps.shield */
/* Arena*Method -- see <code/mpmst.h#ArenaClassStruct> */

View file

@ -5324,7 +5324,7 @@
CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
GCC_C_LANGUAGE_STANDARD = ansi;
GCC_C_LANGUAGE_STANDARD = c89;
GCC_OPTIMIZATION_LEVEL = s;
GCC_PREPROCESSOR_DEFINITIONS = CONFIG_VAR_RASH;
GCC_TREAT_IMPLICIT_FUNCTION_DECLARATIONS_AS_ERRORS = YES;
@ -5351,7 +5351,24 @@
SDKROOT = macosx;
SYMROOT = xc;
WARNING_CFLAGS = (
"-pedantic\n-Waggregate-return\n-Wall\n-Wcast-qual\n-Wconversion\n-Wduplicate-enum\n-Wextra\n-Winline\n-Wmissing-prototypes\n-Wmissing-variable-declarations\n-Wnested-externs\n-Wno-extended-offsetof\n-Wpointer-arith\n-Wshadow\n-Wstrict-aliasing=2\n-Wstrict-prototypes\n-Wunreachable-code\n-Wwrite-strings\n",
"-pedantic",
"-Waggregate-return",
"-Wall",
"-Wcast-qual",
"-Wconversion",
"-Wduplicate-enum",
"-Wextra",
"-Winline",
"-Wmissing-prototypes",
"-Wmissing-variable-declarations",
"-Wnested-externs",
"-Wno-extended-offsetof",
"-Wpointer-arith",
"-Wshadow",
"-Wstrict-aliasing=2",
"-Wstrict-prototypes",
"-Wunreachable-code",
"-Wwrite-strings",
);
};
name = RASH;
@ -5746,7 +5763,7 @@
CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
COPY_PHASE_STRIP = NO;
GCC_C_LANGUAGE_STANDARD = ansi;
GCC_C_LANGUAGE_STANDARD = c89;
GCC_OPTIMIZATION_LEVEL = 0;
GCC_PREPROCESSOR_DEFINITIONS = CONFIG_VAR_COOL;
GCC_TREAT_IMPLICIT_FUNCTION_DECLARATIONS_AS_ERRORS = YES;
@ -5774,7 +5791,24 @@
SDKROOT = macosx;
SYMROOT = xc;
WARNING_CFLAGS = (
"-pedantic\n-Waggregate-return\n-Wall\n-Wcast-qual\n-Wconversion\n-Wduplicate-enum\n-Wextra\n-Winline\n-Wmissing-prototypes\n-Wmissing-variable-declarations\n-Wnested-externs\n-Wno-extended-offsetof\n-Wpointer-arith\n-Wshadow\n-Wstrict-aliasing=2\n-Wstrict-prototypes\n-Wunreachable-code\n-Wwrite-strings\n",
"-pedantic",
"-Waggregate-return",
"-Wall",
"-Wcast-qual",
"-Wconversion",
"-Wduplicate-enum",
"-Wextra",
"-Winline",
"-Wmissing-prototypes",
"-Wmissing-variable-declarations",
"-Wnested-externs",
"-Wno-extended-offsetof",
"-Wpointer-arith",
"-Wshadow",
"-Wstrict-aliasing=2",
"-Wstrict-prototypes",
"-Wunreachable-code",
"-Wwrite-strings",
);
};
name = Debug;
@ -5788,7 +5822,7 @@
CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
GCC_C_LANGUAGE_STANDARD = ansi;
GCC_C_LANGUAGE_STANDARD = c89;
GCC_OPTIMIZATION_LEVEL = s;
GCC_PREPROCESSOR_DEFINITIONS = CONFIG_VAR_HOT;
GCC_TREAT_IMPLICIT_FUNCTION_DECLARATIONS_AS_ERRORS = YES;
@ -5815,7 +5849,24 @@
SDKROOT = macosx;
SYMROOT = xc;
WARNING_CFLAGS = (
"-pedantic\n-Waggregate-return\n-Wall\n-Wcast-qual\n-Wconversion\n-Wduplicate-enum\n-Wextra\n-Winline\n-Wmissing-prototypes\n-Wmissing-variable-declarations\n-Wnested-externs\n-Wno-extended-offsetof\n-Wpointer-arith\n-Wshadow\n-Wstrict-aliasing=2\n-Wstrict-prototypes\n-Wunreachable-code\n-Wwrite-strings\n",
"-pedantic",
"-Waggregate-return",
"-Wall",
"-Wcast-qual",
"-Wconversion",
"-Wduplicate-enum",
"-Wextra",
"-Winline",
"-Wmissing-prototypes",
"-Wmissing-variable-declarations",
"-Wnested-externs",
"-Wno-extended-offsetof",
"-Wpointer-arith",
"-Wshadow",
"-Wstrict-aliasing=2",
"-Wstrict-prototypes",
"-Wunreachable-code",
"-Wwrite-strings",
);
};
name = Release;

View file

@ -821,7 +821,7 @@ static Res MRGDescribe(Pool pool, mps_lib_FILE *stream, Count depth)
if (res != ResOK)
return res;
RING_FOR(node, &mrg->entryRing, nextNode) {
Bool outsideShield = !arena->insideShield;
Bool outsideShield = !ArenaShield(arena)->inside;
refPart = MRGRefPartOfLink(linkOfRing(node), arena);
if (outsideShield) {
ShieldEnter(arena);

View file

@ -29,6 +29,7 @@
#include "dbgpool.h"
#include "poolmv.h"
#include "poolmfs.h"
#include "mpscmvff.h"
#include "mpm.h"
SRCID(poolmv, "$Id$");
@ -236,7 +237,10 @@ static Res MVInit(Pool pool, ArgList args)
if (ArgPick(&arg, args, MPS_KEY_MAX_SIZE))
maxSize = arg.val.size;
arena = PoolArena(pool);
AVERT(Align, align);
AVER(align <= ArenaGrainSize(arena));
AVER(extendBy > 0);
AVER(avgSize > 0);
AVER(avgSize <= extendBy);
@ -245,7 +249,6 @@ static Res MVInit(Pool pool, ArgList args)
pool->alignment = align;
mv = PoolMV(pool);
arena = PoolArena(pool);
/* At 100% fragmentation we will need one block descriptor for every other */
/* allocated block, or (extendBy/avgSize)/2 descriptors. See note 1. */

View file

@ -259,10 +259,10 @@ static Res MVTInit(Pool pool, ArgList args)
AVERT(Align, align);
/* This restriction on the alignment is necessary because of the use
* of a Freelist to store the free address ranges in low-memory
* situations. See <design/freelist/#impl.grain.align>.
*/
of a Freelist to store the free address ranges in low-memory
situations. See <design/freelist/#impl.grain.align>. */
AVER(AlignIsAligned(align, FreelistMinimumAlignment));
AVER(align <= ArenaGrainSize(arena));
AVER(0 < minSize);
AVER(minSize <= meanSize);
AVER(meanSize <= maxSize);

View file

@ -486,10 +486,10 @@ static Res MVFFInit(Pool pool, ArgList args)
AVER(spare <= 1.0); /* .arg.check */
AVERT(Align, align);
/* This restriction on the alignment is necessary because of the use
* of a Freelist to store the free address ranges in low-memory
* situations. <design/freelist/#impl.grain.align>.
*/
of a Freelist to store the free address ranges in low-memory
situations. <design/freelist/#impl.grain.align>. */
AVER(AlignIsAligned(align, FreelistMinimumAlignment));
AVER(align <= ArenaGrainSize(arena));
AVERT(Bool, slotHigh);
AVERT(Bool, arenaHigh);
AVERT(Bool, firstFit);

View file

@ -1,7 +1,7 @@
/* ref.c: REFERENCES
*
* $Id$
* Copyright (c) 2001 Ravenbrook Limited. See end of file for license.
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
*
* .purpose: Implement operations on Ref, RefSet, ZoneSet, and Rank.
*
@ -35,7 +35,7 @@ Bool RankSetCheck(RankSet rankSet)
/* ZoneSetOfRange -- calculate the zone set of a range of addresses */
RefSet ZoneSetOfRange(Arena arena, Addr base, Addr limit)
ZoneSet ZoneSetOfRange(Arena arena, Addr base, Addr limit)
{
Word zbase, zlimit;
@ -292,13 +292,9 @@ ZoneSet ZoneSetBlacklist(Arena arena)
}
/* C. COPYRIGHT AND LICENSE
*
* Copyright (C) 2001-2002 Ravenbrook Limited <http://www.ravenbrook.com/>.
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*

View file

@ -1,7 +1,7 @@
/* seg.c: SEGMENTS
*
* $Id$
* Copyright (c) 2001-2015 Ravenbrook Limited. See end of file for license.
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
*
* .design: The design for this module is <design/seg/>.
*
@ -16,14 +16,6 @@
* all current GC features, and providing full backwards compatibility
* with "old-style" segments. It may be subclassed by clients of the
* module.
*
* TRANSGRESSIONS
*
* .check.shield: The "pm", "sm", and "depth" fields are not checked by
* SegCheck, because I haven't spent time working out the invariants.
* We should certainly work them out, by studying <code/shield.c>, and
* assert things about shielding, protection, shield cache consistency,
* etc. richard 1997-04-03
*/
#include "tract.h"
@ -157,7 +149,9 @@ static Res SegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
seg->grey = TraceSetEMPTY;
seg->pm = AccessSetEMPTY;
seg->sm = AccessSetEMPTY;
seg->defer = WB_DEFER_INIT;
seg->depth = 0;
seg->queued = FALSE;
seg->firstTract = NULL;
seg->sig = SegSig; /* set sig now so tract checks will see it */
@ -213,6 +207,11 @@ static void SegFinish(Seg seg)
AVERT(SegClass, class);
arena = PoolArena(SegPool(seg));
/* TODO: It would be good to avoid deprotecting segments eagerly
when we free them, especially if they're going to be
unmapped. This would require tracking of protection independent
of the existence of a SegStruct. */
if (seg->sm != AccessSetEMPTY) {
ShieldLower(arena, seg, seg->sm);
}
@ -223,7 +222,10 @@ static void SegFinish(Seg seg)
seg->rankSet = RankSetEMPTY;
/* See <code/shield.c#shield.flush> */
ShieldFlush(PoolArena(SegPool(seg)));
AVER(seg->depth == 0);
if (seg->queued)
ShieldFlush(PoolArena(SegPool(seg)));
AVER(!seg->queued);
limit = SegLimit(seg);
@ -572,7 +574,8 @@ Res SegMerge(Seg *mergedSegReturn, Seg segLo, Seg segHi)
AVER(SegBase(segHi) == SegLimit(segLo));
arena = PoolArena(SegPool(segLo));
ShieldFlush(arena); /* see <design/seg/#split-merge.shield> */
if (segLo->queued || segHi->queued)
ShieldFlush(arena); /* see <design/seg/#split-merge.shield> */
/* Invoke class-specific methods to do the merge */
res = class->merge(segLo, segHi, base, mid, limit);
@ -624,7 +627,9 @@ Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at)
* the split point. */
AVER(SegBuffer(seg) == NULL || BufferLimit(SegBuffer(seg)) <= at);
ShieldFlush(arena); /* see <design/seg/#split-merge.shield> */
if (seg->queued)
ShieldFlush(arena); /* see <design/seg/#split-merge.shield> */
AVER(SegSM(seg) == SegPM(seg));
/* Allocate the new segment object from the control pool */
res = ControlAlloc(&p, arena, class->size);
@ -682,6 +687,8 @@ Bool SegCheck(Seg seg)
CHECKL(AddrIsArenaGrain(TractBase(seg->firstTract), arena));
CHECKL(AddrIsArenaGrain(seg->limit, arena));
CHECKL(seg->limit > TractBase(seg->firstTract));
/* Can't BoolCheck seg->queued because compilers warn about that on
single-bit fields. */
/* Each tract of the segment must agree about white traces. Note
* that even if the CHECKs are compiled away there is still a
@ -710,8 +717,17 @@ Bool SegCheck(Seg seg)
/* CHECKL(RingNext(&seg->poolRing) != &seg->poolRing); */
CHECKD_NOSIG(Ring, &seg->poolRing);
/* Shield invariants -- see design.mps.shield. */
/* "pm", "sm", and "depth" not checked. See .check.shield. */
/* The protection mode is never more than the shield mode
(design.mps.shield.inv.prot.shield). */
CHECKL(BS_DIFF(seg->pm, seg->sm) == 0);
/* All unsynced segments have positive depth or are in the queue
(design.mps.shield.inv.unsynced.depth). */
CHECKL(seg->sm == seg->pm || seg->depth > 0 || seg->queued);
CHECKL(RankSetCheck(seg->rankSet));
if (seg->rankSet == RankSetEMPTY) {
/* <design/seg/#field.rankSet.empty>: If there are no refs */
@ -730,6 +746,7 @@ Bool SegCheck(Seg seg)
/* write shielded. */
/* CHECKL(seg->_summary == RefSetUNIV || (seg->_sm & AccessWRITE)); */
/* @@@@ What can be checked about the read barrier? */
/* TODO: Need gcSegCheck? What does RankSet imply about being a gcSeg? */
}
return TRUE;
}
@ -891,9 +908,11 @@ static Res segTrivMerge(Seg seg, Seg segHi,
AVER(seg->pm == segHi->pm);
AVER(seg->sm == segHi->sm);
AVER(seg->depth == segHi->depth);
AVER(seg->queued == segHi->queued);
/* Neither segment may be exposed, or in the shield cache */
/* See <design/seg/#split-merge.shield> & <code/shield.c#def.depth> */
AVER(seg->depth == 0);
AVER(!seg->queued);
/* no need to update fields which match. See .similar */
@ -930,7 +949,6 @@ static Res segNoSplit(Seg seg, Seg segHi,
AVER(SegLimit(seg) == limit);
NOTREACHED;
return ResFAIL;
}
@ -956,9 +974,10 @@ static Res segTrivSplit(Seg seg, Seg segHi,
AVER(SegBase(seg) == base);
AVER(SegLimit(seg) == limit);
/* Segment may not be exposed, or in the shield cache */
/* Segment may not be exposed, or in the shield queue */
/* See <design/seg/#split-merge.shield> & <code/shield.c#def.depth> */
AVER(seg->depth == 0);
AVER(!seg->queued);
/* Full initialization for segHi. Just modify seg. */
seg->limit = mid;
@ -970,6 +989,7 @@ static Res segTrivSplit(Seg seg, Seg segHi,
segHi->pm = seg->pm;
segHi->sm = seg->sm;
segHi->depth = seg->depth;
segHi->queued = seg->queued;
segHi->firstTract = NULL;
segHi->class = seg->class;
segHi->sig = SegSig;
@ -1311,6 +1331,16 @@ static void gcSegSetRankSet(Seg seg, RankSet rankSet)
}
static void gcSegSyncWriteBarrier(Seg seg, Arena arena)
{
  /* Can't check seg -- this function enforces invariants tested by SegCheck. */
  /* The write barrier is wanted exactly when the summary is a proper
     subset of the universal reference set; otherwise lower it. */
  if (SegSummary(seg) != RefSetUNIV)
    ShieldRaise(arena, seg, AccessWRITE);
  else
    ShieldLower(arena, seg, AccessWRITE);
}
/* gcSegSetSummary -- GCSeg method to change the summary on a segment
*
* In fact, we only need to raise the write barrier if the
@ -1323,7 +1353,6 @@ static void gcSegSetRankSet(Seg seg, RankSet rankSet)
static void gcSegSetSummary(Seg seg, RefSet summary)
{
GCSeg gcseg;
RefSet oldSummary;
Arena arena;
AVERT_CRITICAL(Seg, seg); /* .seg.method.check */
@ -1332,19 +1361,11 @@ static void gcSegSetSummary(Seg seg, RefSet summary)
AVER_CRITICAL(&gcseg->segStruct == seg);
arena = PoolArena(SegPool(seg));
oldSummary = gcseg->summary;
gcseg->summary = summary;
AVER(seg->rankSet != RankSetEMPTY);
/* Note: !RefSetSuper is a test for a strict subset */
if (!RefSetSuper(summary, RefSetUNIV)) {
if (RefSetSuper(oldSummary, RefSetUNIV))
ShieldRaise(arena, seg, AccessWRITE);
} else {
if (!RefSetSuper(oldSummary, RefSetUNIV))
ShieldLower(arena, seg, AccessWRITE);
}
gcSegSyncWriteBarrier(seg, arena);
}
@ -1353,7 +1374,6 @@ static void gcSegSetSummary(Seg seg, RefSet summary)
static void gcSegSetRankSummary(Seg seg, RankSet rankSet, RefSet summary)
{
GCSeg gcseg;
Bool wasShielded, willbeShielded;
Arena arena;
AVERT_CRITICAL(Seg, seg); /* .seg.method.check */
@ -1369,17 +1389,11 @@ static void gcSegSetRankSummary(Seg seg, RankSet rankSet, RefSet summary)
arena = PoolArena(SegPool(seg));
wasShielded = (seg->rankSet != RankSetEMPTY && gcseg->summary != RefSetUNIV);
willbeShielded = (rankSet != RankSetEMPTY && summary != RefSetUNIV);
seg->rankSet = BS_BITFIELD(Rank, rankSet);
gcseg->summary = summary;
if (willbeShielded && !wasShielded) {
ShieldRaise(arena, seg, AccessWRITE);
} else if (wasShielded && !willbeShielded) {
ShieldLower(arena, seg, AccessWRITE);
}
if (rankSet != RankSetEMPTY)
gcSegSyncWriteBarrier(seg, arena);
}
@ -1674,7 +1688,7 @@ void SegClassMixInNoSplitMerge(SegClass class)
/* C. COPYRIGHT AND LICENSE
*
* Copyright (C) 2001-2015 Ravenbrook Limited <http://www.ravenbrook.com/>.
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*

View file

@ -1,75 +1,13 @@
/* shield.c: SHIELD IMPLEMENTATION
*
* $Id$
* Copyright (c) 2001-2015 Ravenbrook Limited. See end of file for license.
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
*
* See: idea.shield, design.mps.shield.
*
* This implementation of the shield avoids suspending threads for
* as long as possible. When threads are suspended, it maintains a
* cache of covered segments where the desired and actual protection
* do not match. This cache is flushed on leaving the shield.
*
*
* Definitions
*
* .def.synced: a seg is synced if the prot and shield modes are the
* same, and unsynced otherwise.
* .def.depth: the depth of a segment is defined as
* depth == #exposes - #covers + #(in cache), where
* #exposes = the total number of times the seg has been exposed
* #covers = the total number of times the seg has been covered
* #(in cache) = the number of times the seg appears in the cache
* The cache is initially empty and Cover should not be called
* without a matching Expose, so this figure should always be
* non-negative.
* .def.total.depth: The total depth is the sum of the depth over
* all segments
* .def.outside: being outside the shield is being between calls
* to leave and enter, and similarly .def.inside: being inside the
* shield is being between calls to enter and leave.
* .def.suspended: suspended is true iff the mutator is suspended.
* .def.shielded: a segment is shielded if the shield mode is non-zero.
*
*
* Properties
*
* .prop.outside.running: The mutator may not be suspended while
* outside the shield.
* .prop.mutator.access: An attempt by the mutator to access
* shielded memory must cause an ArenaAccess.
* .prop.inside.access: Inside the shield it must be possible to access
* all unshielded segments and all exposed segments.
*
*
* Invariants
*
* These invariants are maintained by the code.
*
* .inv.outside.running: The mutator is not suspended while outside the
* shield.
* .inv.unsynced.suspended: If any segment is not synced,
* the mutator is suspended.
* .inv.unsynced.depth: All unsynced segments have positive depth.
* .inv.outside.depth: The total depth is zero while outside the shield.
* .inv.prot.shield: The prot mode is never more than the shield mode.
* .inv.expose.prot: An exposed seg is not protected.
*
* Hints at proofs of properties from invariants
*
* inv.outside.running directly ensures prop.outside running.
*
* As the depth of a segment cannot be negative
* total depth == 0 => for all segments, depth == 0
* => all segs are synced (by .inv.unsynced.depth)
*
* If the mutator is running then all segs must be synced
* (.inv.unsynced.suspend). Which means that the hardware protection
* (prot mode) must reflect the software protection (shield mode).
* Hence all shielded memory will be hardware protected while the
* mutator is running. This ensures .prop.mutator.access.
*
* inv.prot.shield and inv.expose.prot ensure prop.inside.access.
* IMPORTANT: HERE BE DRAGONS! This code is subtle and
* critical. Ensure you have read and understood design.mps.shield
* before you touch it.
*/
#include "mpm.h"
@ -77,269 +15,751 @@
SRCID(shield, "$Id$");
void (ShieldSuspend)(Arena arena)
void ShieldInit(Shield shield)
{
AVERT(Arena, arena);
AVER(arena->insideShield);
shield->inside = FALSE;
shield->queue = NULL;
shield->length = 0;
shield->next = 0;
shield->limit = 0;
shield->depth = 0;
shield->unsynced = 0;
shield->holds = 0;
shield->suspended = FALSE;
shield->sig = ShieldSig;
}
if (!arena->suspended) {
ThreadRingSuspend(ArenaThreadRing(arena), ArenaDeadRing(arena));
arena->suspended = TRUE;
/* ShieldDestroyQueue -- release the shield queue's storage
 *
 * The queue must be empty (limit == 0) before it is destroyed.
 */
void ShieldDestroyQueue(Shield shield, Arena arena)
{
  AVER(shield->limit == 0); /* queue must be empty */

  if (shield->length == 0)
    return; /* nothing was ever allocated */

  AVER(shield->queue != NULL);
  ControlFree(arena, shield->queue,
              shield->length * sizeof shield->queue[0]);
  shield->queue = NULL;
  shield->length = 0;
}
void (ShieldResume)(Arena arena)
void ShieldFinish(Shield shield)
{
AVERT(Arena, arena);
AVER(arena->insideShield);
AVER(arena->suspended);
/* It is only correct to actually resume the mutator here if shDepth is 0 */
/* The queue should already have been destroyed by
GlobalsPrepareToDestroy calling ShieldDestroyQueue. */
AVER(shield->length == 0);
AVER(shield->limit == 0);
AVER(shield->queue == NULL);
AVER(shield->depth == 0);
AVER(shield->unsynced == 0);
AVER(shield->holds == 0);
shield->sig = SigInvalid;
}
/* This ensures actual prot mode does not include mode */
static void protLower(Arena arena, Seg seg, AccessSet mode)
{
/* <design/trace/#fix.noaver> */
AVERT_CRITICAL(Arena, arena);
UNUSED(arena);
AVERT_CRITICAL(Seg, seg);
AVERT_CRITICAL(AccessSet, mode);
static Bool SegIsSynced(Seg seg);
if (SegPM(seg) & mode) {
SegSetPM(seg, SegPM(seg) & ~mode);
/* ShieldCheck -- check the shield's invariants
 *
 * Checks the structural invariants of the shield; see
 * design.mps.shield for the invariants referenced below.
 */
Bool ShieldCheck(Shield shield)
{
  CHECKS(Shield, shield);
  CHECKL(BoolCheck(shield->inside));
  /* A non-NULL queue implies an allocated length. */
  CHECKL(shield->queue == NULL || shield->length > 0);
  CHECKL(shield->limit <= shield->length);
  CHECKL(shield->next <= shield->limit);
  CHECKL(BoolCheck(shield->suspended));

  /* The mutator is not suspended while outside the shield
     (design.mps.shield.inv.outside.running). */
  CHECKL(shield->inside || !shield->suspended);

  /* If any segment is not synced, the mutator is suspended
     (design.mps.shield.inv.unsynced.suspended). */
  CHECKL(shield->unsynced == 0 || shield->suspended);

  /* If any segment is exposed, the mutator is suspended. */
  CHECKL(shield->depth == 0 || shield->suspended);

  /* The total depth is zero while outside the shield
     (design.mps.shield.inv.outside.depth). */
  CHECKL(shield->inside || shield->depth == 0);

  /* There are no unsynced segments when we're outside the shield. */
  CHECKL(shield->inside || shield->unsynced == 0);

  /* Every unsynced segment should be on the queue, because we have to
     remember to sync it before we return to the mutator. */
  CHECKL(shield->limit >= shield->unsynced);

  /* The mutator is suspended if there are any holds. */
  CHECKL(shield->holds == 0 || shield->suspended);

  /* This is too expensive to check all the time since we have an
     expanding shield queue that often has 16K elements instead of
     16. */
#if defined(AVER_AND_CHECK_ALL)
  {
    /* Recompute the depth and unsynced counts by walking the queue
       and check them against the cached totals. */
    Count depth = 0;
    Count unsynced = 0;
    Index i;

    for (i = 0; i < shield->limit; ++i) {
      Seg seg = shield->queue[i];
      CHECKD(Seg, seg);
      depth += SegDepth(seg);
      if (!SegIsSynced(seg))
        ++unsynced;
    }
    CHECKL(depth == shield->depth);
    CHECKL(unsynced == shield->unsynced);
  }
#endif

  return TRUE;
}
/* ShieldDescribe -- write a description of the shield to a stream
 *
 * depth is the indentation depth passed through to WriteF.
 * Returns the WriteF result (ResOK on success).
 */
Res ShieldDescribe(Shield shield, mps_lib_FILE *stream, Count depth)
{
  Res res;

  res = WriteF(stream, depth,
               "Shield $P {\n", (WriteFP)shield,
               " ", shield->inside ? "inside" : "outside", " shield\n",
               " suspended $S\n", WriteFYesNo(shield->suspended),
               " depth $U\n", (WriteFU)shield->depth,
               " next $U\n", (WriteFU)shield->next,
               " length $U\n", (WriteFU)shield->length,
               " unsynced $U\n", (WriteFU)shield->unsynced,
               " holds $U\n", (WriteFU)shield->holds,
               "} Shield $P\n", (WriteFP)shield,
               NULL);
  if (res != ResOK)
    return res;

  return ResOK;
}
/* SHIELD_AVER -- transgressive argument checking
*
* .trans.check: A number of shield functions cannot do normal
* argument checking with AVERT because (for example) SegCheck checks
* the shield invariants, and it is these functions that are enforcing
* them. Instead, we AVER(TESTT(Seg, seg)) to check the type
* signature but not the contents.
*/
#define SHIELD_AVERT(type, exp) AVER(TESTT(type, exp))
#define SHIELD_AVERT_CRITICAL(type, exp) AVER_CRITICAL(TESTT(type, exp))
/* SegIsSynced -- is a segment synced?
 *
 * A segment is synced when its actual protection mode (PM) matches
 * its intended shield mode (SM).  See design.mps.shield.def.synced.
 */
static Bool SegIsSynced(Seg seg)
{
  SHIELD_AVERT_CRITICAL(Seg, seg);
  return SegSM(seg) == SegPM(seg);
}
/* shieldSetSM -- set a segment's shield mode, maintaining the count
 * of unsynced segments in the shield. */
static void shieldSetSM(Shield shield, Seg seg, AccessSet mode)
{
  Bool wasSynced;

  if (SegSM(seg) == mode)
    return; /* no change, count unaffected */

  wasSynced = SegIsSynced(seg);
  SegSetSM(seg, mode);

  if (wasSynced) {
    /* The shield mode now differs from the unchanged protection
       mode, so the segment has become unsynced. */
    ++shield->unsynced;
  } else if (SegIsSynced(seg)) {
    /* The new shield mode brought the segment back into sync. */
    AVER(shield->unsynced > 0);
    --shield->unsynced;
  }
}
/* shieldSetPM -- set a segment's protection mode, maintaining the
 * count of unsynced segments in the shield. */
static void shieldSetPM(Shield shield, Seg seg, AccessSet mode)
{
  Bool wasSynced;

  if (SegPM(seg) == mode)
    return; /* no change, count unaffected */

  wasSynced = SegIsSynced(seg);
  SegSetPM(seg, mode);

  if (wasSynced) {
    /* The protection mode now differs from the unchanged shield
       mode, so the segment has become unsynced. */
    ++shield->unsynced;
  } else if (SegIsSynced(seg)) {
    /* The new protection mode brought the segment back into sync. */
    AVER(shield->unsynced > 0);
    --shield->unsynced;
  }
}
/* SegIsExposed -- is a segment exposed?
 *
 * A segment is exposed while it has more ShieldExpose calls than
 * matching ShieldCover calls, i.e. positive depth.  See
 * design.mps.shield.def.exposed.
 */
static Bool SegIsExposed(Seg seg)
{
  SHIELD_AVERT_CRITICAL(Seg, seg);
  return seg->depth > 0;
}
/* shieldSync -- synchronize a segment's protection
 *
 * Brings the hardware protection into line with the shield mode.
 * See design.mps.shield.inv.prot.shield.
 */
static void shieldSync(Shield shield, Seg seg)
{
  SHIELD_AVERT_CRITICAL(Seg, seg);

  if (SegIsSynced(seg))
    return; /* already in sync, nothing to do */

  shieldSetPM(shield, seg, SegSM(seg));
  ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg));
}
static void shieldSync(Arena arena, Seg seg)
{
AVERT(Arena, arena);
AVERT(Seg, seg);
/* shieldSuspend -- suspend the mutator
*
* Called from inside impl.c.shield when any segment is not synced, in
* order to provide exclusive access to the segment by the MPS. See
* .inv.unsynced.suspended.
*/
if (SegPM(seg) != SegSM(seg)) {
ProtSet(SegBase(seg), SegLimit(seg), SegSM(seg));
SegSetPM(seg, SegSM(seg));
/* inv.prot.shield */
/* shieldSuspend -- suspend the mutator threads (idempotent)
 *
 * Gives the MPS exclusive access to memory; see
 * design.mps.shield.inv.unsynced.suspended.
 */
static void shieldSuspend(Arena arena)
{
  Shield shield;

  AVERT(Arena, arena);
  shield = ArenaShield(arena);
  AVER(shield->inside);

  /* Idempotent: suspend only on the first call. */
  if (!shield->suspended) {
    ThreadRingSuspend(ArenaThreadRing(arena), ArenaDeadRing(arena));
    shield->suspended = TRUE;
  }
}
static void flush(Arena arena, Size i)
/* ShieldHold -- suspend mutator access to the unprotectable
*
* From outside impl.c.shield, this is used when we really need to
* lock everything against the mutator -- for example, during flip
* when we must scan all thread registers at once.
*/
void (ShieldHold)(Arena arena)
{
Seg seg;
AVERT(Arena, arena);
AVER(i < arena->shCacheLimit);
seg = arena->shCache[i];
if (seg == NULL)
return;
AVERT(Seg, seg);
AVER(arena->shDepth > 0);
AVER(SegDepth(seg) > 0);
--arena->shDepth;
SegSetDepth(seg, SegDepth(seg) - 1);
if (SegDepth(seg) == 0)
shieldSync(arena, seg);
arena->shCache[i] = NULL;
shieldSuspend(arena);
++ArenaShield(arena)->holds;
}
/* If the segment is out of sync, either sync it, or ensure
* depth > 0, and the arena is suspended.
/* ShieldRelease -- declare mutator could be resumed
*
* In practice, we don't resume the mutator until ShieldLeave, but
* this marks the earliest point at which we could resume.
*/
static void cache(Arena arena, Seg seg)
/* ShieldRelease -- release one hold on the mutator; must balance a
 * previous ShieldHold. */
void (ShieldRelease)(Arena arena)
{
  Shield shield;

  AVERT(Arena, arena);
  shield = ArenaShield(arena);
  AVER(shield->inside);
  AVER(shield->suspended);

  AVER(shield->holds > 0);
  --shield->holds;

  /* It is only correct to actually resume the mutator here if
     shield->depth is 0, shield->unsynced is 0, and the queue is
     empty. */
  /* See design.mps.shield.improv.resume for a discussion of when it
     might be a good idea to resume the mutator early. */
}
/* shieldProtLower -- reduce protection on a segment
*
* This ensures actual prot mode does not include mode.
*/
static void shieldProtLower(Shield shield, Seg seg, AccessSet mode)
{
/* <design/trace/#fix.noaver> */
AVERT_CRITICAL(Arena, arena);
AVERT_CRITICAL(Seg, seg);
SHIELD_AVERT_CRITICAL(Seg, seg);
AVERT_CRITICAL(AccessSet, mode);
if (SegSM(seg)
== SegPM(seg)) return;
if (SegDepth(seg) > 0) {
ShieldSuspend(arena);
return;
}
if (ShieldCacheSIZE == 0 || !arena->suspended)
shieldSync(arena, seg);
else {
SegSetDepth(seg, SegDepth(seg) + 1);
++arena->shDepth;
AVER(arena->shDepth > 0);
AVER(SegDepth(seg) > 0);
AVER(arena->shCacheLimit <= ShieldCacheSIZE);
AVER(arena->shCacheI < arena->shCacheLimit);
flush(arena, arena->shCacheI);
arena->shCache[arena->shCacheI] = seg;
++arena->shCacheI;
if (arena->shCacheI == ShieldCacheSIZE)
arena->shCacheI = 0;
if (arena->shCacheI == arena->shCacheLimit)
++arena->shCacheLimit;
if (BS_INTER(SegPM(seg), mode) != AccessSetEMPTY) {
shieldSetPM(shield, seg, BS_DIFF(SegPM(seg), mode));
ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg));
}
}
void (ShieldRaise) (Arena arena, Seg seg, AccessSet mode)
/* shieldDequeue -- remove the segment at index i from the shield
 * queue and return it. */
static Seg shieldDequeue(Shield shield, Index i)
{
  Seg dequeued;

  AVER(i < shield->limit);
  dequeued = shield->queue[i];
  AVERT(Seg, dequeued);
  AVER(dequeued->queued);

  /* Clear the slot so a stale entry can never be re-used. */
  shield->queue[i] = NULL;
  dequeued->queued = FALSE;

  return dequeued;
}
/* shieldFlushEntry -- flush a single entry from the queue
 *
 * An exposed segment can simply be dequeued, because the later call
 * to ShieldCover will put it back on the queue.  Otherwise, sync its
 * protection (a no-op if the shield is no longer raised on it).
 */
static void shieldFlushEntry(Shield shield, Index i)
{
  Seg seg = shieldDequeue(shield, i);

  if (SegIsExposed(seg))
    return;

  shieldSync(shield, seg);
}
/* shieldQueueReset -- reset shield queue pointers
 *
 * Only legal once every queued segment has been synced.
 */
static void shieldQueueReset(Shield shield)
{
  AVER(shield->depth == 0); /* overkill: implies no segs are queued */
  AVER(shield->unsynced == 0); /* everything must already be in sync */
  shield->next = 0;
  shield->limit = 0;
}
/* shieldQueueEntryCompare -- comparison for queue sorting */
/* shieldAddrCompare -- three-way comparison of two addresses */
static Compare shieldAddrCompare(Addr left, Addr right)
{
  if (left > right)
    return CompareGREATER;
  return left == right ? CompareEQUAL : CompareLESS;
}
/* shieldQueueEntryCompare -- QuickSort comparison callback ordering
 * queue entries (segments) by base address. */
static Compare shieldQueueEntryCompare(void *left, void *right, void *closure)
{
  Seg segA = left, segB = right;

  /* These checks are not critical in a hot build, but slow down cool
     builds quite a bit, so just check the signatures. */
  AVER(TESTT(Seg, segA));
  AVER(TESTT(Seg, segB));
  UNUSED(closure);

  return shieldAddrCompare(SegBase(segA), SegBase(segB));
}
/* shieldFlushEntries -- flush queue coalescing protects
 *
 * Sort the shield queue into address order, then iterate over it
 * coalescing protection work, in order to reduce the number of system
 * calls to a minimum. This is very important on OS X, where
 * protection calls are extremely inefficient, but has no net gain on
 * Windows.
 *
 * TODO: Could we keep extending the outstanding area over memory
 * that's *not* in the queue but has the same protection mode? Might
 * require design.mps.shield.improve.noseg.
 */
static void shieldFlushEntries(Shield shield)
{
  Addr base = NULL, limit;
  AccessSet mode;
  Index i;

  if (shield->length == 0) {
    /* Queue was never allocated, so there is nothing to flush. */
    AVER(shield->queue == NULL);
    return;
  }

  QuickSort((void *)shield->queue, shield->limit,
            shieldQueueEntryCompare, UNUSED_POINTER,
            &shield->sortStruct);

  /* Accumulate a run [base, limit) of address-adjacent segments
     sharing a shield mode; protect each completed run with a single
     ProtSet call.  base == NULL means no run is outstanding. */
  mode = AccessSetEMPTY;
  limit = NULL;
  for (i = 0; i < shield->limit; ++i) {
    Seg seg = shieldDequeue(shield, i);
    if (!SegIsSynced(seg)) {
      shieldSetPM(shield, seg, SegSM(seg));
      if (SegSM(seg) != mode || SegBase(seg) != limit) {
        /* Mode changed or segments not contiguous: flush the
           outstanding run before starting a new one. */
        if (base != NULL) {
          AVER(base < limit);
          ProtSet(base, limit, mode);
        }
        base = SegBase(seg);
        mode = SegSM(seg);
      }
      limit = SegLimit(seg);
    }
  }
  /* Flush the final outstanding run, if any. */
  if (base != NULL) {
    AVER(base < limit);
    ProtSet(base, limit, mode);
  }

  shieldQueueReset(shield);
}
/* shieldQueue -- consider adding a segment to the queue
 *
 * If the segment is out of sync, either sync it, or ensure it is
 * queued and the mutator is suspended.
 */
static void shieldQueue(Arena arena, Seg seg)
{
  Shield shield;

  /* <design/trace/#fix.noaver> */
  AVERT_CRITICAL(Arena, arena);
  shield = ArenaShield(arena);
  SHIELD_AVERT_CRITICAL(Seg, seg);

  if (SegIsSynced(seg) || seg->queued)
    return;

  if (SegIsExposed(seg)) {
    /* This can occur if the mutator isn't suspended, we expose a
       segment, then raise the shield on it. In this case, the
       mutator isn't allowed to see the segment, but we don't need to
       queue it until it's covered. */
    shieldSuspend(arena);
    return;
  }

  /* Allocate or extend the shield queue if necessary. */
  if (shield->next >= shield->length) {
    void *p;
    Res res;
    Count length;

    AVER(shield->next == shield->length);

    /* Start at ShieldQueueLENGTH, then double on each extension. */
    if (shield->length == 0)
      length = ShieldQueueLENGTH;
    else
      length = shield->length * 2;

    res = ControlAlloc(&p, arena, length * sizeof shield->queue[0]);
    if (res != ResOK) {
      AVER(ResIsAllocFailure(res));
      /* Carry on with the existing queue. */
    } else {
      if (shield->length > 0) {
        /* Copy the old queue contents into the new buffer and free
           the old one. */
        Size oldSize = shield->length * sizeof shield->queue[0];
        AVER(shield->queue != NULL);
        mps_lib_memcpy(p, shield->queue, oldSize);
        ControlFree(arena, shield->queue, oldSize);
      }
      shield->queue = p;
      shield->length = length;
    }
  }

  /* Queue unavailable, so synchronize now. Or if the mutator is not
     yet suspended and the code raises the shield on a covered
     segment, protect it now, because that's probably better than
     suspending the mutator. */
  if (shield->length == 0 || !shield->suspended) {
    shieldSync(shield, seg);
    return;
  }

  AVER_CRITICAL(shield->limit <= shield->length);
  AVER_CRITICAL(shield->next <= shield->limit);

  /* If we failed to extend the shield queue array, degrade to an LRU
     circular buffer. */
  if (shield->next >= shield->length)
    shield->next = 0;
  AVER_CRITICAL(shield->next < shield->length);
  AVER_CRITICAL(shield->length > 0);

  /* If the limit is less than the length, then the queue array has
     yet to be filled, and next is an uninitialized entry.
     Otherwise it's the tail end from last time around, and needs to
     be flushed. */
  if (shield->limit >= shield->length) {
    AVER_CRITICAL(shield->limit == shield->length);
    shieldFlushEntry(shield, shield->next);
  }

  shield->queue[shield->next] = seg;
  ++shield->next;
  seg->queued = TRUE;

  if (shield->next >= shield->limit)
    shield->limit = shield->next;
}
/* ShieldRaise -- declare segment should be protected from mutator
*
* Does not immediately protect the segment, unless the segment is
* covered and the shield queue is unavailable.
*/
void (ShieldRaise)(Arena arena, Seg seg, AccessSet mode)
{
SHIELD_AVERT(Arena, arena);
SHIELD_AVERT(Seg, seg);
AVERT(AccessSet, mode);
AVER((SegSM(seg) & mode) == AccessSetEMPTY);
SegSetSM(seg, SegSM(seg) | mode); /* inv.prot.shield preserved */
/* ensure inv.unsynced.suspended & inv.unsynced.depth */
cache(arena, seg);
/* design.mps.shield.inv.prot.shield preserved */
shieldSetSM(ArenaShield(arena), seg, BS_UNION(SegSM(seg), mode));
/* Ensure design.mps.shield.inv.unsynced.suspended and
design.mps.shield.inv.unsynced.depth */
shieldQueue(arena, seg);
/* Check queue and segment consistency. */
AVERT(Arena, arena);
AVERT(Seg, seg);
}
/* ShieldLower -- declare segment may be accessed by mutator */
void (ShieldLower)(Arena arena, Seg seg, AccessSet mode)
{
/* Don't check seg or arena, see .seg.broken */
Shield shield;
AVERT(Arena, arena);
shield = ArenaShield(arena);
SHIELD_AVERT(Seg, seg);
AVERT(AccessSet, mode);
AVER((SegSM(seg) & mode) == mode);
/* synced(seg) is not changed by the following
* preserving inv.unsynced.suspended
* Also inv.prot.shield preserved
*/
SegSetSM(seg, SegSM(seg) & ~mode);
protLower(arena, seg, mode);
/* SegIsSynced(seg) is not changed by the following preserving
design.mps.shield.inv.unsynced.suspended and
design.mps.shield.inv.prot.shield. */
shieldSetSM(shield, seg, BS_DIFF(SegSM(seg), mode));
/* TODO: Do we need to promptly call shieldProtLower here? It
loses the opportunity to coalesce the protection call. It would
violate design.mps.shield.prop.inside.access. */
/* shieldQueue(arena, seg); */
shieldProtLower(shield, seg, mode);
/* Check queue and segment consistency. */
AVERT(Arena, arena);
AVERT(Seg, seg);
}
/* ShieldEnter -- enter the shield, allowing exposes */
void (ShieldEnter)(Arena arena)
{
Size i;
Shield shield;
AVERT(Arena, arena);
shield = ArenaShield(arena);
AVER(!shield->inside);
AVER(shield->depth == 0);
AVER(!shield->suspended);
shieldQueueReset(shield);
shield->inside = TRUE;
}
/* shieldDebugCheck -- expensive consistency check
*
* While developing the shield it is very easy to make a consistency
* mistake that causes random corruption of the heap, usually because
* all the attempts to avoid protection and suspension end up failing
* to enforce design.mps.shield.prop.mutator.access. In these cases,
* try enabling SHIELD_DEBUG and extending this code as necessary.
*
* The basic idea is to iterate over *all* segments and check
* consistency with the arena and shield queue.
*/
#if defined(SHIELD_DEBUG)
static void shieldDebugCheck(Arena arena)
{
Shield shield;
Seg seg;
Count queued = 0;
AVERT(Arena, arena);
AVER(!arena->insideShield);
AVER(arena->shDepth == 0);
AVER(!arena->suspended);
AVER(arena->shCacheLimit <= ShieldCacheSIZE);
AVER(arena->shCacheI < arena->shCacheLimit);
for(i = 0; i < arena->shCacheLimit; i++)
AVER(arena->shCache[i] == NULL);
shield = ArenaShield(arena);
AVER(shield->inside || shield->limit == 0);
arena->shCacheI = (Size)0;
arena->shCacheLimit = (Size)1;
arena->insideShield = TRUE;
if (SegFirst(&seg, arena))
do {
if (shield->limit == 0) {
AVER(!seg->queued);
AVER(SegIsSynced(seg));
/* You can directly set protections here to see if it makes a
difference. */
/* ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg)); */
} else {
if (seg->queued)
++queued;
}
} while(SegNext(&seg, arena, seg));
AVER(queued == shield->limit);
}
#endif
/* .shield.flush: Flush empties the shield cache.
* This needs to be called before segments are destroyed as there
* may be references to them in the cache.
/* ShieldFlush -- empty the shield queue
*
* .shield.flush: Flush empties the shield queue. This needs to be
* called before queued segments are destroyed, to remove them from
* the queue. We flush the whole queue because finding the entry is
* O(n) and we're very likely reclaiming and destroying loads of
* segments. See also design.mps.shield.improv.resume.
*
* The memory for the segment may become spare, and not released back
* to the operating system. Since we keep track of protection on
* segments and not grains we have no way of keeping track of the
* protection state of spare grains. We therefore flush the protection
* to get it back into the default state (unprotected). See also
* design.mps.shield.improv.noseg.
*/
void (ShieldFlush)(Arena arena)
{
Size i;
for(i = 0; i < arena->shCacheLimit; ++i) {
if (arena->shDepth == 0)
break;
flush(arena, i);
}
Shield shield;
AVERT(Arena, arena);
shield = ArenaShield(arena);
#ifdef SHIELD_DEBUG
shieldDebugCheck(arena);
#endif
shieldFlushEntries(shield);
AVER(shield->unsynced == 0); /* everything back in sync */
#ifdef SHIELD_DEBUG
shieldDebugCheck(arena);
#endif
}
/* ShieldLeave -- leave the shield, protect segs from mutator */
void (ShieldLeave)(Arena arena)
{
Shield shield;
AVERT(Arena, arena);
AVER(arena->insideShield);
shield = ArenaShield(arena);
AVER(shield->inside);
AVER(shield->depth == 0); /* no pending covers */
AVER(shield->holds == 0);
ShieldFlush(arena);
/* Cache is empty so inv.outside.depth holds */
AVER(arena->shDepth == 0);
/* Ensuring the mutator is running at this point
* guarantees inv.outside.running */
if (arena->suspended) {
AVER(shield->unsynced == 0); /* everything back in sync */
/* Ensuring the mutator is running at this point guarantees
.inv.outside.running */
if (shield->suspended) {
ThreadRingResume(ArenaThreadRing(arena), ArenaDeadRing(arena));
arena->suspended = FALSE;
shield->suspended = FALSE;
}
arena->insideShield = FALSE;
shield->inside = FALSE;
}
/* ShieldExpose -- allow the MPS access to a segment while denying the mutator
*
* The MPS currently does not collect concurrently, however the only thing
* that makes it not-concurrent is a critical point in the Shield
* abstraction where the MPS seeks to gain privileged access to memory
* (usually in order to scan it for GC). The critical point is where
* ShieldExpose in shield.c has to call ShieldSuspend to preserve the
* shield invariants. This is the only point in the MPS that prevents
* concurrency, and the rest of the MPS is designed to support it.
*
* The restriction could be removed if either:
*
* * the MPS could use a different set of protections to the mutator
* program
*
* * the mutator program uses a software barrier
*
* The first one is tricky, and the second one just hasn't come up in any
* implementation we've been asked to make yet. Given a VM, it could
* happen, and the MPS would be concurrent.
*
* So, I believe there's nothing fundamentally non-concurrent about the
* MPS design. It's kind of waiting to happen.
*
* (Originally written at <http://news.ycombinator.com/item?id=4524036>.)
* The first expose of a shielded segment suspends the mutator to
* ensure the MPS has exclusive access.
*/
void (ShieldExpose)(Arena arena, Seg seg)
{
Shield shield;
AccessSet mode = AccessREAD | AccessWRITE;
/* <design/trace/#fix.noaver> */
AVERT_CRITICAL(Arena, arena);
AVER_CRITICAL(arena->insideShield);
shield = ArenaShield(arena);
AVER_CRITICAL(shield->inside);
SegSetDepth(seg, SegDepth(seg) + 1);
++arena->shDepth;
/* <design/trace/#fix.noaver> */
AVER_CRITICAL(arena->shDepth > 0);
AVER_CRITICAL(SegDepth(seg) > 0);
if (SegPM(seg) & mode)
ShieldSuspend(arena);
AVER_CRITICAL(SegDepth(seg) > 0); /* overflow */
++shield->depth;
AVER_CRITICAL(shield->depth > 0); /* overflow */
if (BS_INTER(SegPM(seg), mode) != AccessSetEMPTY)
shieldSuspend(arena);
/* This ensures inv.expose.prot */
protLower(arena, seg, mode);
/* Ensure design.mps.shield.inv.expose.prot. */
/* TODO: Mass exposure -- see
design.mps.shield.improv.mass-expose. */
shieldProtLower(shield, seg, mode);
}
/* ShieldCover -- declare MPS no longer needs access to seg */
void (ShieldCover)(Arena arena, Seg seg)
{
Shield shield;
/* <design/trace/#fix.noaver> */
AVERT_CRITICAL(Arena, arena);
shield = ArenaShield(arena);
AVERT_CRITICAL(Seg, seg);
AVER_CRITICAL(SegPM(seg) == AccessSetEMPTY);
AVER_CRITICAL(arena->shDepth > 0);
AVER_CRITICAL(SegDepth(seg) > 0);
SegSetDepth(seg, SegDepth(seg) - 1);
--arena->shDepth;
AVER_CRITICAL(shield->depth > 0);
--shield->depth;
/* ensure inv.unsynced.depth */
cache(arena, seg);
/* Ensure design.mps.shield.inv.unsynced.depth. */
shieldQueue(arena, seg);
}
/* C. COPYRIGHT AND LICENSE
*
* Copyright (C) 2001-2015 Ravenbrook Limited <http://www.ravenbrook.com/>.
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*

View file

@ -220,14 +220,30 @@ double rnd_double(void)
return rnd() / R_m_float;
}
/* sizelog2 -- return floor(log2(size))
 *
 * Computed by integer shifting rather than floating-point log(),
 * because log(size)/log(2.0) can evaluate to just below the true
 * integer result (e.g. 2.9999999999999996 for size == 8 with some
 * libm implementations) and the cast to unsigned truncates, yielding
 * a wrong answer for exact powers of two.  Callers (rnd_grain,
 * rnd_align) rely on the result being exact for powers of two.
 *
 * size must be nonzero; sizelog2(0) returns 0.
 */
static unsigned sizelog2(size_t size)
{
  unsigned n = 0;
  while ((size >>= 1) != 0)
    ++n;
  return n;
}
size_t rnd_grain(size_t arena_size)
{
/* The grain size must be small enough to allow for a complete set
* of zones in the initial chunk. */
size_t s = (size_t)(log((double)arena_size) / log(2.0));
size_t shift = MPS_WORD_SHIFT;
Insist(s > shift);
return (size_t)1 << (rnd() % (s - shift));
of zones in the initial chunk, but bigger than one word. */
Insist(arena_size >> MPS_WORD_SHIFT >= sizeof(void *));
return rnd_align(sizeof(void *), (size_t)1 << sizelog2(arena_size >> MPS_WORD_SHIFT));
}
/* rnd_align -- return a random power-of-two alignment
 *
 * Returns a uniformly chosen power of two between min and max
 * inclusive.  Both arguments must themselves be powers of two, with
 * min <= max.
 */
size_t rnd_align(size_t min, size_t max)
{
  unsigned log2min = sizelog2(min);
  unsigned log2max = sizelog2(max);
  Insist(min <= max);
  Insist(1uL << log2min == min);
  Insist(1uL << log2max == max);
  if (log2min >= log2max)
    return min;
  /* Shift by a random amount in [0, log2max - log2min]. */
  return min << (rnd() % (log2max - log2min + 1));
}
rnd_state_t rnd_seed(void)

View file

@ -260,6 +260,11 @@ extern double rnd_double(void);
extern size_t rnd_grain(size_t arena_size);
/* rnd_align -- random alignment */
extern size_t rnd_align(size_t min, size_t max);
/* randomize -- randomize the generator, or initialize to replay
*
* randomize(argc, argv) randomizes the rnd generator (using time(3))

View file

@ -408,6 +408,8 @@ Res TraceCondemnZones(Trace trace, ZoneSet condemnedSet)
arena = trace->arena;
ShieldHold(arena); /* .whiten.hold */
if(SegFirst(&seg, arena)) {
do {
/* Segment should be black now. */
@ -430,6 +432,8 @@ Res TraceCondemnZones(Trace trace, ZoneSet condemnedSet)
} while (SegNext(&seg, arena, seg));
}
ShieldRelease(arena);
EVENT3(TraceCondemnZones, trace, condemnedSet, trace->white);
/* The trace's white set must be a subset of the condemned set */
@ -438,6 +442,7 @@ Res TraceCondemnZones(Trace trace, ZoneSet condemnedSet)
return ResOK;
failBegin:
ShieldRelease(arena);
AVER(TraceIsEmpty(trace)); /* See .whiten.fail. */
return res;
}
@ -573,7 +578,7 @@ static Res traceFlip(Trace trace)
arena = trace->arena;
rfc.arena = arena;
ShieldSuspend(arena);
ShieldHold(arena);
AVER(trace->state == TraceUNFLIPPED);
AVER(!TraceSetIsMember(arena->flippedTraces, trace));
@ -634,11 +639,11 @@ static Res traceFlip(Trace trace)
EVENT2(TraceFlipEnd, trace, arena);
ShieldResume(arena);
ShieldRelease(arena);
return ResOK;
failRootFlip:
ShieldResume(arena);
ShieldRelease(arena);
return res;
}
@ -732,13 +737,6 @@ Res TraceCreate(Trace *traceReturn, Arena arena, int why)
EVENT3(TraceCreate, trace, arena, (EventFU)why);
/* We suspend the mutator threads so that the PoolWhiten methods */
/* can calculate white sets without the mutator allocating in */
/* buffers under our feet. */
/* @@@@ This is a short-term fix for request.dylan.160098_. */
/* .. _request.dylan.160098: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/160098 */
ShieldSuspend(arena);
STATISTIC_STAT ({
/* Iterate over all chains, all GenDescs within a chain, and all
* PoolGens within a GenDesc. */
@ -1118,6 +1116,7 @@ static Res traceScanSegRes(TraceSet ts, Rank rank, Arena arena, Seg seg)
Bool wasTotal;
ZoneSet white;
Res res;
RefSet summary;
/* The reason for scanning a segment is that it's grey. */
AVER(TraceSetInter(ts, SegGrey(seg)) != TraceSetEMPTY);
@ -1164,16 +1163,32 @@ static Res traceScanSegRes(TraceSet ts, Rank rank, Arena arena, Seg seg)
*/
AVER(RefSetSub(ScanStateUnfixedSummary(ss), SegSummary(seg)));
if(res != ResOK || !wasTotal) {
/* scan was partial, so... */
/* scanned summary should be ORed into segment summary. */
SegSetSummary(seg, RefSetUnion(SegSummary(seg), ScanStateSummary(ss)));
/* Write barrier deferral -- see design.mps.write-barrier.deferral. */
/* Did the segment refer to the white set? */
if (ZoneSetInter(ScanStateUnfixedSummary(ss), white) == ZoneSetEMPTY) {
/* Boring scan. One step closer to raising the write barrier. */
if (seg->defer > 0)
--seg->defer;
} else {
/* all objects on segment have been scanned, so... */
/* scanned summary should replace the segment summary. */
SegSetSummary(seg, ScanStateSummary(ss));
/* Interesting scan. Defer raising the write barrier. */
if (seg->defer < WB_DEFER_DELAY)
seg->defer = WB_DEFER_DELAY;
}
/* Only apply the write barrier if it is not deferred. */
if (seg->defer == 0) {
/* If we scanned every reference in the segment then we have a
complete summary we can set. Otherwise, we just have
information about more zones that the segment refers to. */
if (res == ResOK && wasTotal)
summary = ScanStateSummary(ss);
else
summary = RefSetUnion(SegSummary(seg), ScanStateSummary(ss));
} else {
summary = RefSetUNIV;
}
SegSetSummary(seg, summary);
ScanStateFinish(ss);
}
@ -1214,24 +1229,34 @@ static Res traceScanSeg(TraceSet ts, Rank rank, Arena arena, Seg seg)
void TraceSegAccess(Arena arena, Seg seg, AccessSet mode)
{
Res res;
AccessSet shieldHit;
Bool readHit, writeHit;
AVERT(Arena, arena);
AVERT(Seg, seg);
AVERT(AccessSet, mode);
shieldHit = BS_INTER(mode, SegSM(seg));
readHit = BS_INTER(shieldHit, AccessREAD) != AccessSetEMPTY;
writeHit = BS_INTER(shieldHit, AccessWRITE) != AccessSetEMPTY;
/* If it's a read access, then the segment must be grey for a trace */
/* which is flipped. */
AVER((mode & SegSM(seg) & AccessREAD) == 0
|| TraceSetInter(SegGrey(seg), arena->flippedTraces) != TraceSetEMPTY);
AVER(!readHit ||
TraceSetInter(SegGrey(seg), arena->flippedTraces) != TraceSetEMPTY);
/* If it's a write access, then the segment must have a summary that */
/* is smaller than the mutator's summary (which is assumed to be */
/* RefSetUNIV). */
AVER((mode & SegSM(seg) & AccessWRITE) == 0 || SegSummary(seg) != RefSetUNIV);
AVER(!writeHit || SegSummary(seg) != RefSetUNIV);
EVENT3(TraceAccess, arena, seg, mode);
if((mode & SegSM(seg) & AccessREAD) != 0) { /* read barrier? */
/* Write barrier deferral -- see design.mps.write-barrier.deferral. */
if (writeHit)
seg->defer = WB_DEFER_HIT;
if (readHit) {
Trace trace;
TraceId ti;
Rank rank;
@ -1265,11 +1290,11 @@ void TraceSegAccess(Arena arena, Seg seg, AccessSet mode)
/* The write barrier handling must come after the read barrier, */
/* because the latter may set the summary and raise the write barrier. */
if((mode & SegSM(seg) & AccessWRITE) != 0) /* write barrier? */
if (writeHit)
SegSetSummary(seg, RefSetUNIV);
/* The segment must now be accessible. */
AVER((mode & SegSM(seg)) == AccessSetEMPTY);
AVER(BS_INTER(mode, SegSM(seg)) == AccessSetEMPTY);
}
@ -1474,7 +1499,7 @@ Res TraceScanArea(ScanState ss, Word *base, Word *limit,
it's safe to accumulate now so that we can tail-call
scan_area. */
ss->scannedSize += AddrOffset(base, limit);
return scan_area(&ss->ss_s, base, limit, closure);
}
@ -1490,6 +1515,14 @@ static Res traceCondemnAll(Trace trace)
arena = trace->arena;
AVERT(Arena, arena);
/* .whiten.hold: We suspend the mutator threads so that the
PoolWhiten methods can calculate white sets without the mutator
allocating in buffers under our feet. See request.dylan.160098
<https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/160098>. */
/* TODO: Consider how to avoid this suspend in order to implement
incremental condemn. */
ShieldHold(arena);
/* Condemn all segments in pools with the GC attribute. */
RING_FOR(poolNode, &ArenaGlobals(arena)->poolRing, nextPoolNode) {
Pool pool = RING_ELT(Pool, arenaRing, poolNode);
@ -1508,6 +1541,8 @@ static Res traceCondemnAll(Trace trace)
}
}
ShieldRelease(arena);
if (TraceIsEmpty(trace))
return ResFAIL;
@ -1528,6 +1563,7 @@ static Res traceCondemnAll(Trace trace)
* will be triggered. In that case, we'll have to recover here by
* blackening the segments again. */
AVER(TraceIsEmpty(trace));
ShieldRelease(arena);
return res;
}

72
mps/code/xci3ll.gmk Normal file
View file

@ -0,0 +1,72 @@
# -*- makefile -*-
#
# xci3ll.gmk: BUILD FOR MAC OS X/i386/Clang PLATFORM
#
# $Id$
# Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
#
# .prefer.xcode: The documented and preferred way to develop the MPS
# for this platform is to use the Xcode project (mps.xcodeproj). This
# makefile provides a way to compile the MPS one source file at a
# time, rather than all at once via mps.c (which can hide errors due
# to missing headers).
PFM = xci3ll
MPMPF = \
lockix.c \
prmci3xc.c \
proti3.c \
protix.c \
protxc.c \
span.c \
ssixi3.c \
thxc.c \
vmix.c
include ll.gmk
CC = clang -arch i386
include comm.gmk
# C. COPYRIGHT AND LICENSE
#
# Copyright (C) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
# All rights reserved. This is an open source license. Contact
# Ravenbrook for commercial licensing options.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Redistributions in any form must be accompanied by information on how
# to obtain complete source code for this software and any accompanying
# software that uses this software. The source code must either be
# included in the distribution or be available for no more than the cost
# of distribution plus a nominal fee, and must be freely redistributable
# under reasonable conditions. For an executable file, complete source
# code means the source code for all modules it contains. It does not
# include source code for modules or files that typically accompany the
# major components of the operating system on which the executable file
# runs.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

52
mps/configure vendored
View file

@ -3513,6 +3513,17 @@ $as_echo "FreeBSD x86" >&6; }
CPP="$CC -I/usr/local/include -E"
PFMCFLAGS="$CFLAGS_GC"
;;
amd64-*-freebsd*/yes | x86_64-*-freebsd*/yes)
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: FreeBSD x86_64" >&5
$as_echo "FreeBSD x86_64" >&6; }
MPS_OS_NAME=fr
MPS_ARCH_NAME=i6
MPS_BUILD_NAME=ll
# Need /usr/local/include in order to find sqlite3.h
CFLAGS="-I/usr/local/include"
CPP="$CC -I/usr/local/include -E"
PFMCFLAGS="$CFLAGS_GC"
;;
amd64-*-freebsd*/no | x86_64-*-freebsd*/no)
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: FreeBSD x86_64" >&5
$as_echo "FreeBSD x86_64" >&6; }
@ -4789,3 +4800,44 @@ fi
echo 1>&2 "CONFIGURE/MAKE IS NOT THE BEST WAY TO BUILD THE MPS -- see <manual/build.txt>"
# C. COPYRIGHT AND LICENSE
#
# Copyright (C) 2012-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
# All rights reserved. This is an open source license. Contact
# Ravenbrook for commercial licensing options.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Redistributions in any form must be accompanied by information on how
# to obtain complete source code for this software and any accompanying
# software that uses this software. The source code must either be
# included in the distribution or be available for no more than the cost
# of distribution plus a nominal fee, and must be freely redistributable
# under reasonable conditions. For an executable file, complete source
# code means the source code for all modules it contains. It does not
# include source code for modules or files that typically accompany the
# major components of the operating system on which the executable file
# runs.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -107,6 +107,7 @@ case $host/$CLANG in
CFLAGS="-I/usr/local/include"
CPP="$CC -I/usr/local/include -E"
PFMCFLAGS="$CFLAGS_GC"
;;
amd64-*-freebsd*/no | x86_64-*-freebsd*/no)
AC_MSG_RESULT([FreeBSD x86_64])
MPS_OS_NAME=fr

View file

@ -120,8 +120,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -473,8 +473,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -176,8 +176,8 @@ Document History
Copyright and License
---------------------
Copyright © 2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -616,8 +616,8 @@ Document History
Copyright and License
---------------------
Copyright © 2001-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -235,8 +235,8 @@ management of page table mapping.
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -751,8 +751,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -736,8 +736,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -138,8 +138,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -264,8 +264,8 @@ Document history
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -93,8 +93,8 @@ Document History
Copyright and License
---------------------
Copyright © 2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -419,8 +419,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -610,8 +610,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -193,8 +193,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -149,8 +149,8 @@ Document History
Copyright and License
---------------------
Copyright © 1996-2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 1996-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -139,8 +139,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -73,8 +73,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -172,8 +172,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -144,8 +144,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -56,8 +56,8 @@ Document History
Copyright and License
---------------------
Copyright © 2015-2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2015-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -114,6 +114,7 @@ version_ Software versions
vm_ Virtual mapping
vmo1_ VM Module on DEC Unix
vmso_ VM Design for Solaris
write-barrier_ Write Barrier
writef_ The WriteF function
====================== ================================================
@ -192,6 +193,7 @@ writef_ The WriteF function
.. _vm: vm
.. _vmo1: vmo1
.. _vmso: vmso
.. _write-barrier: write-barrier
.. _writef: writef
@ -225,6 +227,7 @@ Document History
- 2013-06-07 RB_ Converting to reST_. Linking to [RB_2002-06-18]_.
- 2014-01-29 RB_ The arena no longer manages generation zonesets.
- 2014-01-17 GDR_ Add abq, nailboard, range.
- 2016-03-22 RB_ Add write-barrier.
.. _RB: http://www.ravenbrook.com/consultants/rb
.. _NB: http://www.ravenbrook.com/consultants/nb
@ -235,8 +238,8 @@ Document History
Copyright and License
---------------------
Copyright © 2002-2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2002-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -405,8 +405,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -431,8 +431,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2015 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2015 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -163,8 +163,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -95,8 +95,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2015 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2015 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -287,8 +287,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -700,8 +700,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -317,8 +317,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -407,8 +407,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -214,8 +214,8 @@ Document History
Copyright and License
---------------------
Copyright © 2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -427,8 +427,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -78,8 +78,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -813,8 +813,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -497,8 +497,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -565,8 +565,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -265,8 +265,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -44,8 +44,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -685,8 +685,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -47,8 +47,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -22,9 +22,10 @@ pool class. This pool implements a first (or last) fit policy for
variable-sized manually-managed objects, with control over first/last,
segment preference high/low, and slot fit low/high.
The pool was created in a response to a belief that the ScriptWorks
EPDL/EPDR's first fit policy is beneficial for some classes of client
behaviour, but the performance of a linear free list was unacceptable.
_`.background`: The pool was created in response to a belief that
the ScriptWorks EPDL/EPDR's first fit policy is beneficial for some
classes of client behaviour, but the performance of a linear free list
was unacceptable.
Overview
@ -115,8 +116,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -271,8 +271,8 @@ Document History
Copyright and License
---------------------
Copyright © 2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -165,8 +165,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -217,8 +217,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -540,8 +540,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -131,8 +131,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -368,8 +368,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -124,8 +124,8 @@ Document history
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -97,8 +97,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -24,7 +24,7 @@ _`.summary.subset`: The summary of reference seens by scan
There are two reasons that it is not an equality relation:
#. If the segment has had objects forwarded onto it then its summary
1. If the segment has had objects forwarded onto it then its summary
will get unioned with the summary of the segment that the object
was forwarded from. This may increase the summary. The forwarded
object of course may have a smaller summary (if such a thing were
@ -32,7 +32,7 @@ There are two reasons that it is not an equality relation:
reduce the summary. (The forwarding process may erroneously
introduce zones into the destination's summary).
#. A write barrier hit will set the summary to ``RefSetUNIV``.
2. A write barrier hit will set the summary to ``RefSetUNIV``.
The reason that ``ss.unfixedSummary`` is always a subset of the
previous summary is due to an "optimization" which has not been made

View file

@ -268,14 +268,14 @@ design.mps.protocol.overview.next-method_).
.. _design.mps.protocol.overview.next-method: protocol#overview.next-method
_`.split-merge.shield`: Split and merge methods may assume that the
segments they are manipulating are not in the shield cache.
segments they are manipulating are not in the shield queue.
_`.split-merge.shield.flush`: The shield cache is flushed before any
_`.split-merge.shield.flush`: The shield queue is flushed before any
split or merge methods are invoked.
_`.split-merge.shield.re-flush`: If a split or merge method performs
an operation on a segment which might cause the segment to be cached,
the method must flush the shield cache before returning or calling
an operation on a segment which might cause the segment to be queued,
the method must flush the shield queue before returning or calling
another split or merge method.
_`.split-merge.fail`: Split and merge methods might fail, in which

View file

@ -9,7 +9,7 @@ Shield
:Status: incomplete guide
:Revision: $Id$
:Copyright: See `Copyright and License`_.
:Index terms: pair: shield; design
:Index terms: pair: shield; design
Introduction
@ -25,8 +25,10 @@ _`.readership`: Any MPS developer. Not confidential.
Overview
--------
_`.over`: For incremental collection, we need *separate control* of
collector access and mutator (client) access to memory. The collector
_`.overview`: The MPS implements incremental garbage collection using
memory barriers implemented by a combination of hardware memory
protection and thread control. The MPS needs *separate control* of
collector access and mutator (client) access to memory: the collector
must be able to incrementally scan objects, without the mutator being
able to see them yet.
@ -38,117 +40,227 @@ limitation, and give the rest of the MPS the illusion that we can
control collector and mutator access separately.
Control of mutator access
-------------------------
Interface
---------
The MPS uses ``ShieldRaise()`` and ``ShieldLower()`` to forbid or
permit the mutator access to object memory (that is, memory allocated
by MPS).
Mutator access
..............
The shield provides ``ShieldRaise`` and ``ShieldLower`` to forbid or
permit the mutator access to object memory segments. Between these
two, a segment is said to have the shield "raised" (`.def.raised`_).
``void ShieldRaise(Arena arena, Seg seg, AccessSet mode)``
Prevent the mutator accessing the memory in the specified mode
(``AccessREAD``, ``AccessWRITE``, or both).
Prevent the mutator accessing the memory segment in the specified
mode (``AccessREAD``, ``AccessWRITE``, or both).
``void ShieldLower(Arena arena, Seg seg, AccessSet mode)``
Allow the mutator to access the memory in the specified mode
(``AccessREAD``, ``AccessWRITE``, or both).
Allow the mutator to access the memory segment in the specified
mode (``AccessREAD``, ``AccessWRITE``, or both).
If the mutator attempts an access that hits a shield, the MPS gets a
barrier hit (in the form of a fault, interrupt, exception), quickly
does some necessary work, and then makes the access succeed.
If the mutator attempts an access that hits the shield, the MPS gets
an OS-specific hardware protection fault which reaches
``ArenaAccess``, does whatever work is necessary, then lowers the
shield and returns to the mutator.
Some objects (for example registers) cannot be hardware protected: the
only way to prevent mutator access to them is to halt all mutator
threads. The MPS uses ``ShieldSuspend()`` and ``ShieldResume()`` to do
this.
``void ShieldSuspend(Arena arena)``
Stop all registered mutator threads.
``void ShieldResume(Arena arena)``
Resume all registered mutator threads.
``ShieldRaise`` and ``ShieldLower`` do *not* nest.
Control of collector access
---------------------------
Entering the shield
...................
When the collector wants to access object memory (that is, memory
allocated by MPS), it must first call ``ShieldEnter()``, then wrap any
accesses with a ``ShieldExpose()`` and ``ShieldCover()`` pair, and
finally call ``ShieldLeave()``.
The MPS can only gain exclusive access from "inside" the shield
(`.def.inside`_). To enter the shield, the MPS must call
``ShieldEnter``, and to leave it, the MPS must call ``ShieldLeave``.
``ShieldEnter()`` and ``ShieldLeave()`` are called by ``ArenaEnter()``
and ``ArenaLeave()`` (approximately) -- so the shield is always
entered when we are within MPS code (approximately).
``ShieldEnter`` and ``ShieldLeave`` are called by ``ArenaEnter`` and
``ArenaLeave`` so almost all of the MPS is "inside" the shield.
``ShieldExpose()`` might for example be called around:
- format-scan (when scanning);
- format-skip (when marking grains in a non-moving fix);
- format-isMoved and ``AddrCopy()`` (during a copying fix);
- format-pad (during reclaim).
Collector access to segments
............................
Note that there is no need to call ``ShieldExpose()`` when accessing
pool management memory such as bit tables. This is not object
memory, is never (legally) accessed by the mutator, and so is never
shielded.
When the MPS wants to access object memory segments from inside the
shield, it must wrap any accesses with a ``ShieldExpose`` and
``ShieldCover`` pair. These calls nest. After a call to
``ShieldExpose`` a segment is said to be "exposed" until the last
nested call to ``ShieldCover``. The shield arranges that the MPS can
access the memory while it is exposed.
On common operating systems, the only way to allow collector access is
to allow access from the whole process, including the mutator. So if
the Shield is asked to allow collector access but deny mutator access,
it will halt all mutator threads to prevent any mutator access. The
Shield performs suspension and restart; normal collector code does not
need to worry about it.
A segment might for example be exposed during:
Collector code can make multiple sequential, overlapping, or nested
calls to ``ShieldExpose()`` on the same segment, as long as each is
balanced by a corresponding ``ShieldCover()`` before ``ShieldLeave()``
is called). A usage count is maintained on each segment in
``seg->depth``: a positive "depth" means a positive number of
outstanding *reasons* why the segment must be exposed to the collector.
When the usage count reaches zero, there is no longer any reason the
segment should be unprotected, and the Shield could re-instate
hardware protection.
- format-scan (when scanning);
- format-skip (when marking grains in a non-moving fix);
- format-isMoved and ``AddrCopy`` (during a copying fix);
- format-pad (during reclaim).
However, as a performance-improving hysteresis, the Shield defers
re-protection, maintaining a cache of the last ``ShieldCacheSIZE``
times a segment no longer had a reason to be collector-accessible.
Presence in the cache counts as a reason: segments in the cache have
``seg->depth`` increased by one. As segments get pushed out of the
cache, or at ``ShieldLeave()``, this artificial reason is
decremented from ``seg->depth``, and (if ``seg->depth`` is now zero)
the deferred reinstatement of hardware protection happens.
Note that there is no need to call ``ShieldExpose`` when accessing
pool management memory such as bit tables. This is not object memory,
is never (legally) accessed by the mutator, and so is never shielded.
So whenever hardware protection is temporarily removed to allow
collector access, there is a *nurse* that will ensure this protection
is re-established: the nurse is either the balancing ``ShieldCover()``
call in collector code, or an entry in the shield cache.
Similarly, a pool class that never raises the shield on its segments
need never expose them to gain access.
.. note::
1. Why is there a fixed-size cache? This is not the simple
approach! All we need is a chain of segs that might need their
hardware protection to be sync'd with their shield mode. Head
in the shield, and one pointer in each seg struct. I guess we
try hard to avoid bloating ``SegStruct`` (to maintain residency
in the processor cache). But is 16 the right size? A cache-miss
wastes two kernel calls.
Collector access to the unprotectable
.....................................
2. I don't like the cache code. For example, why does
``ShieldFlush()`` break out early if ``arena->shDepth`` is 0?
This should never happen until the cache is completely flushed,
that is, we have reached ``shCacheLimit``. Why does
``ShieldFlush()`` not reset ``shCacheLimit``? Why does
``flush()`` silently accept ``NULL`` cache entries?
When the MPS wants to access an unprotectable object from inside the
shield, it must wrap any accesses with a ``ShieldHold`` and
``ShieldRelease`` pair. This allows access to objects which cannot be
shielded by ``ShieldRaise``, such as:
3. Why is ``seg->depth`` never checked for overflow? It is only a
4-bit-wide bit field, currently.
- the stack and registers of mutator threads,
- lockless allocation point structures,
- areas of memory that can't be protected by operating system calls,
- unprotectable roots.
Richard Kistruck, 2006-12-19.
``void ShieldHold(Arena arena)``
Get exclusive access to the unprotectable.
``void ShieldRelease(Arena arena)``
Declare that exclusive access is no longer needed.
Mechanism
---------
On common operating systems, the only way to allow the MPS access is
to allow access from the whole process, including the mutator. So
``ShieldExpose`` will suspend all mutator threads to prevent any mutator
access, and so will ``ShieldRaise`` on an unexposed segment. The
shield handles suspending and resuming threads, and so the rest of
the MPS does not need to worry about it.
The MPS can make multiple sequential, overlapping, or nested calls to
``ShieldExpose`` on the same segment, as long as each is balanced by a
corresponding ``ShieldCover`` before ``ShieldLeave`` is called. A
usage count is maintained on each segment in ``seg->depth``. When the
usage count reaches zero, there is no longer any reason the segment
should be unprotected, and the shield may reinstate hardware
protection at any time.
However, as a performance-improving hysteresis, the shield defers
re-protection, maintaining a queue of segments that require attention
before mutator threads are resumed (`.impl.delay`_). While a segment is
in the queue, it has ``seg->queued`` set true.
This hysteresis allows the MPS to proceed with garbage collection
during a pause without actually setting hardware protection until it
returns to the mutator. This is particularly important on operating
systems where the protection is expensive and poorly implemented, such
as OS X.
The queue also ensures that no memory protection system calls will be
needed for incremental garbage collection if a complete collection
cycle occurs during one pause.
Implementation
--------------
_`.impl.delay`: The implementation of the shield avoids suspending
threads for as long as possible. When threads are suspended, it
maintains a queue of segments where the desired and actual protection
do not match. This queue is flushed on leaving the shield.
Definitions
...........
_`.def.raised`: A segment has the shield "raised" for an access mode
after a call to ``ShieldRaise`` and before a call to ``ShieldLower``
with that mode.
_`.def.exposed`: A segment is "exposed" after a call to ``ShieldExpose``
and before the matching call to ``ShieldCover``.
_`.def.synced`: A seg is synced if the prot and shield modes are the
same, and unsynced otherwise.
_`.def.depth`: The depth of a segment is defined as:
| depth ≔ #exposes − #covers, where
| #exposes = the number of calls to ``ShieldExpose`` on the seg
| #covers = the number of calls to ``ShieldCover`` on the seg
``ShieldCover`` should not be called without a matching
``ShieldExpose``, so this figure should always be non-negative.
_`.def.total.depth`: The total depth is the sum of the depth over all
segments.
_`.def.outside`: Being outside the shield is being between calls to
``ShieldLeave`` and ``ShieldEnter``, and similarly _`.def.inside`: being
inside the shield is being between calls to ``ShieldEnter`` and
``ShieldLeave``. [In a multi-threaded MPS this would be per-thread.
RB 2016-03-18]
_`.def.shielded`: A segment is shielded if the shield mode is
non-zero. [As set by ShieldRaise.]
Properties
..........
_`.prop.outside.running`: The mutator may not be suspended while outside
the shield.
_`.prop.mutator.access`: An attempt by the mutator to access shielded
memory must be pre-empted by a call to ``ArenaAccess``.
_`.prop.inside.access`: Inside the shield the MPS must be able to access
all unshielded segments and all exposed segments.
Invariants
..........
_`.inv.outside.running`: The mutator is not suspended while outside the
shield.
_`.inv.unsynced.suspended`: If any segment is not synced, the mutator is
suspended.
_`.inv.unsynced.depth`: All unsynced segments have positive depth or are
in the queue.
_`.inv.outside.depth`: The total depth is zero while outside the shield.
_`.inv.prot.shield`: The prot mode is never more than the shield mode.
_`.inv.expose.depth`: An exposed seg's depth is greater than zero.
_`.inv.expose.prot`: An exposed seg is not protected in the mode it was
exposed with.
Proof Hints
...........
Hints at proofs of properties from invariants.
_`.proof.outside`: `.inv.outside.running`_ directly ensures
`.prop.outside.running`_.
_`.proof.sync`: As the depth of a segment cannot be negative
| total depth = 0
| ⇒ for all segments, depth = 0
| ⇒ all segs are synced (by .inv.unsynced.depth)
_`.proof.access`: If the mutator is running then all segs must be
synced (`.inv.unsynced.suspended`_), which means that the hardware
protection (protection mode) must reflect the software protection
(shield mode). Hence all shielded memory will be hardware protected
while the mutator is running. This ensures `.prop.mutator.access`_.
_`.proof.inside`: `.inv.prot.shield`_ and `.inv.expose.prot`_ ensure
`.prop.inside.access`_.
Initial ideas
@ -158,6 +270,99 @@ _`.ideas`: There never was an initial design document, but
[RB_1995-11-29]_ and [RB_1995-11-30]_ contain some initial ideas.
Improvement Ideas
-----------------
Mass exposure
.............
_`.improv.mass-expose`: If protection calls have a high overhead it might
be good to pre-emptively unprotect large ranges of memory when we
expose one segment. With the current design this would mean
discovering adjacent shielded segments and adding them to the queue.
The collector should take advantage of this by preferentially scanning
exposed segments during a pause.
Segment independence
....................
_`.improve.noseg`: The shield is implemented in terms of segments, using
fields in the segment structure to represent its state. This forces us
to (for example) flush the shield queue when deleting a segment. The
shield could keep track of protection and shielding independently,
possibly allowing greater coalescing and more efficient and flexible
use of system calls (see `.improv.mass-expose`_).
Concurrent collection
.....................
_`.improv.concurrent`: The MPS currently does not collect concurrently,
however the only thing that makes it not-concurrent is a critical
point in the Shield abstraction where the MPS seeks to gain privileged
access to memory (usually in order to scan it for GC). The critical
point is where ShieldExpose in shield.c has to call ShieldHold to
preserve the shield invariants. This is the only point in the MPS that
prevents concurrency, and the rest of the MPS is designed to support
it.
The restriction could be removed if either:
* the MPS could use a different set of protections to the mutator
program
* the mutator program uses a software barrier
The first one is tricky, and the second one just hasn't come up in any
implementation we've been asked to make yet. Given a VM, it could
happen, and the MPS would be concurrent.
So, I believe there's nothing fundamentally non-concurrent about the
MPS design. It's kind of waiting to happen.
(Originally written at <http://news.ycombinator.com/item?id=4524036>.)
Early Resume
............
_`.improv.resume`: There is a tradeoff between delaying flushing the
shield queue (preventing unnecessary protection and allowing us to
coalesce) and resuming mutator threads. We could resume threads
earlier under some circumstances, such as before reclaim (which does
not need to interact with the mutator). Basically, it might be worth
resuming the mutator early in a pause if we know that we're unlikely
to suspend it again (no more calls to ``ShieldRaise`` or
``ShieldExpose`` on shielded segments).
Expose modes
............
_`.improv.expose-modes`: Would it be a good idea for ShieldExpose() to
take an AccessSet? It might be good if we didn't have to raise a write
barrier unless we want to write. When scanning (for instance), we may
not need to write, so when scanning a segment behind a write barrier
we shouldn't have to call mprotect(). That's a bit speculative: how
often do we scan a segment and not write to it? Alternatively, and
more speculatively, we could keep the write barrier up, handle the
(possibly nested) trap and *then* expose the shield. I'm just
scraping around for ways to reduce calls to mprotect().
Theoretically we can do this, but:
1. We're mostly a moving collector so we'll almost always want to
write to segments we scan. That could change if we do more
non-moving collection.
2. The main cost of protection is changing it at all, not whether we
change just read or write. On OS X, the main cost seems to be the
TLB flush, which affects wall-clock time of everything on the
processor!
References
----------
@ -183,14 +388,22 @@ Document History
- 2013-05-24 GDR_ Converted to reStructuredText.
- 2016-03-17 RB_ Updated for dynamic queueing and general code tidying
that has removed complaints.
- 2016-03-19 RB_ Updated for separate queued flag on segments, changes
of invariants, cross-references, and ideas for future improvement.
.. _GDR: http://www.ravenbrook.com/consultants/gdr/
.. _RB: http://www.ravenbrook.com/consultants/rb/
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -177,8 +177,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -186,8 +186,8 @@ Document History
Copyright and License
---------------------
Copyright © 2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -955,8 +955,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -327,8 +327,8 @@ Document History
Copyright and License
---------------------
Copyright © 2014-2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2014-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -557,8 +557,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -457,8 +457,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -76,8 +76,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2016 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -115,8 +115,8 @@ Document History
Copyright and License
---------------------
Copyright © 2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -335,8 +335,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -349,8 +349,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -295,8 +295,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -717,8 +717,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -133,8 +133,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -61,8 +61,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -369,8 +369,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -72,8 +72,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -159,8 +159,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2014 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -0,0 +1,178 @@
.. mode: -*- rst -*-
Write barrier
=============
:Tag: design.mps.write-barrier
:Author: Richard Brooksby
:Date: 2016-03-18
:Status: incomplete design
:Revision: $Id$
:Copyright: See `Copyright and License`_.
:Index terms: pair: write barrier; design
Introduction
------------
.intro: This document explains the design of the write barrier of the
Memory Pool System (MPS).
.readership: This document is intended for developers of the MPS.
Overview
--------
.overview: The MPS uses a combination of hardware memory protection
and BIBOP techniques to maintain an approximate remembered set. The
remembered set keeps track of areas of memory that refer to each
other, so that the MPS can avoid scanning areas that are irrelevant
during a garbage collection. The MPS write barrier is implemented by
a one-word "summary" of the zones referenced by a segment. That
summary can be compared with the "white set" of a trace by a simple
logical AND operation.
Write Barrier Processes
-----------------------
.scan.summary: As the MPS scans a segment during garbage collection,
it accumulates a summary of references. This summary is represented
by a single-word ``ZoneSet``, derived from the bit patterns of the
references. After the scan the MPS can decide to store the summary
with the segment, and use it in future garbage collections to avoid
future scans.
If the summary does not intersect any of the zones containing
condemned objects, the MPS does not have to scan them in order to
determine if those objects are live.
The mutator could update the references in a segment and make the
summary invalid. To avoid this, when the MPS stores a summary, it
raises a write barrier on the segment memory. If the mutator does
update the segment, the barrier is hit, and the MPS resets the
summary, so that the segment will be scanned in future.
[At this point I was interrupted by a man from Porlock.]
Write barrier deferral
----------------------
.deferral: Both scanning and the write barrier cost CPU time, and
these must be balanced. There is no point spending 1000 CPU units
raising a write barrier to avoid 10 CPU units of scanning cost.
Therefore we do not raise the write barrier immediately.
.deferral.heuristic: We apply a simple heuristic: A segment which was
found to be "interesting" while scanning is likely to be interesting
again, and so raising the write barrier is not worthwhile. If we scan
a segment several times and find it "boring" then we raise the barrier
to avoid future boring scans.
.def.boring: A scan is "boring" if it was unnecessary for a garbage
collection because it found no references to condemned objects.
.def.interesting: A scan is "interesting" if it was not boring
(.def.boring).  Note that this does not mean it preserved condemned
objects, only that we would have scanned it even if we had had the
scan summary beforehand.
.deferral.count: We store a deferral count with the segment. The
count is decremented after each boring scan (.def.boring). The write
barrier is raised only when the count reaches zero.
.deferral.reset: The count is reset after three events:
1. segment creation (``WB_DEFER_INIT``)
2. an interesting scan (``WB_DEFER_DELAY``)
3. a barrier hit (``WB_DEFER_HIT``)
.deferral.dabble: The set of objects condemned by the garbage
collector changes, and so does what is interesting or boring. For
example, a collection of a nursery space in zone 3 might be followed
by a collection of a top generation in zone 7. This will upset
.deferral.heuristic somewhat. We assume that the garbage collector
will spend most of its time repeatedly collecting the same zones.
Improvements
------------
.improv.by-os: The overhead of hardware barriers varies widely between
operating systems. On Windows it is very cheap to change memory
protection and to handle protection faults.  On OS X it is very
expensive. The balance between barriers and scanning work is
different. We should measure the relative costs and tune the deferral
for each separately.
.improv.balance: Hardware costs of write barriers vary by OS, but
scanning costs vary depending on many factors including client code.
The MPS could dynamically measure these costs, perhaps using fast
cycle counters such as RDTSC, and use this to dynamically balance the
write barrier deferral.
References
----------
.. [job003975] "Poor performance due to imbalance between protection
and scanning costs"; Richard Brooksby; Ravenbrook
Limited; 2016-03-11;
<http://www.ravenbrook.com/project/mps/issue/job003975>.
Document History
----------------
- 2016-03-19 RB_ Created during preparation of
branch/2016-03-13/defer-write-barrier for [job003975]_.
.. _RB: http://www.ravenbrook.com/consultants/rb/
Copyright and License
---------------------
Copyright © 2016 Ravenbrook Limited <http://www.ravenbrook.com/>. All
rights reserved. This is an open source license. Contact Ravenbrook
for commercial licensing options.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
#. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
#. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
#. Redistributions in any form must be accompanied by information on how
to obtain complete source code for this software and any
accompanying software that uses this software. The source code must
either be included in the distribution or be available for no more than
the cost of distribution plus a nominal fee, and must be freely
redistributable under reasonable conditions. For an executable file,
complete source code means the source code for all modules it contains.
It does not include source code for modules or files that typically
accompany the major components of the operating system on which the
executable file runs.
**This software is provided by the copyright holders and contributors
"as is" and any express or implied warranties, including, but not
limited to, the implied warranties of merchantability, fitness for a
particular purpose, or non-infringement, are disclaimed. In no event
shall the copyright holders and contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or
services; loss of use, data, or profits; or business interruption)
however caused and on any theory of liability, whether in contract,
strict liability, or tort (including negligence or otherwise) arising in
any way out of the use of this software, even if advised of the
possibility of such damage.**

View file

@ -158,8 +158,8 @@ Document History
Copyright and License
---------------------
Copyright © 2013-2015 Ravenbrook Limited. All rights reserved.
<http://www.ravenbrook.com/>. This is an open source license. Contact
Copyright © 2013-2015 Ravenbrook Limited <http://www.ravenbrook.com/>.
All rights reserved. This is an open source license. Contact
Ravenbrook for commercial licensing options.
Redistribution and use in source and binary forms, with or without

View file

@ -29,12 +29,14 @@ Design
prot
range
ring
sp
shield
sig
sp
splay
stack-scan
testthr
thread-manager
type
vm
write-barrier
writef

View file

@ -48,7 +48,6 @@ Old design
root
scan
seg
shield
strategy
telemetry
tests

View file

@ -70,10 +70,11 @@ MV interface
optional :term:`keyword arguments`:
* :c:macro:`MPS_KEY_ALIGN` (type :c:type:`mps_align_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the
:term:`alignment` of addresses for allocation (and freeing) in
the pool. If an unaligned size is passed to :c:func:`mps_alloc` or
:c:func:`mps_free`, it will be rounded up to the pool's alignment.
:c:macro:`MPS_PF_ALIGN`) is the :term:`alignment` of the
addresses allocated (and freed) in the pool. The minimum
alignment supported by pools of this class is 1 (one)
and the maximum is the arena grain size
(see :c:macro:`MPS_KEY_ARENA_GRAIN_SIZE`).
* :c:macro:`MPS_KEY_EXTEND_BY` (type :c:type:`size_t`,
default 65536) is the :term:`size` of block that the pool will

View file

@ -115,12 +115,11 @@ MVFF interface
efficient if this is wrong, but nothing will break.
* :c:macro:`MPS_KEY_ALIGN` (type :c:type:`mps_align_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the
:term:`alignment` of addresses for allocation (and freeing) in
the pool. If an unaligned size is passed to :c:func:`mps_alloc`
or :c:func:`mps_free`, it will be rounded up to the pool's
alignment. The minimum alignment supported by pools of this
class is ``sizeof(void *)``.
:c:macro:`MPS_PF_ALIGN`) is the :term:`alignment` of the
addresses allocated (and freed) in the pool. The minimum
alignment supported by pools of this class is ``sizeof(void *)``
and the maximum is the arena grain size
(see :c:macro:`MPS_KEY_ARENA_GRAIN_SIZE`).
* :c:macro:`MPS_KEY_SPARE` (type :c:type:`double`, default 0.75)
is the maximum proportion of memory that the pool will keep

View file

@ -115,12 +115,11 @@ MVT interface
optional :term:`keyword arguments`:
* :c:macro:`MPS_KEY_ALIGN` (type :c:type:`mps_align_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the
:term:`alignment` of addresses for allocation (and freeing) in
the pool. If an unaligned size is passed to :c:func:`mps_alloc` or
:c:func:`mps_free`, it will be rounded up to the pool's alignment.
The minimum alignment supported by pools of this class is
``sizeof(void *)``.
:c:macro:`MPS_PF_ALIGN`) is the :term:`alignment` of the
addresses allocated (and freed) in the pool. The minimum
alignment supported by pools of this class is ``sizeof(void *)``
and the maximum is the arena grain size
(see :c:macro:`MPS_KEY_ARENA_GRAIN_SIZE`).
* :c:macro:`MPS_KEY_MIN_SIZE` (type :c:type:`size_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the

View file

@ -141,6 +141,20 @@ Other changes
.. _job003938: https://www.ravenbrook.com/project/mps/issue/job003938/
#. The MPS is less aggressive in its use of hardware memory protection
to maintain a :term:`write barrier` to speed up future collections.
This is particularly important for OS X, where memory protection is
poorly implemented. See job003371_ and job003975_.
#. The MPS coalesces memory protection, reducing the number of system
calls. This drastically improves real run time on operating systems
where memory protection is poorly implemented, such as OS X, but
also has a significant effect on Linux. See job003371_ and
job003975_.
.. _job003371: http://www.ravenbrook.com/project/mps/issue/job003371/
.. _job003975: http://www.ravenbrook.com/project/mps/issue/job003975/
.. _release-notes-1.114:

View file

@ -150,9 +150,9 @@ Client arenas
* :c:macro:`MPS_KEY_ARENA_GRAIN_SIZE` (type :c:type:`size_t`,
default 8192) is the granularity with which the arena will
manage memory internally. It must be a power of 2. Larger
granularity reduces overheads, but increases
:term:`fragmentation` and :term:`retention`.
manage memory internally. It must be a power of 2, and at least
``sizeof(void *)``. Larger granularity reduces overheads, but
increases :term:`fragmentation` and :term:`retention`.
* :c:macro:`MPS_KEY_PAUSE_TIME` (type :c:type:`double`, default
0.1) is the maximum time, in seconds, that operations within the

Some files were not shown because too many files have changed in this diff Show more